We want our own clearly defined error field for NVMe passthrough commands,
and the request's errors field is going away in its current form.
Just store the status and result fields in struct nvme_request from
hardirq completion context (using a new helper) and then generate a
Linux errno for the block layer only when we actually need it.
Because we can't overload the status value with a negative error code
for cancelled commands, struct nvme_request now has a flags field
that contains a bit for this condition.
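
As a rough sketch of the resulting flow (the transport-side function below is
made up for illustration; nvme_end_request() and nvme_error_status() are the
helpers introduced by this patch):

	/* hardirq completion context: only record the raw NVMe status/result */
	static void example_transport_complete(struct request *rq,
			struct nvme_completion *cqe)
	{
		nvme_end_request(rq, cqe->status, cqe->result);
	}

	/* a Linux errno is generated only when the request is actually ended */
	blk_mq_end_request(req, nvme_error_status(req));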
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
static struct class *nvme_class;
+int nvme_error_status(struct request *req)
+{
+ switch (nvme_req(req)->status & 0x7ff) {
+ case NVME_SC_SUCCESS:
+ return 0;
+ case NVME_SC_CAP_EXCEEDED:
+ return -ENOSPC;
+ default:
+ return -EIO;
+ }
+}
+EXPORT_SYMBOL_GPL(nvme_error_status);
+
static inline bool nvme_req_needs_retry(struct request *req)
{
if (blk_noretry_request(req))
return false;
- if (req->errors & NVME_SC_DNR)
+ if (nvme_req(req)->status & NVME_SC_DNR)
return false;
if (jiffies - req->start_time >= req->timeout)
return false;
void nvme_complete_rq(struct request *req)
{
- int error = 0;
-
- if (unlikely(req->errors)) {
- if (nvme_req_needs_retry(req)) {
- nvme_req(req)->retries++;
- blk_mq_requeue_request(req,
- !blk_mq_queue_stopped(req->q));
- return;
- }
-
- if (blk_rq_is_passthrough(req))
- error = req->errors;
- else
- error = nvme_error_status(req->errors);
+ if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) {
+ nvme_req(req)->retries++;
+ blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q));
+ return;
}
- blk_mq_end_request(req, error);
+ blk_mq_end_request(req, nvme_error_status(req));
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
status = NVME_SC_ABORT_REQ;
if (blk_queue_dying(req->q))
status |= NVME_SC_DNR;
- blk_mq_complete_request(req, status);
+ nvme_req(req)->status = status;
+ blk_mq_complete_request(req, 0);
+
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
if (!(req->rq_flags & RQF_DONTPREP)) {
nvme_req(req)->retries = 0;
+ nvme_req(req)->flags = 0;
req->rq_flags |= RQF_DONTPREP;
}
blk_execute_rq(req->q, NULL, req, at_head);
if (result)
*result = nvme_req(req)->result;
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ ret = -EINTR;
+ else
+ ret = nvme_req(req)->status;
out:
blk_mq_free_request(req);
return ret;
}
submit:
blk_execute_rq(req->q, disk, req, 0);
+ if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+ ret = -EINTR;
+ else
+ ret = nvme_req(req)->status;
if (result)
*result = le32_to_cpu(nvme_req(req)->result.u32);
if (meta && !ret && !write) {
struct nvme_fc_queue *queue = op->queue;
struct nvme_completion *cqe = &op->rsp_iu.cqe;
__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
+ union nvme_result result;
status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
goto done;
}
- op->nreq.result.u64 = 0;
+ result.u64 = 0;
break;
case sizeof(struct nvme_fc_ersp_iu):
status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
goto done;
}
- op->nreq.result = cqe->result;
+ result = cqe->result;
status = cqe->status;
break;
done:
if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
- nvme_complete_async_event(&queue->ctrl->ctrl, status,
- &op->nreq.result);
+ nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
nvme_fc_ctrl_put(ctrl);
return;
}
- blk_mq_complete_request(rq, le16_to_cpu(status) >> 1);
+ nvme_end_request(rq, status, result);
struct nvm_rq *rqd = rq->end_io_data;
rqd->ppa_status = nvme_req(rq)->result.u64;
+ rqd->error = nvme_req(rq)->status;
nvm_end_io(rqd);
kfree(nvme_req(rq)->cmd);
wait_for_completion_io(&wait);
- ret = nvme_error_status(rq->errors);
+ if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
+ ret = -EINTR;
+ else
+ ret = nvme_error_status(rq);
- *result = rq->errors & 0x7ff;
+ *result = nvme_req(rq)->status & 0x7ff;
if (status)
*status = le64_to_cpu(nvme_req(rq)->result.u64);
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
-enum {
- /*
- * Driver internal status code for commands that were cancelled due
- * to timeouts or controller shutdown. The value is negative so
- * that it a) doesn't overlap with the unsigned hardware error codes,
- * and b) can easily be tested for.
- */
- NVME_SC_CANCELLED = -EINTR,
-};
-
extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT (nvme_io_timeout * HZ)
struct nvme_command *cmd;
union nvme_result result;
u8 retries;
+ u8 flags;
+ u16 status;
+};
+
+enum {
+ NVME_REQ_CANCELLED = (1 << 0),
};
static inline struct nvme_request *nvme_req(struct request *req)
-static inline int nvme_error_status(u16 status)
+static inline void nvme_end_request(struct request *req, __le16 status,
+ union nvme_result result)
{
- switch (status & 0x7ff) {
- case NVME_SC_SUCCESS:
- return 0;
- case NVME_SC_CAP_EXCEEDED:
- return -ENOSPC;
- default:
- return -EIO;
- }
+ struct nvme_request *rq = nvme_req(req);
+
+ rq->status = le16_to_cpu(status) >> 1;
+ rq->result = result;
+ blk_mq_complete_request(req, 0);
}
+int nvme_error_status(struct request *req);
void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
}
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
- nvme_req(req)->result = cqe.result;
- blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
+ nvme_end_request(req, cqe.status, cqe.result);
}
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = iod->nvmeq;
- u16 status = req->errors;
- dev_warn(nvmeq->dev->ctrl.device, "Abort status: 0x%x", status);
+ dev_warn(nvmeq->dev->ctrl.device,
+ "Abort status: 0x%x", nvme_req(req)->status);
atomic_inc(&nvmeq->dev->ctrl.abort_limit);
blk_mq_free_request(req);
}
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
- req->errors = NVME_SC_CANCELLED;
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
* Mark the request as handled, since the inline shutdown
* forces all outstanding requests to complete.
*/
- req->errors = NVME_SC_CANCELLED;
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
wc->ex.invalidate_rkey == req->mr->rkey)
req->mr->need_inval = false;
- req->req.result = cqe->result;
- blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
+ nvme_end_request(rq, cqe->status, cqe->result);
nvme_rdma_error_recovery(req->queue->ctrl);
/* fail with DNR on cmd timeout */
- rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
&cqe->result);
} else {
struct request *rq;
- struct nvme_loop_iod *iod;
rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
if (!rq) {
- iod = blk_mq_rq_to_pdu(rq);
- iod->nvme_req.result = cqe->result;
- blk_mq_complete_request(rq, le16_to_cpu(cqe->status) >> 1);
+ nvme_end_request(rq, cqe->status, cqe->result);
schedule_work(&iod->queue->ctrl->reset_work);
/* fail with DNR on admin cmd timeout */
- rq->errors = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;