diff --git a/block/blk-mq.c b/block/blk-mq.c
index 159187a28d66521b4ab0109d3db38e6225ac71b3..6b6e7bc041dbf3c4699ca9bfa8e36c58a8f119d0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
 {
        struct blk_mq_timeout_data *data = priv;
 
-       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
-               /*
-                * If a request wasn't started before the queue was
-                * marked dying, kill it here or it'll go unnoticed.
-                */
-               if (unlikely(blk_queue_dying(rq->q))) {
-                       rq->errors = -EIO;
-                       blk_mq_end_request(rq, rq->errors);
-               }
+       if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                return;
-       }
 
        if (time_after_eq(jiffies, rq->deadline)) {
                if (!blk_mark_rq_complete(rq))
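Reviewer note on the hunk above: it removes the special case that completed never-started requests with -EIO when the queue was dying, leaving the timeout scan to skip anything that has not reached REQ_ATOM_STARTED. The implied assumption, not visible in this diff, is that such requests are now failed elsewhere on the queue-teardown path. For orientation, a reconstruction of the whole function after the patch; the else-branch bookkeeping is an assumption based on blk-mq of this era rather than something the hunk shows:

static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                                 struct request *rq, void *priv, bool reserved)
{
        struct blk_mq_timeout_data *data = priv;

        /* Never-started requests are now skipped outright; the old
         * dying-queue -EIO completion is gone. */
        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
                return;

        if (time_after_eq(jiffies, rq->deadline)) {
                /* Expired: route to the timeout handler unless another
                 * path already marked the request complete. */
                if (!blk_mark_rq_complete(rq))
                        blk_mq_rq_timed_out(rq, reserved);
        } else if (!data->next_set || time_after(data->next, rq->deadline)) {
                /* Remember the earliest pending deadline so the timeout
                 * timer can be rearmed (assumed bookkeeping, not in the
                 * hunk itself). */
                data->next = rq->deadline;
                data->next_set = 1;
        }
}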
@@ -978,7 +969,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
        struct request *rq;
        LIST_HEAD(driver_list);
        struct list_head *dptr;
-       int queued, ret = BLK_MQ_RQ_QUEUE_OK;
+       int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
 
        /*
         * Start off with dptr being NULL, so we start the first request
@@ -989,7 +980,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
        /*
         * Now process all the entries, sending them to the driver.
         */
-       queued = 0;
+       errors = queued = 0;
        while (!list_empty(list)) {
                struct blk_mq_queue_data bd;
 
@@ -1046,6 +1037,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
                default:
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
                case BLK_MQ_RQ_QUEUE_ERROR:
+                       errors++;
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                        break;
@@ -1097,7 +1089,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
                        blk_mq_run_hw_queue(hctx, true);
        }
 
-       return queued != 0;
+       return (queued + errors) != 0;
 }
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
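The four hunks above are one logical change. blk_mq_dispatch_rq_list() used to report progress through queued alone, so a dispatch pass in which every request came back BLK_MQ_RQ_QUEUE_ERROR returned false, as if the driver had accepted nothing. Counting errors and folding it into the return value makes immediately-failed requests count as progress too. A condensed sketch of the loop after the change; the ->queue_rq() call and the elided cases are assumptions from the surrounding code, and only the lines visible in the hunks are certain:

        errors = queued = 0;
        while (!list_empty(list)) {
                struct blk_mq_queue_data bd;

                /* ... pop the next request and fill in bd ... */

                ret = q->mq_ops->queue_rq(hctx, &bd);   /* assumed call site */
                switch (ret) {
                case BLK_MQ_RQ_QUEUE_OK:
                        queued++;               /* driver accepted it */
                        break;
                /* ... BLK_MQ_RQ_QUEUE_BUSY: requeue and stop dispatching ... */
                default:
                        pr_err("blk-mq: bad return on queue: %d\n", ret);
                        /* fall through */
                case BLK_MQ_RQ_QUEUE_ERROR:
                        errors++;               /* failed, but disposed of */
                        rq->errors = -EIO;
                        blk_mq_end_request(rq, rq->errors);
                        break;
                }
        }
        /* ... requeue handling for whatever is left on the list ... */

        /* Progress now means "something was queued or failed", not just queued. */
        return (queued + errors) != 0;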
@@ -1434,7 +1426,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
        return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }
 
-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+                                     bool may_sleep)
 {
        struct request_queue *q = rq->q;
        struct blk_mq_queue_data bd = {
@@ -1475,7 +1468,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
        }
 
 insert:
-       blk_mq_sched_insert_request(rq, false, true, true, false);
+       blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
 
 /*
@@ -1569,11 +1562,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
                        rcu_read_lock();
-                       blk_mq_try_issue_directly(old_rq, &cookie);
+                       blk_mq_try_issue_directly(old_rq, &cookie, false);
                        rcu_read_unlock();
                } else {
                        srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-                       blk_mq_try_issue_directly(old_rq, &cookie);
+                       blk_mq_try_issue_directly(old_rq, &cookie, true);
                        srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
                }
                goto done;
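The final three hunks thread a may_sleep flag into blk_mq_try_issue_directly(): the direct-issue path runs under rcu_read_lock() for ordinary drivers, where sleeping is forbidden, and under srcu_read_lock() for BLK_MQ_F_BLOCKING drivers, where it is allowed. The flag lands in the fallback insert, whose fourth argument also flips from async (true) to synchronous (false). An annotated version of that call; the parameter names are an assumption based on blk-mq-sched of this era, not something the diff states:

/*
 * Assumed signature (not shown in this diff):
 *   void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 *                                    bool run_queue, bool async,
 *                                    bool can_block);
 */
insert:
        blk_mq_sched_insert_request(rq,
                                    false,      /* at_head: insert at the tail */
                                    true,       /* run_queue: kick dispatch */
                                    false,      /* async: was true; now inline */
                                    may_sleep); /* can_block: true only on the SRCU path */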