git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
blk-mq: use QUEUE_FLAG_QUIESCED to quiesce queue
authorMing Lei <ming.lei@redhat.com>
Sun, 18 Jun 2017 20:24:27 +0000 (14:24 -0600)
committerJens Axboe <axboe@kernel.dk>
Sun, 18 Jun 2017 20:24:27 +0000 (14:24 -0600)
It is required that no dispatch can happen any more once
blk_mq_quiesce_queue() returns, and we don't have such requirement
on APIs of stopping queue.

But blk_mq_quiesce_queue() still may not block/drain dispatch in
the case of BLK_MQ_S_START_ON_RUN, so use the newly introduced flag of
QUEUE_FLAG_QUIESCED and evaluate it inside RCU read-side critical
sections for fixing this issue.

Also blk_mq_quiesce_queue() is implemented via stopping queue, which
limits its uses and can easily cause races, because any queue restart in
other paths may break blk_mq_quiesce_queue(). With the introduced
flag of QUEUE_FLAG_QUIESCED, we don't need to depend on stopping queue
for quiescing any more.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Bart Van Assche <Bart.VanAssche@sandisk.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-sched.c
block/blk-mq.c
include/linux/blk-mq.h
include/linux/blkdev.h

index 254d1c164567d72a631385589b2083dc1e972c9e..9f025289da63c6a4e24a43ccdf835822dd34abfc 100644 (file)
@@ -58,7 +58,8 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
        bool did_work = false;
        LIST_HEAD(rq_list);
 
-       if (unlikely(blk_mq_hctx_stopped(hctx)))
+       /* RCU or SRCU read lock is needed before checking quiesced flag */
+       if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
                return;
 
        hctx->run++;
index 07785b5cf2bc083047b8359d7b94f81edb44d355..40b22c7f684e5c175aeea1e92fe11aaecf02de00 100644 (file)
@@ -170,6 +170,10 @@ void blk_mq_quiesce_queue(struct request_queue *q)
 
        __blk_mq_stop_hw_queues(q, true);
 
+       spin_lock_irq(q->queue_lock);
+       queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+       spin_unlock_irq(q->queue_lock);
+
        queue_for_each_hw_ctx(q, hctx, i) {
                if (hctx->flags & BLK_MQ_F_BLOCKING)
                        synchronize_srcu(&hctx->queue_rq_srcu);
@@ -190,6 +194,10 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue);
  */
 void blk_mq_unquiesce_queue(struct request_queue *q)
 {
+       spin_lock_irq(q->queue_lock);
+       queue_flag_clear(QUEUE_FLAG_QUIESCED, q);
+       spin_unlock_irq(q->queue_lock);
+
        blk_mq_start_stopped_hw_queues(q, true);
 }
 EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
@@ -1444,7 +1452,8 @@ static void __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
        blk_status_t ret;
        bool run_queue = true;
 
-       if (blk_mq_hctx_stopped(hctx)) {
+       /* RCU or SRCU read lock is needed before checking quiesced flag */
+       if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
                run_queue = false;
                goto insert;
        }
index 787d8a2a2ac606b063f6b3ac583b6fd971062250..de6536c14ae713c0f35a93762fc563740f1fee1d 100644 (file)
@@ -268,6 +268,10 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
  */
 static inline void blk_mq_quiesce_queue_nowait(struct request_queue *q)
 {
+       spin_lock_irq(q->queue_lock);
+       queue_flag_set(QUEUE_FLAG_QUIESCED, q);
+       spin_unlock_irq(q->queue_lock);
+
        blk_mq_stop_hw_queues(q);
 }
 
index 8423f6baf81858a36aa581ce136d37d128819c8a..22cfba64ce8146b64de1ab0b74e8fdfd140e0154 100644 (file)
@@ -619,6 +619,7 @@ struct request_queue {
 #define QUEUE_FLAG_POLL_STATS  28      /* collecting stats for hybrid polling */
 #define QUEUE_FLAG_REGISTERED  29      /* queue has been registered to a disk */
 #define QUEUE_FLAG_SCSI_PASSTHROUGH 30 /* queue supports SCSI commands */
+#define QUEUE_FLAG_QUIESCED    31      /* queue has been quiesced */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -715,6 +716,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
                             REQ_FAILFAST_DRIVER))
+#define blk_queue_quiesced(q)  test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
 
 static inline bool blk_account_rq(struct request *rq)
 {