git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
blk-mq: unify hctx delayed_run_work and run_work
author: Jens Axboe <axboe@fb.com>
Mon, 10 Apr 2017 15:54:54 +0000 (09:54 -0600)
committer: Jens Axboe <axboe@fb.com>
Fri, 28 Apr 2017 14:10:15 +0000 (08:10 -0600)
They serve the exact same purpose. Get rid of the non-delayed
work variant, and just run it without delay for the normal case.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <Bart.VanAssche@sandisk.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-core.c
block/blk-mq.c
include/linux/blk-mq.h

index 6bd4d1754d294a1ff4f6e9a7ebdc011580794e72..37939672d4df2a908f22f87f819500379aeed09f 100644 (file)
@@ -269,7 +269,7 @@ void blk_sync_queue(struct request_queue *q)
                int i;
 
                queue_for_each_hw_ctx(q, hctx, i) {
-                       cancel_work_sync(&hctx->run_work);
+                       cancel_delayed_work_sync(&hctx->run_work);
                        cancel_delayed_work_sync(&hctx->delay_work);
                }
        } else {
index e6aad49c168608faa987ed42730a0548fc166a3d..5c68fce87ffcd5e1799b7996c0f6d683d9b35ab2 100644 (file)
@@ -1166,13 +1166,9 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
                put_cpu();
        }
 
-       if (msecs == 0)
-               kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
-                                        &hctx->run_work);
-       else
-               kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-                                                &hctx->delayed_run_work,
-                                                msecs_to_jiffies(msecs));
+       kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+                                        &hctx->run_work,
+                                        msecs_to_jiffies(msecs));
 }
 
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
@@ -1224,7 +1220,7 @@ EXPORT_SYMBOL(blk_mq_queue_stopped);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
-       cancel_work(&hctx->run_work);
+       cancel_delayed_work_sync(&hctx->run_work);
        cancel_delayed_work(&hctx->delay_work);
        set_bit(BLK_MQ_S_STOPPED, &hctx->state);
 }
@@ -1282,17 +1278,7 @@ static void blk_mq_run_work_fn(struct work_struct *work)
 {
        struct blk_mq_hw_ctx *hctx;
 
-       hctx = container_of(work, struct blk_mq_hw_ctx, run_work);
-
-       __blk_mq_run_hw_queue(hctx);
-}
-
-static void blk_mq_delayed_run_work_fn(struct work_struct *work)
-{
-       struct blk_mq_hw_ctx *hctx;
-
-       hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
-
+       hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
        __blk_mq_run_hw_queue(hctx);
 }
 
@@ -1898,8 +1884,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
        if (node == NUMA_NO_NODE)
                node = hctx->numa_node = set->numa_node;
 
-       INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
-       INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
+       INIT_DELAYED_WORK(&hctx->run_work, blk_mq_run_work_fn);
        INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
        spin_lock_init(&hctx->lock);
        INIT_LIST_HEAD(&hctx->dispatch);
index 32bd8eb5ba67b11c673bbb4ade84ab400892d4ca..c7cc903284269cf8e78c60156bf703f3966b4cd3 100644 (file)
@@ -15,7 +15,7 @@ struct blk_mq_hw_ctx {
                unsigned long           state;          /* BLK_MQ_S_* flags */
        } ____cacheline_aligned_in_smp;
 
-       struct work_struct      run_work;
+       struct delayed_work     run_work;
        cpumask_var_t           cpumask;
        int                     next_cpu;
        int                     next_cpu_batch;
@@ -51,7 +51,6 @@ struct blk_mq_hw_ctx {
 
        atomic_t                nr_active;
 
-       struct delayed_work     delayed_run_work;
        struct delayed_work     delay_work;
 
        struct hlist_node       cpuhp_dead;