rq = blk_mq_sched_get_request(q, NULL, rw, &alloc_data);
- blk_mq_put_ctx(alloc_data.ctx);
blk_queue_exit(q);
if (!rq)
if (rq->tag != -1)
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
- blk_mq_sched_completed_request(hctx, rq);
- blk_mq_sched_restart_queues(hctx);
+ blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
+ blk_mq_sched_restart(hctx);
blk_queue_exit(q);
}
{
blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
}
+EXPORT_SYMBOL_GPL(blk_mq_finish_request);
void blk_mq_free_request(struct request *rq)
{
rq->q->softirq_done_fn(rq);
}
-static void blk_mq_ipi_complete_request(struct request *rq)
+static void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
bool shared = false;
int cpu;
+ if (rq->internal_tag != -1)
+ blk_mq_sched_completed_request(rq);
+ if (rq->rq_flags & RQF_STATS) {
+ blk_mq_poll_stats_start(rq->q);
+ blk_stat_add(rq);
+ }
+
if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
rq->q->softirq_done_fn(rq);
return;
put_cpu();
}
-static void blk_mq_stat_add(struct request *rq)
-{
- if (rq->rq_flags & RQF_STATS) {
- blk_mq_poll_stats_start(rq->q);
- blk_stat_add(rq);
- }
-}
-
-static void __blk_mq_complete_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
-
- blk_mq_stat_add(rq);
-
- if (!q->softirq_done_fn)
- blk_mq_end_request(rq, rq->errors);
- else
- blk_mq_ipi_complete_request(rq);
-}
-
/**
* blk_mq_complete_request - end I/O on a request
* @rq: the request being processed
* Ends all I/O on a request. It does not handle partial completions.
 * The actual completion happens out-of-order, through an IPI handler.
**/
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_complete_request(struct request *rq)
{
struct request_queue *q = rq->q;
if (unlikely(blk_should_fake_timeout(q)))
return;
- if (!blk_mark_rq_complete(rq)) {
- rq->errors = error;
+ if (!blk_mark_rq_complete(rq))
__blk_mq_complete_request(rq);
- }
}
EXPORT_SYMBOL(blk_mq_complete_request);
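
Since blk_mq_complete_request() no longer takes an error argument, a driver has to record the outcome itself before signalling completion and read it back in its completion handler (the blk_mq_ops ->complete callback, which blk-mq invokes via softirq_done_fn). A minimal sketch under that assumption; my_cmd, my_irq_complete and my_softirq_done are hypothetical names, not part of this patch:

#include <linux/blk-mq.h>

/* Hypothetical per-request driver data carrying the completion status. */
struct my_cmd {
	int status;
};

/* Interrupt path: record the status, then hand the request to blk-mq. */
static void my_irq_complete(struct request *rq, int status)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->status = status;
	blk_mq_complete_request(rq);		/* no error argument any more */
}

/* ->complete / softirq_done_fn: finish the request with the stored status. */
static void my_softirq_done(struct request *rq)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	blk_mq_end_request(rq, cmd->status);
}
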
.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
};
- if (rq->tag != -1) {
-done:
- if (hctx)
- *hctx = data.hctx;
- return true;
- }
+ if (rq->tag != -1)
+ goto done;
if (blk_mq_tag_is_reserved(data.hctx->sched_tags, rq->internal_tag))
data.flags |= BLK_MQ_REQ_RESERVED;
atomic_inc(&data.hctx->nr_active);
}
data.hctx->tags->rqs[rq->tag] = rq;
- goto done;
}
- return false;
+done:
+ if (hctx)
+ *hctx = data.hctx;
+ return rq->tag != -1;
}
static void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
return true;
}
-bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
- struct request_queue *q = hctx->queue;
+ struct blk_mq_hw_ctx *hctx;
struct request *rq;
- LIST_HEAD(driver_list);
- struct list_head *dptr;
- int queued, ret = BLK_MQ_RQ_QUEUE_OK;
+ int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
- /*
- * Start off with dptr being NULL, so we start the first request
- * immediately, even if we have more pending.
- */
- dptr = NULL;
+ if (list_empty(list))
+ return false;
/*
* Now process all the entries, sending them to the driver.
*/
- queued = 0;
- while (!list_empty(list)) {
+ errors = queued = 0;
+ do {
struct blk_mq_queue_data bd;
rq = list_first_entry(list, struct request, queuelist);
* The initial allocation attempt failed, so we need to
* rerun the hardware queue when a tag is freed.
*/
- if (blk_mq_dispatch_wait_add(hctx)) {
- /*
- * It's possible that a tag was freed in the
- * window between the allocation failure and
- * adding the hardware queue to the wait queue.
- */
- if (!blk_mq_get_driver_tag(rq, &hctx, false))
- break;
- } else {
+ if (!blk_mq_dispatch_wait_add(hctx))
+ break;
+
+ /*
+ * It's possible that a tag was freed in the window
+ * between the allocation failure and adding the
+ * hardware queue to the wait queue.
+ */
+ if (!blk_mq_get_driver_tag(rq, &hctx, false))
break;
- }
}
list_del_init(&rq->queuelist);
bd.rq = rq;
- bd.list = dptr;
/*
* Flag last if we have no more requests, or if we have more
default:
pr_err("blk-mq: bad return on queue: %d\n", ret);
case BLK_MQ_RQ_QUEUE_ERROR:
+ errors++;
rq->errors = -EIO;
blk_mq_end_request(rq, rq->errors);
break;
if (ret == BLK_MQ_RQ_QUEUE_BUSY)
break;
-
- /*
- * We've done the first request. If we have more than 1
- * left in the list, set dptr to defer issue.
- */
- if (!dptr && list->next != list->prev)
- dptr = &driver_list;
- }
+ } while (!list_empty(list));
hctx->dispatched[queued_to_index(queued)]++;
*/
if (!list_empty(list)) {
/*
- * If we got a driver tag for the next request already,
- * free it again.
+ * If an I/O scheduler has been configured and we got a driver
+ * tag for the next request already, free it again.
*/
rq = list_first_entry(list, struct request, queuelist);
blk_mq_put_driver_tag(rq);
spin_unlock(&hctx->lock);
/*
- * the queue is expected stopped with BLK_MQ_RQ_QUEUE_BUSY, but
- * it's possible the queue is stopped and restarted again
- * before this. Queue restart will dispatch requests. And since
- * requests in rq_list aren't added into hctx->dispatch yet,
- * the requests in rq_list might get lost.
+ * If SCHED_RESTART was set by the caller of this function and
+ * it is no longer set that means that it was cleared by another
+ * thread and hence that a queue rerun is needed.
*
- * blk_mq_run_hw_queue() already checks the STOPPED bit
+ * If TAG_WAITING is set that means that an I/O scheduler has
+ * been configured and another thread is waiting for a driver
+ * tag. To guarantee fairness, do not rerun this hardware queue
+ * but let the other thread grab the driver tag.
*
- * If RESTART or TAG_WAITING is set, then let completion restart
- * the queue instead of potentially looping here.
+ * If no I/O scheduler has been configured it is possible that
+ * the hardware queue got stopped and restarted before requests
+ * were pushed back onto the dispatch list. Rerun the queue to
+ * avoid starvation. Notes:
+ * - blk_mq_run_hw_queue() checks whether or not a queue has
+ * been stopped before rerunning a queue.
+ * - Some but not all block drivers stop a queue before
+ * returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
+ * and dm-rq.
*/
if (!blk_mq_sched_needs_restart(hctx) &&
!test_bit(BLK_MQ_S_TAG_WAITING, &hctx->state))
blk_mq_run_hw_queue(hctx, true);
}
- return queued != 0;
+ return (queued + errors) != 0;
}
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
blk_mq_sched_dispatch_requests(hctx);
rcu_read_unlock();
} else {
+ might_sleep();
+
srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
blk_mq_sched_dispatch_requests(hctx);
srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
return hctx->next_cpu;
}
-void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
+ unsigned long msecs)
{
if (unlikely(blk_mq_hctx_stopped(hctx) ||
!blk_mq_hw_queue_mapped(hctx)))
put_cpu();
}
- kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+ if (msecs == 0)
+ kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->run_work);
+ else
+ kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
+ &hctx->delayed_run_work,
+ msecs_to_jiffies(msecs));
+}
+
+void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
+{
+ __blk_mq_delay_run_hw_queue(hctx, true, msecs);
+}
+EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
+
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
+{
+ __blk_mq_delay_run_hw_queue(hctx, async, 0);
}
+EXPORT_SYMBOL(blk_mq_run_hw_queue);
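
One plausible use of the new blk_mq_delay_run_hw_queue() helper, sketched with made-up my_dev/my_device_busy/my_issue helpers: a ->queue_rq() that cannot make progress right now returns BLK_MQ_RQ_QUEUE_BUSY and schedules a delayed rerun instead of having the queue respun immediately.

#include <linux/blk-mq.h>

/* Hypothetical ->queue_rq() that backs off for 100 ms when the device has
 * no resources, relying on blk_mq_delay_run_hw_queue() to retry later. */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	struct my_dev *dev = hctx->queue->queuedata;	/* hypothetical */

	if (my_device_busy(dev)) {			/* hypothetical */
		blk_mq_delay_run_hw_queue(hctx, 100 /* msecs */);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	blk_mq_start_request(bd->rq);
	my_issue(dev, bd->rq);				/* hypothetical */
	return BLK_MQ_RQ_QUEUE_OK;
}
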
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
__blk_mq_run_hw_queue(hctx);
}
+static void blk_mq_delayed_run_work_fn(struct work_struct *work)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = container_of(work, struct blk_mq_hw_ctx, delayed_run_work.work);
+
+ __blk_mq_run_hw_queue(hctx);
+}
+
static void blk_mq_delay_work_fn(struct work_struct *work)
{
struct blk_mq_hw_ctx *hctx;
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
- init_request_from_bio(rq, bio);
+ blk_init_request_from_bio(rq, bio);
blk_account_io_start(rq, true);
}
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
.rq = rq,
- .list = NULL,
- .last = 1
+ .last = true,
};
struct blk_mq_hw_ctx *hctx;
blk_qc_t new_cookie;
__blk_mq_try_issue_directly(rq, cookie, false);
rcu_read_unlock();
} else {
- unsigned int srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
+ unsigned int srcu_idx;
+
+ might_sleep();
+
+ srcu_idx = srcu_read_lock(&hctx->queue_rq_srcu);
__blk_mq_try_issue_directly(rq, cookie, true);
srcu_read_unlock(&hctx->queue_rq_srcu, srcu_idx);
}
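
The might_sleep() annotations document that the SRCU branch is only taken for queues whose tag set carries BLK_MQ_F_BLOCKING, i.e. whose ->queue_rq() is allowed to sleep. A sketch of how a driver might opt in; my_mq_ops, my_cmd and the field values are illustrative only:

#include <linux/blk-mq.h>

/* Hypothetical tag set for a driver whose ->queue_rq() may sleep;
 * BLK_MQ_F_BLOCKING selects the SRCU-protected dispatch/issue paths. */
static struct blk_mq_tag_set my_tag_set = {
	.ops		= &my_mq_ops,		/* hypothetical blk_mq_ops */
	.nr_hw_queues	= 1,
	.queue_depth	= 64,
	.cmd_size	= sizeof(struct my_cmd),
	.numa_node	= NUMA_NO_NODE,
	.flags		= BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
};
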
list_del_init(&same_queue_rq->queuelist);
list_add_tail(&rq->queuelist, &plug->mq_list);
+ blk_mq_put_ctx(data.ctx);
+
if (same_queue_rq)
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
&cookie);
+
+ return cookie;
} else if (q->nr_hw_queues > 1 && is_sync) {
+ blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+ return cookie;
} else if (q->elevator) {
blk_mq_bio_to_request(rq, bio);
blk_mq_sched_insert_request(rq, false, true, true, true);
- } else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
+ } else if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio))
blk_mq_run_hw_queue(data.hctx, true);
- }
blk_mq_put_ctx(data.ctx);
return cookie;
hctx->fq->flush_rq, hctx_idx,
flush_start_tag + hctx_idx);
+ blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
+
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
node = hctx->numa_node = set->numa_node;
INIT_WORK(&hctx->run_work, blk_mq_run_work_fn);
+ INIT_DELAYED_WORK(&hctx->delayed_run_work, blk_mq_delayed_run_work_fn);
INIT_DELAYED_WORK(&hctx->delay_work, blk_mq_delay_work_fn);
spin_lock_init(&hctx->lock);
INIT_LIST_HEAD(&hctx->dispatch);
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
+ if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
+ goto exit_hctx;
+
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
if (!hctx->fq)
- goto exit_hctx;
+ goto sched_exit_hctx;
if (set->ops->init_request &&
set->ops->init_request(set->driver_data,
free_fq:
kfree(hctx->fq);
+ sched_exit_hctx:
+ blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
exit_hctx:
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
{
struct request_queue *q;
+ lockdep_assert_held(&set->tag_list_lock);
+
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_freeze_queue(q);
queue_set_hctx_shared(q, shared);
struct blk_mq_tag_set *set = q->tag_set;
mutex_lock(&set->tag_list_lock);
- list_del_init(&q->tag_set_list);
+ list_del_rcu(&q->tag_set_list);
+ INIT_LIST_HEAD(&q->tag_set_list);
if (list_is_singular(&set->tag_list)) {
/* just transitioned to unshared */
set->flags &= ~BLK_MQ_F_TAG_SHARED;
blk_mq_update_tag_set_depth(set, false);
}
mutex_unlock(&set->tag_list_lock);
+
+ synchronize_rcu();
}
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
}
if (set->flags & BLK_MQ_F_TAG_SHARED)
queue_set_hctx_shared(q, true);
- list_add_tail(&q->tag_set_list, &set->tag_list);
+ list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
mutex_unlock(&set->tag_list_lock);
}
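
With the list now updated via list_add_tail_rcu()/list_del_rcu() plus synchronize_rcu(), a reader can walk set->tag_list under rcu_read_lock() without taking tag_list_lock. A generic sketch of such a reader; my_for_each_queue() and do_something() are placeholders, not functions in this patch:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/rculist.h>

/* Hypothetical lockless reader of the tag_list, made safe by the RCU list
 * primitives above and the synchronize_rcu() in the deletion path. */
static void my_for_each_queue(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	rcu_read_lock();
	list_for_each_entry_rcu(q, &set->tag_list, tag_set_list)
		do_something(q);		/* placeholder */
	rcu_read_unlock();
}
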
struct blk_mq_hw_ctx *hctx;
unsigned int i;
- blk_mq_sched_teardown(q);
-
/* hctx kobj stays in hctx */
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx)
return 0;
}
+static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+{
+ if (set->ops->map_queues)
+ return set->ops->map_queues(set);
+ else
+ return blk_mq_map_queues(set);
+}
+
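
blk_mq_update_queue_map() centralizes the choice between a driver-supplied ->map_queues and the blk_mq_map_queues() default. For illustration only, a PCI driver might wire ->map_queues to blk_mq_pci_map_queues() (two-argument form, as in kernels of this vintage); my_dev and my_map_queues are hypothetical:

#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>

/* Hypothetical ->map_queues callback: derive the cpu-to-hw-queue map from
 * PCI/MSI-X affinity instead of using the blk_mq_map_queues() default. */
static int my_map_queues(struct blk_mq_tag_set *set)
{
	struct my_dev *dev = set->driver_data;	/* hypothetical */

	return blk_mq_pci_map_queues(set, dev->pdev);
}
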
/*
* Alloc a tag set to be associated with one or more request queues.
* May fail with EINVAL for various error conditions. May adjust the
if (!set->mq_map)
goto out_free_tags;
- if (set->ops->map_queues)
- ret = set->ops->map_queues(set);
- else
- ret = blk_mq_map_queues(set);
+ ret = blk_mq_update_queue_map(set);
if (ret)
goto out_free_mq_map;
{
struct request_queue *q;
+ lockdep_assert_held(&set->tag_list_lock);
+
if (nr_hw_queues > nr_cpu_ids)
nr_hw_queues = nr_cpu_ids;
if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
blk_mq_freeze_queue(q);
set->nr_hw_queues = nr_hw_queues;
+ blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
blk_mq_queue_reinit(q, cpu_online_mask);