/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}
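
/*
 * Note: the atomic_cmpxchg() above is deliberately not retried. If it
 * fails, another CPU has already advanced the index past 'old', which
 * serves the same round-robin purpose. BT_WAIT_QUEUES is a power of two,
 * so the mask in bt_index_inc() gives cheap wrap-around.
 */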
/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all waiters potentially sleeping on normal (non-reserved) tags
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant (a depth of 1 can't be split fairly)
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
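
/*
 * Worked example of the fair-share limit above (illustrative numbers):
 * with bt->depth == 128 and 3 active queues, depth becomes
 * max((128 + 2) / 3, 4U) == 43, so each active shared user may have at
 * most 43 requests in flight. With 64 active queues the per-queue share
 * would be 2, so the 4U floor still guarantees a minimum of 4 tags.
 */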
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag) {
				end = last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}
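
/*
 * Illustrative mapping example (hypothetical numbers): with
 * bt->bits_per_word == 4, each blk_align_bitmap word covers 16 tags, so
 * tag 37 maps to word TAG_TO_INDEX() == 37 >> 4 == 2 and bit
 * TAG_TO_BIT() == 37 & 15 == 5; a free bit 5 found in word 2 converts
 * back to tag (2 << 4) + 5 == 37. The per-cpu *tag_cache only selects
 * where this search starts.
 */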
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}

static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		bool was_empty;

		was_empty = list_empty(&wait.task_list);
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		if (was_empty)
			atomic_set(&bs->wait_cnt, bt->wake_cnt);

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}

static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;

	/*
	 * The unlock memory barrier needs to order access to the request
	 * in the free path and the clearing of the tag bit.
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	bs = bt_wake_ptr(bt);
	if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
		atomic_set(&bs->wait_cnt, bt->wake_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}
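
/*
 * Note on the rolling wakeup above: a wait queue is only woken after
 * wake_cnt tag frees have been accounted against it, and wake_index then
 * rotates to the next of the BT_WAIT_QUEUES queues. So freeing a single
 * tag does not wake every sleeper; wakeups roll through the queues in
 * batches as tags become free.
 */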
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}

static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
			     unsigned long *free_map, unsigned int off)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		unsigned int bit = 0;

		do {
			bit = find_next_zero_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			__set_bit(bit + off, free_map);
			bit++;
		} while (1);

		off += (1 << bt->bits_per_word);
	}
}

void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
	if (tags->nr_reserved_tags)
		bt_for_each_free(&tags->breserved_tags, tag_map, 0);

	fn(data, tag_map);
	kfree(tag_map);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);
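
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): the
 * callback receives a bitmap with a bit set for every currently *free*
 * tag, so a clear bit below nr_tags marks a tag that is in flight.
 *
 *	static void my_check_busy(void *priv, unsigned long *free_map)
 *	{
 *		struct blk_mq_tags *tags = priv;
 *		unsigned int tag;
 *
 *		for (tag = 0; tag < tags->nr_tags; tag++)
 *			if (!test_bit(tag, free_map))
 *				pr_debug("tag %u is in flight\n", tag);
 *	}
 *
 *	blk_mq_tag_busy_iter(tags, my_check_busy, tags);
 */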
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / 4)
		bt->wake_cnt = max(1U, depth / 4);

	bt->depth = depth;
}
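
/*
 * Example of the clamp above (illustrative): with depth == 16 a default
 * batch larger than 4 is reduced to max(1U, 16 / 4) == 4, so small tag
 * maps still see wakeups before a whole batch worth of tags would have
 * to be freed.
 */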
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
		    int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags; that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If fewer than 4 tags, just forget about it; it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	for (i = 0; i < BT_WAIT_QUEUES; i++)
		init_waitqueue_head(&bt->bs[i].wait);

	bt_update_count(bt, depth);
	return 0;
}
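
/*
 * Worked example of the sizing above (illustrative, 64-bit build): the
 * starting bits_per_word is ilog2(64) == 6. For depth == 48 the shrink
 * loop stops at 8 tags per word (8 * 4 <= 48), giving
 * ALIGN(48, 8) / 8 == 6 cacheline-aligned words instead of one densely
 * packed word shared by every submitter.
 */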
static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}