#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk-mq-tag.h"

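/*
 * Grab a tag, waiting if necessary, and immediately release it again.
 * Callers use this to block until a subsequent tag allocation is likely
 * to succeed.
 */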
void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved)
{
	int tag, zero = 0;

	tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved);
	blk_mq_put_tag(hctx, tag, &zero);
}

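/* Return true if any word in the bitmap still has a clear (free) bit. */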
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

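/*
 * Advance a wait queue index, wrapping around BT_WAIT_QUEUES (which must
 * be a power of two for the mask to work).
 */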
static inline void bt_index_inc(unsigned int *index)
{
	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up anyone potentially sleeping on normal (non-reserved) tags.
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = bt->wake_index;
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		bt_index_inc(&wake_index);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->depth + users - 1) / users, 4U);
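	/*
	 * Worked example: with bt->depth == 128 and 3 active queues, each
	 * queue is allowed up to max((128 + 3 - 1) / 3, 4U) == 43 tags.
	 */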
	return atomic_read(&hctx->nr_active) < depth;
}

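/*
 * Find and lock a free bit within one bitmap word, starting the search
 * at 'last_tag'. Returns the bit number, or -1 if the word is full.
 */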
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag) {
				end = last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}

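/*
 * Pick the wait queue to sleep on. Hardware queues round-robin across
 * BT_WAIT_QUEUES wait queues so sleepers spread out instead of piling
 * onto a single queue.
 */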
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;

	if (!hctx)
		return &bt->bs[0];

	bs = &bt->bs[hctx->wait_index];
	bt_index_inc(&hctx->wait_index);
	return bs;
}

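/*
 * Allocate a tag, sleeping on one of the bitmap's wait queues if the map
 * is exhausted and the gfp mask allows blocking (__GFP_WAIT).
 */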
static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
		  unsigned int *last_tag, gfp_t gfp)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		bool was_empty;

		was_empty = list_empty(&wait.task_list);
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		if (was_empty)
			atomic_set(&bs->wait_cnt, bt->wake_cnt);

		io_schedule();
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

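/*
 * Normal tags live above the reserved range: the value handed back to the
 * caller is offset by nr_reserved_tags.
 */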
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags,
				     struct blk_mq_hw_ctx *hctx,
				     unsigned int *last_tag, gfp_t gfp)
{
	int tag;

	tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp);
	if (tag >= 0)
		return tag + tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
					      gfp_t gfp)
{
	int tag, zero = 0;

	if (unlikely(!tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
			    gfp_t gfp, bool reserved)
{
	if (!reserved)
		return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp);

	return __blk_mq_get_reserved_tag(hctx->tags, gfp);
}

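/*
 * Return the first wait queue with sleepers, scanning from wake_index,
 * or NULL if nobody is currently waiting for a tag.
 */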
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = bt->wake_index;
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			if (wake_index != bt->wake_index)
				bt->wake_index = wake_index;

			return bs;
		}

		bt_index_inc(&wake_index);
	}

	return NULL;
}

static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;

	/*
	 * The unlock memory barrier needs to order access to the request
	 * in the free path against clearing the tag bit.
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	bs = bt_wake_ptr(bt);
	if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
		atomic_set(&bs->wait_cnt, bt->wake_cnt);
		bt_index_inc(&bt->wake_index);
		wake_up(&bs->wait);
	}
}

static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}

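/*
 * Set a bit in 'free_map' for every currently free tag in this bitmap.
 * 'off' is the offset of this bitmap's tags within the overall tag space.
 */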
static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
			     unsigned long *free_map, unsigned int off)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int bit = 0;

		do {
			bit = find_next_zero_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			__set_bit(bit + off, free_map);
			bit++;
		} while (1);

		off += (1 << bt->bits_per_word);
	}
}

void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
			  void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
	if (tags->nr_reserved_tags)
		bt_for_each_free(&tags->breserved_tags, tag_map, 0);

	fn(data, tag_map);
	kfree(tag_map);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

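/* Count free tags by subtracting the number of set bits from the depth. */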
static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

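/*
 * Redistribute 'depth' tags across the bitmap words and recompute the
 * wakeup batch count (roughly a quarter of the depth, capped at
 * BT_WAIT_BATCH).
 */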
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / 4)
		bt->wake_cnt = max(1U, depth / 4);

	bt->depth = depth;
}

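/*
 * Allocate the per-word bitmaps and wait queues for one tag map. For small
 * depths the number of bits per word is shrunk so the tags still spread
 * across several cachelines.
 */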
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
		    int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If less than 4 tags, just forget about it, it's not
		 * going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	for (i = 0; i < BT_WAIT_QUEUES; i++)
		init_waitqueue_head(&bt->bs[i].wait);

	bt_update_count(bt, depth);
	return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}

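/*
 * Note: blk_mq_init_tags() and blk_mq_free_tags() are expected to be
 * paired by the caller (typically the blk-mq core, once per hardware
 * queue).
 */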
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

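/*
 * Seed a last_tag cache with a random offset so different software queues
 * start their tag searches in different bitmap words.
 */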
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

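/*
 * Resize the usable (non-reserved) part of the tag map at runtime, then
 * wake all sleepers so they re-evaluate against the new depth.
 */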
int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and cannot) update reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}

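/* Dump tag map statistics through sysfs. */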
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n",
			atomic_read(&tags->active_queues));

	return page - orig_page;
}