return atomic_read(&hctx->nr_active) < depth;
}
-#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
-
-static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
- struct blk_mq_tags *tags)
+static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
{
if (!hctx_may_queue(hctx, bt))
return -1;
- return __sbitmap_queue_get(bt, BT_ALLOC_RR(tags));
+ return __sbitmap_queue_get(bt);
}
static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
DEFINE_WAIT(wait);
int tag;
- tag = __bt_get(hctx, bt, tags);
+ tag = __bt_get(hctx, bt);
if (tag != -1)
return tag;
do {
prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
- tag = __bt_get(hctx, bt, tags);
+ tag = __bt_get(hctx, bt);
if (tag != -1)
break;
* Retry tag allocation after running the hardware queue,
* as running the queue may also have found completions.
*/
- tag = __bt_get(hctx, bt, tags);
+ tag = __bt_get(hctx, bt);
if (tag != -1)
break;
const int real_tag = tag - tags->nr_reserved_tags;
BUG_ON(real_tag >= tags->nr_tags);
- sbitmap_queue_clear(&tags->bitmap_tags, real_tag,
- BT_ALLOC_RR(tags), ctx->cpu);
+ sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
} else {
BUG_ON(tag >= tags->nr_reserved_tags);
- sbitmap_queue_clear(&tags->breserved_tags, tag,
- BT_ALLOC_RR(tags), ctx->cpu);
+ sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
}
}
return bt->sb.depth - sbitmap_weight(&bt->sb);
}
-static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, int node)
+static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
+ bool round_robin, int node)
{
- return sbitmap_queue_init_node(bt, depth, -1, GFP_KERNEL, node);
+ return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
+ node);
}
static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
int node, int alloc_policy)
{
unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
+ bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
- tags->alloc_policy = alloc_policy;
-
- if (bt_alloc(&tags->bitmap_tags, depth, node))
+ if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
goto free_tags;
- if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node))
+ if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
+ node))
goto free_bitmap_tags;
return tags;
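With the policy folded into a bool at init time, an sbitmap_queue user decides round-robin versus FIFO-style allocation exactly once. A minimal sketch of the new init call, assuming a standalone caller outside blk-mq (queue name, depth, and error handling are illustrative, not from the patch):

	struct sbitmap_queue sbq;
	int ret;

	/* 128 bits, default shift, FIFO-style hint reuse (round_robin = false). */
	ret = sbitmap_queue_init_node(&sbq, 128, -1, false, GFP_KERNEL, NUMA_NO_NODE);
	if (ret)
		return ret;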
struct request **rqs;
struct list_head page_list;
- int alloc_policy;
cpumask_var_t cpumask;
};
* @ws: Wait queues.
*/
struct sbq_wait_state *ws;
+
+ /**
+ * @round_robin: Allocate bits in strict round-robin order.
+ */
+ bool round_robin;
};
/**
* @sbq: Bitmap queue to initialize.
* @depth: See sbitmap_init_node().
* @shift: See sbitmap_init_node().
+ * @round_robin: See sbitmap_get().
* @flags: Allocation flags.
* @node: Memory node to allocate on.
*
* Return: Zero on success or negative errno on failure.
*/
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
- int shift, gfp_t flags, int node);
+ int shift, bool round_robin, gfp_t flags, int node);
/**
* sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
* __sbitmap_queue_get() - Try to allocate a free bit from a &struct
* sbitmap_queue with preemption already disabled.
* @sbq: Bitmap queue to allocate from.
- * @round_robin: See sbitmap_get().
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-int __sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin);
+int __sbitmap_queue_get(struct sbitmap_queue *sbq);
/**
* sbitmap_queue_get() - Try to allocate a free bit from a &struct
* sbitmap_queue.
* @sbq: Bitmap queue to allocate from.
- * @round_robin: See sbitmap_get().
* @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
* sbitmap_queue_clear()).
*
* Return: Non-negative allocated bit number if successful, -1 otherwise.
*/
-static inline int sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin,
+static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
unsigned int *cpu)
{
int nr;
*cpu = get_cpu();
- nr = __sbitmap_queue_get(sbq, round_robin);
+ nr = __sbitmap_queue_get(sbq);
put_cpu();
return nr;
}
* &struct sbitmap_queue.
* @sbq: Bitmap to free from.
* @nr: Bit number to free.
- * @round_robin: See sbitmap_get().
* @cpu: CPU the bit was allocated on.
*/
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
- bool round_robin, unsigned int cpu);
+ unsigned int cpu);
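Allocation and free likewise lose the policy argument; only the CPU returned by sbitmap_queue_get() still has to travel to sbitmap_queue_clear() so the per-CPU hint can be restored. A short usage sketch with the simplified signatures, assuming the queue initialized above and a preemptible caller (the surrounding logic is hypothetical):

	unsigned int cpu;
	int nr;

	nr = sbitmap_queue_get(&sbq, &cpu);
	if (nr < 0)
		return -EBUSY;	/* no free bits at the moment */

	/* ... use bit 'nr' as a tag ... */

	sbitmap_queue_clear(&sbq, nr, cpu);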
static inline int sbq_index_inc(int index)
{
}
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
- int shift, gfp_t flags, int node)
+ int shift, bool round_robin, gfp_t flags, int node)
{
int ret;
int i;
init_waitqueue_head(&sbq->ws[i].wait);
atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
}
+
+ sbq->round_robin = round_robin;
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
-int __sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin)
+int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
unsigned int hint;
int nr;
hint = this_cpu_read(*sbq->alloc_hint);
- nr = sbitmap_get(&sbq->sb, hint, round_robin);
+ nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);
if (nr == -1) {
/* If the map is full, a hint won't do us much good. */
this_cpu_write(*sbq->alloc_hint, 0);
- } else if (nr == hint || unlikely(round_robin)) {
+ } else if (nr == hint || unlikely(sbq->round_robin)) {
/* Only update the hint if we used it. */
hint = nr + 1;
if (hint >= sbq->sb.depth - 1)
}
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
- bool round_robin, unsigned int cpu)
+ unsigned int cpu)
{
sbitmap_clear_bit(&sbq->sb, nr);
sbq_wake_up(sbq);
- if (likely(!round_robin))
+ if (likely(!sbq->round_robin))
*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
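Taken together, the stored flag only changes hint bookkeeping: with round_robin false, a freed bit becomes the freeing CPU's hint and tends to be reallocated immediately, while with round_robin true the hint is never rewound and __sbitmap_queue_get() always advances it past the bit it handed out. A rough illustration, assuming one queue of each kind and omitting error handling (names are hypothetical):

	struct sbitmap_queue fifo_sbq;	/* initialized with round_robin = false */
	struct sbitmap_queue rr_sbq;	/* initialized with round_robin = true */
	unsigned int cpu;
	int nr;

	/* FIFO-style queue: clear() writes 'nr' back as this CPU's hint. */
	nr = sbitmap_queue_get(&fifo_sbq, &cpu);
	sbitmap_queue_clear(&fifo_sbq, nr, cpu);

	/* Round-robin queue: the hint only moves forward (nr + 1 on get),
	 * so allocations walk the bitmap in order. */
	nr = sbitmap_queue_get(&rr_sbq, &cpu);
	sbitmap_queue_clear(&rr_sbq, nr, cpu);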