#include <linux/blkdev.h>
#include <linux/sbitmap.h>
#include <linux/srcu.h>

struct blk_flush_queue;
struct blk_mq_hw_ctx {
        struct {
                struct list_head dispatch;
                unsigned long state;            /* BLK_MQ_S_* flags */
        } ____cacheline_aligned_in_smp;
        struct work_struct run_work;
        cpumask_var_t cpumask;

        unsigned long flags;                    /* BLK_MQ_F_* flags */

        struct request_queue *queue;
        struct blk_flush_queue *fq;

        struct sbitmap ctx_map;

        struct blk_mq_ctx **ctxs;

        wait_queue_t dispatch_wait;

        struct blk_mq_tags *tags;
        struct blk_mq_tags *sched_tags;

        struct srcu_struct queue_rq_srcu;

#define BLK_MQ_MAX_DISPATCH_ORDER 7
        unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

        unsigned int numa_node;
        unsigned int queue_num;

        struct delayed_work delayed_run_work;
        struct delayed_work delay_work;

        struct hlist_node cpuhp_dead;
        unsigned long poll_considered;
        unsigned long poll_invoked;
        unsigned long poll_success;
};

struct blk_mq_tag_set {
        const struct blk_mq_ops *ops;
        unsigned int nr_hw_queues;
        unsigned int queue_depth;       /* max hw supported */
        unsigned int reserved_tags;
        unsigned int cmd_size;          /* per-request extra data */
        unsigned int flags;             /* BLK_MQ_F_* */

        struct blk_mq_tags **tags;

        struct mutex tag_list_lock;
        struct list_head tag_list;
};

struct blk_mq_queue_data {
        struct request *rq;
        bool last;
};

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
                unsigned int, unsigned int);
typedef void (exit_request_fn)(void *, struct request *, unsigned int,
                unsigned int);
typedef int (reinit_request_fn)(void *, struct request *);

typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
                bool);
typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);

struct blk_mq_ops {
        queue_rq_fn *queue_rq;

        /* Called on request timeout */
        timeout_fn *timeout;

        /* Called to poll for completion of a specific tag. */
        poll_fn *poll;

        softirq_done_fn *complete;

        /*
         * Called when the block layer side of a hardware queue has been
         * set up, allowing the driver to allocate/init matching structures.
         * Ditto for exit/teardown.
         */
        init_hctx_fn *init_hctx;
        exit_hctx_fn *exit_hctx;

        /*
         * Called for every command allocated by the block layer to allow
         * the driver to set up driver specific data.
         * Tag greater than or equal to queue_depth is for setting up the
         * flush request.
         * Ditto for exit/teardown.
         */
        init_request_fn *init_request;
        exit_request_fn *exit_request;
        reinit_request_fn *reinit_request;

        map_queues_fn *map_queues;
};
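
/*
 * Example (editor's sketch, not part of this header): how a hypothetical
 * driver could wire its callbacks into a blk_mq_ops table. All my_* names
 * are invented for illustration; only the field names come from the struct
 * above. If map_queues is left NULL, the block layer falls back to the
 * default blk_mq_map_queues() mapping.
 *
 *	static const struct blk_mq_ops my_mq_ops = {
 *		.queue_rq	= my_queue_rq,
 *		.complete	= my_complete_rq,
 *		.init_hctx	= my_init_hctx,
 *		.exit_hctx	= my_exit_hctx,
 *		.init_request	= my_init_request,
 *		.exit_request	= my_exit_request,
 *	};
 */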
enum {
        BLK_MQ_RQ_QUEUE_OK      = 0,    /* queued fine */
        BLK_MQ_RQ_QUEUE_BUSY    = 1,    /* requeue IO for later */
        BLK_MQ_RQ_QUEUE_ERROR   = 2,    /* end IO with error */

        BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
        BLK_MQ_F_TAG_SHARED     = 1 << 1,
        BLK_MQ_F_SG_MERGE       = 1 << 2,
        BLK_MQ_F_BLOCKING       = 1 << 5,
        BLK_MQ_F_NO_SCHED       = 1 << 6,
        BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
        BLK_MQ_F_ALLOC_POLICY_BITS = 1,

        BLK_MQ_S_STOPPED        = 0,
        BLK_MQ_S_TAG_ACTIVE     = 1,
        BLK_MQ_S_SCHED_RESTART  = 2,
        BLK_MQ_S_TAG_WAITING    = 3,

        BLK_MQ_MAX_DEPTH        = 10240,

        BLK_MQ_CPU_WORK_BATCH   = 8,
};
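
/*
 * Example (editor's sketch): the BLK_MQ_RQ_QUEUE_* values above are the
 * return codes of a driver's queue_rq callback; returning BUSY asks the
 * core to put the request back and retry it later. my_dev and my_submit()
 * are hypothetical, and the driver pointer is assumed to sit in
 * queue->queuedata.
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
 *			       const struct blk_mq_queue_data *bd)
 *	{
 *		struct request *rq = bd->rq;
 *		struct my_dev *dev = hctx->queue->queuedata;
 *
 *		blk_mq_start_request(rq);
 *		if (my_submit(dev, rq) != 0)
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */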
#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
        ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
                ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
        ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
                << BLK_MQ_F_ALLOC_POLICY_START_BIT)
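
/*
 * Example (editor's sketch): these two macros pack a tag allocation policy
 * into, and extract it from, the BLK_MQ_F_* flags word of a tag set. The
 * BLK_TAG_ALLOC_* policies come from <linux/blkdev.h>; "set" is a
 * hypothetical struct blk_mq_tag_set.
 *
 *	set->flags = BLK_MQ_F_SHOULD_MERGE |
 *		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
 *
 *	int policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags);
 */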
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
                                                  struct request_queue *q);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);

int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
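
/*
 * Example (editor's sketch): the usual bring-up order for a blk-mq driver.
 * my_dev, my_cmd and my_mq_ops are hypothetical; cmd_size is the size of
 * the per-request PDU that blk_mq_rq_to_pdu() later returns.
 *
 *	dev->tag_set.ops = &my_mq_ops;
 *	dev->tag_set.nr_hw_queues = 1;
 *	dev->tag_set.queue_depth = 64;
 *	dev->tag_set.cmd_size = sizeof(struct my_cmd);
 *	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 *
 *	ret = blk_mq_alloc_tag_set(&dev->tag_set);
 *	if (ret)
 *		return ret;
 *
 *	dev->queue = blk_mq_init_queue(&dev->tag_set);
 *	if (IS_ERR(dev->queue)) {
 *		blk_mq_free_tag_set(&dev->tag_set);
 *		return PTR_ERR(dev->queue);
 *	}
 */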
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);

enum {
        BLK_MQ_REQ_NOWAIT       = (1 << 0), /* return when out of requests */
        BLK_MQ_REQ_RESERVED     = (1 << 1), /* allocate from reserved pool */
        BLK_MQ_REQ_INTERNAL     = (1 << 2), /* allocate internal/sched tag */
};

struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
                unsigned int flags);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int op,
                unsigned int flags, unsigned int hctx_idx);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
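
/*
 * Example (editor's sketch): allocating a request outside the normal I/O
 * path with the BLK_MQ_REQ_* flags above, e.g. for a driver-internal
 * command against a reserved tag. REQ_OP_READ is only a placeholder
 * operation here.
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_READ,
 *				  BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 */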
enum {
        BLK_MQ_UNIQUE_TAG_BITS = 16,
        BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
};

u32 blk_mq_unique_tag(struct request *rq);

static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
{
        return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
}
static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
{
        return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
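
/*
 * Example (editor's sketch): blk_mq_unique_tag() packs the hardware queue
 * index into the upper 16 bits and the per-queue tag into the lower 16
 * bits, so both can be recovered from a single u32 (SCSI LLDs use this
 * pattern). rq is a hypothetical in-flight request.
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);
 */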
int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);

void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
                                bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq, int error);
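
/*
 * Example (editor's sketch): a typical completion path. An interrupt
 * handler looks the request up by tag and passes it to
 * blk_mq_complete_request(); the ops->complete callback (or
 * blk_mq_end_request() directly) then finishes it. my_isr, my_dev and
 * my_read_tag() are hypothetical, and a single hardware queue is assumed.
 *
 *	static irqreturn_t my_isr(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct request *rq;
 *
 *		rq = blk_mq_tag_to_rq(dev->tag_set.tags[0], my_read_tag(dev));
 *		if (rq)
 *			blk_mq_complete_request(rq, 0);
 *		return IRQ_HANDLED;
 *	}
 */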
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                     unsigned long timeout);
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set);

int blk_mq_map_queues(struct blk_mq_tag_set *set);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
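
/*
 * Example (editor's sketch): blk_mq_freeze_queue() waits until no requests
 * are in flight and blocks new ones, which makes it a convenient bracket
 * for configuration changes; blk_mq_unfreeze_queue() lets I/O resume.
 * my_change_parameters() is a placeholder for driver-specific work.
 *
 *	blk_mq_freeze_queue(q);
 *	my_change_parameters(dev);
 *	blk_mq_unfreeze_queue(q);
 */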
/* Driver command data is immediately after the request. So subtract request
 * size to get back to the original request, add request size to get the PDU. */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
        return pdu - sizeof(struct request);
}
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
        return rq + 1;
}
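
/*
 * Example (editor's sketch): with tag_set.cmd_size set to
 * sizeof(struct my_cmd), the per-request driver data ("PDU") sits right
 * behind each struct request, and the two helpers above convert between
 * the two. struct my_cmd is hypothetical.
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *same_rq = blk_mq_rq_from_pdu(cmd);
 */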
#define queue_for_each_hw_ctx(q, hctx, i)                               \
        for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
             ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)                                 \
        for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
             ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
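
/*
 * Example (editor's sketch): walking all hardware queues of a request
 * queue, e.g. from debugging or teardown code. hctx and i are ordinary
 * locals; q is the request_queue.
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		pr_info("hctx %u on node %u\n", hctx->queue_num, hctx->numa_node);
 */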