/*
 * block/blk-sysfs.c
 *
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-wbt.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

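/*
 * Unlike queue_var_store() above, which parses an unsigned decimal value,
 * rejects anything above UINT_MAX and returns @count so callers can hand
 * the result straight back to sysfs, this helper parses a signed 64-bit
 * value and returns 0 on success; its callers return @count themselves.
 */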
static ssize_t queue_var_store64(s64 *var, const char *page)
{
        int err;
        s64 v;

        err = kstrtos64(page, 10, &v);
        if (err < 0)
                return err;

        *var = v;
        return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}

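/*
 * "nr_requests" store: values below BLKDEV_MIN_RQ are rounded up, and the
 * update is routed to the legacy request_fn path or to blk-mq depending on
 * how the queue was set up.  Illustrative usage from userspace (device name
 * and value are examples only):
 *
 *      echo 256 > /sys/block/sda/queue/nr_requests
 */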
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!q->request_fn && !q->mq_ops)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        if (q->request_fn)
                err = blk_update_nr_requests(q, nr);
        else
                err = blk_mq_update_nr_requests(q, nr);

        if (err)
                return err;

        return ret;
}

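/*
 * "read_ahead_kb" is stored internally as a page count, so the show/store
 * pair below converts with PAGE_SHIFT - 10.  For example, with 4 KiB pages
 * (PAGE_SHIFT == 12) a value of 128 KiB corresponds to 128 >> 2 = 32 pages.
 */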
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info->ra_pages <<
                                        (PAGE_SHIFT - 10);

        return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        if (ret < 0)
                return ret;

        q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

        return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
                char *page)
{
        return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), (page));

        return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

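/*
 * "discard_max_bytes" store: the new limit must be a multiple of the
 * discard granularity (the check below assumes a power-of-two granularity),
 * is converted from bytes to 512-byte sectors, and is capped at the
 * hardware limit exposed via "discard_max_hw_bytes".
 */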
static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

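/*
 * "max_sectors_kb" store: the value is accepted only if it lies between the
 * page size (in KiB) and the effective hardware limit (min_not_zero of
 * max_hw_sectors_kb and q->limits.max_dev_sectors expressed in KiB).  The
 * queue limit is kept in 512-byte sectors, hence the << 1 conversion, and
 * backing_dev_info->io_pages is updated so readahead sizing follows suit.
 */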
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, (page));
}

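/*
 * QUEUE_SYSFS_BIT_FNS() generates a queue_show_<name>/queue_store_<name>
 * pair that reads and, under queue_lock, updates a single QUEUE_FLAG_* bit.
 * When @neg is set the sysfs value is the inverse of the flag, which is how
 * "rotational" below ends up as the negation of QUEUE_FLAG_NONROT.
 */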
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_show_##name(struct request_queue *q, char *page)                  \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
                queue_flag_set(QUEUE_FLAG_##flag, q);                   \
        else                                                            \
                queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
        spin_unlock_irq(q->queue_lock);                                 \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
        switch (blk_queue_zoned_model(q)) {
        case BLK_ZONED_HA:
                return sprintf(page, "host-aware\n");
        case BLK_ZONED_HM:
                return sprintf(page, "host-managed\n");
        default:
                return sprintf(page, "none\n");
        }
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

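/*
 * "nomerges" store: 0 re-enables all merging, 1 sets QUEUE_FLAG_NOXMERGES
 * (only the cheap one-hit merge attempts remain), and 2 sets
 * QUEUE_FLAG_NOMERGES to disable merging entirely; the show side above
 * encodes the same states as (nomerges << 1) | noxmerges.
 */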
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

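/*
 * "rq_affinity" store (SMP only): 0 clears both flags, 1 sets
 * QUEUE_FLAG_SAME_COMP so completions run on a CPU in the submitter's
 * group, and 2 additionally sets QUEUE_FLAG_SAME_FORCE to force completion
 * on the exact submitting CPU.
 */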
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}

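/*
 * "io_poll_delay" is kept in q->poll_nsec in nanoseconds but exposed in
 * microseconds.  -1 keeps classic busy polling; other values are handed to
 * the blk-mq hybrid polling code (0 meaning an adaptive sleep estimate, a
 * positive value a fixed sleep of that many microseconds before polling).
 */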
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
        int val;

        if (q->poll_nsec == -1)
                val = -1;
        else
                val = q->poll_nsec / 1000;

        return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
                                size_t count)
{
        int err, val;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        err = kstrtoint(page, 10, &val);
        if (err < 0)
                return err;

        if (val == -1)
                q->poll_nsec = -1;
        else
                q->poll_nsec = val * 1000;

        return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        unsigned long poll_on;
        ssize_t ret;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        ret = queue_var_store(&poll_on, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (poll_on)
                queue_flag_set(QUEUE_FLAG_POLL, q);
        else
                queue_flag_clear(QUEUE_FLAG_POLL, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
        if (!q->rq_wb)
                return -EINVAL;

        return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

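/*
 * "wbt_lat_usec" store: sets the writeback throttling latency target in
 * microseconds, initialising wbt on demand if it is not active yet.  -1
 * restores the device-type default from wbt_default_latency_nsec(), and any
 * explicit write flips the enable state from "default" to "manual".
 * Illustrative usage (device name and value are examples only):
 *
 *      echo 75000 > /sys/block/sda/queue/wbt_lat_usec
 */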
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        struct rq_wb *rwb;
        ssize_t ret;
        s64 val;

        ret = queue_var_store64(&val, page);
        if (ret < 0)
                return ret;
        if (val < -1)
                return -EINVAL;

        rwb = q->rq_wb;
        if (!rwb) {
                ret = wbt_init(q);
                if (ret)
                        return ret;

                rwb = q->rq_wb;
                if (!rwb)
                        return -EINVAL;
        }

        if (val == -1)
                rwb->min_lat_nsec = wbt_default_latency_nsec(q);
        else if (val >= 0)
                rwb->min_lat_nsec = val * 1000ULL;

        if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
                rwb->enable_state = WBT_STATE_ON_MANUAL;

        wbt_update_limits(rwb);
        return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
                return sprintf(page, "write back\n");

        return sprintf(page, "write through\n");
}

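/*
 * "write_cache" store: accepts the strings "write back" and "write through"
 * (or "none" as a synonym for write through) and sets or clears
 * QUEUE_FLAG_WC accordingly.  The comparisons are prefix matches, so the
 * trailing newline from "echo" is tolerated.
 */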
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
                              size_t count)
{
        int set = -1;

        if (!strncmp(page, "write back", 10))
                set = 1;
        else if (!strncmp(page, "write through", 13) ||
                 !strncmp(page, "none", 4))
                set = 0;

        if (set == -1)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        if (set)
                queue_flag_set(QUEUE_FLAG_WC, q);
        else
                queue_flag_clear(QUEUE_FLAG_WC, q);
        spin_unlock_irq(q->queue_lock);

        return count;
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
        .attr = {.name = "max_discard_segments", .mode = S_IRUGO },
        .show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
        .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
        .show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
        .attr = {.name = "chunk_sectors", .mode = S_IRUGO },
        .show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
        .attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
        .show = queue_discard_max_show,
        .store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
        .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
        .show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
        .attr = {.name = "write_zeroes_max_bytes", .mode = S_IRUGO },
        .show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
        .attr = {.name = "zoned", .mode = S_IRUGO },
        .show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_random,
        .store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
        .attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_show,
        .store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
        .attr = {.name = "io_poll_delay", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_delay_show,
        .store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
        .attr = {.name = "write_cache", .mode = S_IRUGO | S_IWUSR },
        .show = queue_wc_show,
        .store = queue_wc_store,
};

static struct queue_sysfs_entry queue_dax_entry = {
        .attr = {.name = "dax", .mode = S_IRUGO },
        .show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
        .attr = {.name = "wbt_lat_usec", .mode = S_IRUGO | S_IWUSR },
        .show = queue_wb_lat_show,
        .store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
        .attr = {.name = "throttle_sample_time", .mode = S_IRUGO | S_IWUSR },
        .show = blk_throtl_sample_time_show,
        .store = blk_throtl_sample_time_store,
};
#endif

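/*
 * Attributes collected below become visible as files under
 * /sys/block/<disk>/queue/ once blk_register_queue() adds the "queue"
 * kobject for the gendisk, e.g. (hypothetical device, illustrative output):
 *
 *      $ cat /sys/block/sda/queue/write_cache
 *      write back
 */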
static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_discard_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_chunk_sectors_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_write_zeroes_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_zoned_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        &queue_wc_entry.attr,
        &queue_dax_entry.attr,
        &queue_wb_lat_entry.attr,
        &queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
        &throtl_sample_time_entry.attr,
#endif
        NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

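/*
 * sysfs_ops dispatchers: both directions serialise on q->sysfs_lock and
 * refuse access with -ENOENT once the queue has been marked dying, so the
 * individual show/store handlers never see a queue that is going away.
 */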
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj:    the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low-level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        wbt_exit(q);
        if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
                blk_stat_remove_callback(q, q->poll_cb);
        blk_stat_free_callback(q->poll_cb);
        bdi_put(q->backing_dev_info);
        blkcg_exit_queue(q);

        if (q->elevator) {
                ioc_clear_queue(q);
                elevator_exit(q->elevator);
        }

        blk_free_queue_stats(q->stats);

        blk_exit_rl(&q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        if (!q->mq_ops) {
                if (q->exit_rq_fn)
                        q->exit_rq_fn(q, q->fq->flush_rq);
                blk_free_flush_queue(q->fq);
        } else {
                blk_mq_release(q);
        }

        blk_trace_shutdown(q);

        if (q->mq_ops)
                blk_mq_debugfs_unregister(q);

        if (q->bio_split)
                bioset_free(q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show   = queue_attr_show,
        .store  = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops      = &queue_sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = blk_release_queue,
};

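/*
 * Enable writeback throttling by default when the matching config option is
 * set for the queue type (CONFIG_BLK_WBT_MQ for blk-mq, CONFIG_BLK_WBT_SQ
 * for legacy request_fn queues); otherwise leave it to be switched on
 * manually through "wbt_lat_usec".
 */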
static void blk_wb_init(struct request_queue *q)
{
#ifndef CONFIG_BLK_WBT_MQ
        if (q->mq_ops)
                return;
#endif
#ifndef CONFIG_BLK_WBT_SQ
        if (q->request_fn)
                return;
#endif

        /*
         * If this fails, we don't get throttling
         */
        wbt_init(q);
}

int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
        }

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        if (q->mq_ops)
                blk_mq_register_dev(dev, q);

        /* Prevent changes through sysfs until registration is completed. */
        mutex_lock(&q->sysfs_lock);

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                goto unlock;
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        blk_wb_init(q);

        if (q->request_fn || (q->mq_ops && q->elevator)) {
                ret = elv_register_queue(q);
                if (ret) {
                        kobject_uevent(&q->kobj, KOBJ_REMOVE);
                        kobject_del(&q->kobj);
                        blk_trace_remove_sysfs(dev);
                        kobject_put(&dev->kobj);
                        goto unlock;
                }
        }
        ret = 0;
unlock:
        mutex_unlock(&q->sysfs_lock);
        return ret;
}

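/*
 * Undo blk_register_queue(): tear down the elevator and blk-mq sysfs
 * entries first, then remove the "queue" kobject and drop the reference on
 * the device kobject taken at registration time.
 */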
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        if (q->mq_ops)
                blk_mq_unregister_dev(disk_to_dev(disk), q);

        if (q->request_fn || (q->mq_ops && q->elevator))
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}