/*
 *  linux/drivers/block/deadline-iosched.c
 *
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static int read_expire = HZ / 2;  /* max time before a read is submitted. */
static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static int writes_starved = 2;    /* max times reads can starve a write */
static int fifo_batch = 16;       /* # of sequential requests treated as one
                                     by the above parameters. For throughput. */
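
/*
 * Illustrative arithmetic: with HZ=1000 the defaults come to 500
 * jiffies (0.5s) for read_expire and 5000 jiffies (5s) for
 * write_expire. Because both are expressed as multiples of HZ, the
 * wall-clock deadlines are the same whatever tick rate the kernel is
 * built with.
 */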

static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)      ((sec) >> 3)
#define DL_HASH_FN(sec)         (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES         (1 << deadline_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)    list_entry((ptr), struct deadline_rq, hash)
#define ON_HASH(drq)            (drq)->on_hash
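
/*
 * Illustrative hash computation: a request covering sectors [96, 104)
 * has rq_hash_key() == 104, its end sector. DL_HASH_BLOCK(104) == 13
 * (104 >> 3), which hash_long() then folds into one of the
 * DL_HASH_ENTRIES == 32 buckets. The >> 3 means requests ending inside
 * the same 8-sector (4KiB) block always share a bucket.
 */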

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rq s) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order. read, write or both are NULL
         */
        struct deadline_rq *next_drq[2];
        struct list_head *hash;         /* request hash */
        unsigned int batching;          /* number of sequential requests made */
        sector_t last_sector;           /* head position */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        mempool_t *drq_pool;
};
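
/*
 * The [2] arrays above are indexed by data direction, READ (0) or
 * WRITE (1), as returned by rq_data_dir(). Every queued request sits
 * on both per-direction structures at once: the rbtree for
 * sector-sorted dispatch and the fifo for deadline expiry.
 */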

/*
 * per-request data.
 */
struct deadline_rq {
        /*
         * rbtree index, key is the starting offset
         */
        struct rb_node rb_node;
        sector_t rb_key;

        struct request *request;

        /*
         * request hash, key is the ending offset (for back merge lookup)
         */
        struct list_head hash;
        char on_hash;

        /*
         * expire fifo
         */
        struct list_head fifo;
        unsigned long expires;
};
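
/*
 * Illustrative: a request spanning sectors [200, 208) is indexed twice,
 * under rb_key == 200 (its start sector, for sorted dispatch and front
 * merges) and under hash key 208 (its end sector, for back merges), so
 * a neighbouring bio on either side can locate it cheaply.
 */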

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)     ((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
        drq->on_hash = 0;
        list_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
        if (ON_HASH(drq))
                __deadline_del_drq_hash(drq);
}

static void
deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
{
        deadline_del_drq_hash(drq);

        if (q->last_merge == drq->request)
                q->last_merge = NULL;
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;

        BUG_ON(ON_HASH(drq));

        drq->on_hash = 1;
        list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;
        struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

        if (ON_HASH(drq) && drq->hash.prev != head) {
                list_del(&drq->hash);
                list_add(&drq->hash, head);
        }
}

static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
        struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
        struct list_head *entry, *next = hash_list->next;

        while ((entry = next) != hash_list) {
                struct deadline_rq *drq = list_entry_hash(entry);
                struct request *__rq = drq->request;

                next = entry->next;

                BUG_ON(!ON_HASH(drq));

                if (!rq_mergeable(__rq)) {
                        __deadline_del_drq_hash(drq);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}
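
/*
 * Illustrative back-merge probe: for a bio beginning at sector 104,
 * deadline_find_drq_hash(dd, 104) walks bucket DL_HASH_FN(104) looking
 * for a request whose end sector is exactly 104; that request can grow
 * by appending the bio. Entries that are no longer mergeable are
 * lazily unhashed during the walk.
 */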

/*
 * rb tree support functions
 */
#define RB_NONE         (2)
#define RB_EMPTY(root)  ((root)->rb_node == NULL)
#define ON_RB(node)     ((node)->rb_color != RB_NONE)
#define RB_CLEAR(node)  ((node)->rb_color = RB_NONE)
#define rb_entry_drq(node)      rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)    (&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)           (rq)->sector
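
/*
 * ON_RB()/RB_CLEAR() reuse the rb_node colour field as an "off-tree"
 * marker: RB_NONE (2) is neither red nor black, so a node carrying it
 * cannot be a live member of any rbtree.
 */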

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
        struct rb_node *parent = NULL;
        struct deadline_rq *__drq;

        while (*p) {
                parent = *p;
                __drq = rb_entry_drq(parent);

                if (drq->rb_key < __drq->rb_key)
                        p = &(*p)->rb_left;
                else if (drq->rb_key > __drq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __drq;
        }

        rb_link_node(&drq->rb_node, parent, p);
        return NULL;
}

static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct deadline_rq *__alias;

        drq->rb_key = rq_rb_key(drq->request);

retry:
        __alias = __deadline_add_drq_rb(dd, drq);
        if (!__alias) {
                rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
                return;
        }

        deadline_move_request(dd, __alias);
        goto retry;
}
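
/*
 * Alias handling: if the tree already contains a request with the same
 * start sector, __deadline_add_drq_rb returns that older request
 * instead of linking the new node. The alias is pushed straight to the
 * dispatch queue and the insertion retried, keeping rbtree keys unique.
 */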

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);

        if (dd->next_drq[data_dir] == drq) {
                struct rb_node *rbnext = rb_next(&drq->rb_node);

                dd->next_drq[data_dir] = NULL;
                if (rbnext)
                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
        }

        BUG_ON(!ON_RB(&drq->rb_node));
        rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
        RB_CLEAR(&drq->rb_node);
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;
        struct deadline_rq *drq;

        while (n) {
                drq = rb_entry_drq(n);

                if (sector < drq->rb_key)
                        n = n->rb_left;
                else if (sector > drq->rb_key)
                        n = n->rb_right;
                else
                        return drq->request;
        }

        return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;

        for (;;) {
                if (n->rb_left == NULL)
                        return rb_entry_drq(n);

                n = n->rb_left;
        }
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        const int data_dir = rq_data_dir(drq->request);

        deadline_add_drq_rb(dd, drq);
        /*
         * set expire time (per data direction) and add to fifo list
         */
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

        if (rq_mergeable(rq)) {
                deadline_add_drq_hash(dd, drq);

                if (!q->last_merge)
                        q->last_merge = rq;
        }
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&drq->fifo);
        deadline_remove_merge_hints(q, drq);
        deadline_del_drq_rb(dd, drq);
}

static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * try last_merge to avoid going to hash
         */
        ret = elv_try_last_merge(q, bio);
        if (ret != ELEVATOR_NO_MERGE) {
                __rq = q->last_merge;
                goto out_insert;
        }

        /*
         * see if the merge hash can satisfy a back merge
         */
        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
        if (__rq) {
                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_BACK_MERGE;
                        goto out;
                }
        }

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t rb_key = bio->bi_sector + bio_sectors(bio);

                __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
                if (__rq) {
                        BUG_ON(rb_key != rq_rb_key(__rq));

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        q->last_merge = __rq;
out_insert:
        if (ret)
                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
        *req = __rq;
        return ret;
}
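
/*
 * Illustrative merge decision: with a queued request covering sectors
 * [100, 108), an 8-sector bio at sector 108 is found via the hash
 * (rq_hash_key == bi_sector) and yields ELEVATOR_BACK_MERGE, while an
 * 8-sector bio at sector 92 is found via the rbtree (92 + 8 == rb_key
 * 100) and yields ELEVATOR_FRONT_MERGE, provided front_merges is set.
 */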

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);

        /*
         * hash always needs to be repositioned, key is end sector
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        /*
         * if the merge was a front merge, we need to reposition the request
         */
        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        q->last_merge = req;
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);
        struct deadline_rq *dnext = RQ_DATA(next);

        BUG_ON(!drq);
        BUG_ON(!dnext);

        /*
         * reposition drq (this is the merged request) in hash, and in rbtree
         * in case of a front merge
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        /*
         * if dnext expires before drq, assign its expire time to drq and
         * move drq into dnext's position in the fifo (dnext will be deleted)
         */
        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
                if (time_before(dnext->expires, drq->expires)) {
                        list_move(&drq->fifo, &dnext->fifo);
                        drq->expires = dnext->expires;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}
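
/*
 * Illustrative deadline inheritance: if request A (expires at t+500)
 * absorbs request B (expires at t+200), A takes over both B's earlier
 * expiry time and B's position in the fifo, so merging can never push
 * back a deadline that was already closer.
 */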

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        dd->next_drq[READ] = NULL;
        dd->next_drq[WRITE] = NULL;

        if (rbnext)
                dd->next_drq[data_dir] = rb_entry_drq(rbnext);

        dd->last_sector = drq->request->sector + drq->request->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)    list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo for the given direction, 1 otherwise. Requires
 * !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * drq is expired!
         */
        if (time_after(jiffies, drq->expires))
                return 1;

        return 0;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_drq[WRITE])
                drq = dd->next_drq[WRITE];
        else
                drq = dd->next_drq[READ];

        if (drq) {
                /* we have a "next request" */

                if (dd->last_sector != drq->request->sector)
                        /* end the batch on a non sequential request */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
                drq = list_entry_fifo(dd->fifo_list[data_dir].next);

        } else if (dd->next_drq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                drq = dd->next_drq[data_dir];
        } else {
                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
                drq = deadline_find_first_drq(dd, data_dir);
        }

dispatch_request:
        /*
         * drq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, drq);

        return 1;
}
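
/*
 * Illustrative dispatch sequence with the defaults (fifo_batch == 16,
 * writes_starved == 2): up to 16 consecutive sequential requests are
 * issued as one batch. When a batch ends and both fifos are populated,
 * reads win, but each read batch issued while writes wait bumps
 * dd->starved; once it reaches writes_starved the next batch goes to
 * the writes. Within the chosen direction an expired fifo head is
 * served before sector order.
 */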

static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbprev = rb_prev(&drq->rb_node);

        if (rbprev)
                return rb_entry_drq(rbprev)->request;

        return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        if (rbnext)
                return rb_entry_drq(rbnext)->request;

        return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        mempool_destroy(dd->drq_pool);
        kfree(dd->hash);
        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static int deadline_init_queue(request_queue_t *q, elevator_t *e)
{
        struct deadline_data *dd;
        int i;

        if (!drq_pool)
                return -ENOMEM;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                return -ENOMEM;
        memset(dd, 0, sizeof(*dd));

        dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!dd->hash) {
                kfree(dd);
                return -ENOMEM;
        }

        dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                        mempool_free_slab, drq_pool, q->node);
        if (!dd->drq_pool) {
                kfree(dd->hash);
                kfree(dd);
                return -ENOMEM;
        }

        for (i = 0; i < DL_HASH_ENTRIES; i++)
                INIT_LIST_HEAD(&dd->hash[i]);

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        e->elevator_data = dd;
        return 0;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        mempool_free(drq, dd->drq_pool);
        rq->elevator_private = NULL;
}

static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                     int gfp_mask)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;

        drq = mempool_alloc(dd->drq_pool, gfp_mask);
        if (drq) {
                memset(drq, 0, sizeof(*drq));
                RB_CLEAR(&drq->rb_node);
                drq->request = rq;

                INIT_LIST_HEAD(&drq->hash);
                drq->on_hash = 0;

                INIT_LIST_HEAD(&drq->fifo);

                rq->elevator_private = drq;
                return 0;
        }

        return 1;
}

/*
 * sysfs parts below
 */
struct deadline_fs_entry {
        struct attribute attr;
        ssize_t (*show)(struct deadline_data *, char *);
        ssize_t (*store)(struct deadline_data *, const char *, size_t);
};

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(struct deadline_data *dd, char *page)             \
{                                                                       \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count) \
{                                                                       \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
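
/*
 * These tunables appear under the queue's iosched directory in sysfs;
 * assuming the usual /sys mount and a disk named hda, something like:
 *
 *   echo 100 > /sys/block/hda/queue/iosched/read_expire
 *   cat /sys/block/hda/queue/iosched/write_expire
 *
 * Values with __CONV set are shown and stored in milliseconds and
 * converted to and from jiffies internally.
 */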

static struct deadline_fs_entry deadline_readexpire_entry = {
        .attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_readexpire_show,
        .store = deadline_readexpire_store,
};
static struct deadline_fs_entry deadline_writeexpire_entry = {
        .attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_writeexpire_show,
        .store = deadline_writeexpire_store,
};
static struct deadline_fs_entry deadline_writesstarved_entry = {
        .attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_writesstarved_show,
        .store = deadline_writesstarved_store,
};
static struct deadline_fs_entry deadline_frontmerges_entry = {
        .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_frontmerges_show,
        .store = deadline_frontmerges_store,
};
static struct deadline_fs_entry deadline_fifobatch_entry = {
        .attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
        .show = deadline_fifobatch_show,
        .store = deadline_fifobatch_store,
};

static struct attribute *default_attrs[] = {
        &deadline_readexpire_entry.attr,
        &deadline_writeexpire_entry.attr,
        &deadline_writesstarved_entry.attr,
        &deadline_frontmerges_entry.attr,
        &deadline_fifobatch_entry.attr,
        NULL,
};

#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)

static ssize_t
deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct deadline_fs_entry *entry = to_deadline(attr);

        if (!entry->show)
                return -EIO;

        return entry->show(e->elevator_data, page);
}

static ssize_t
deadline_attr_store(struct kobject *kobj, struct attribute *attr,
                    const char *page, size_t length)
{
        elevator_t *e = container_of(kobj, elevator_t, kobj);
        struct deadline_fs_entry *entry = to_deadline(attr);

        if (!entry->store)
                return -EIO;

        return entry->store(e->elevator_data, page, length);
}

static struct sysfs_ops deadline_sysfs_ops = {
        .show   = deadline_attr_show,
        .store  = deadline_attr_store,
};

static struct kobj_type deadline_ktype = {
        .sysfs_ops      = &deadline_sysfs_ops,
        .default_attrs  = default_attrs,
};

static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       deadline_former_request,
                .elevator_latter_req_fn =       deadline_latter_request,
                .elevator_set_req_fn =          deadline_set_request,
                .elevator_put_req_fn =          deadline_put_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },

        .elevator_ktype = &deadline_ktype,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        int ret;

        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
                                     0, 0, NULL, NULL);

        if (!drq_pool)
                return -ENOMEM;

        ret = elv_register(&iosched_deadline);
        if (ret)
                kmem_cache_destroy(drq_pool);

        return ret;
}

static void __exit deadline_exit(void)
{
        /*
         * unregister the elevator before tearing down its slab cache, so
         * no drq can be allocated from a destroyed pool
         */
        elv_unregister(&iosched_deadline);
        kmem_cache_destroy(drq_pool);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");