/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

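/*
 * Translate one block-layer request into readsect/writesect/discard
 * calls on the translation layer, one tr->blksize block at a time.
 * Called from the per-device kthread with dev->lock held.
 */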
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	buf = req->buffer;

	if (!blk_fs_request(req))
		return -EIO;

	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return -EIO;

	if (blk_discard_rq(req))
		return tr->discard(dev, block, nsect);

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return -EIO;
		rq_flush_dcache_pages(req);
		return 0;
	case WRITE:
		if (!tr->writesect)
			return -EIO;

		rq_flush_dcache_pages(req);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return -EIO;
		return 0;
	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return -EIO;
	}
}

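/*
 * Per-device worker thread: pull requests off the queue and feed them
 * to do_blktrans_request().  Sleeps when the queue is empty and is
 * woken by mtd_blktrans_request().  The queue lock is dropped around
 * the actual MTD I/O, which may sleep.
 */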
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_dev *dev = arg;
	struct request_queue *rq = dev->rq;
	struct request *req = NULL;

	spin_lock_irq(rq->queue_lock);

	while (!kthread_should_stop()) {
		int res;

		if (!req && !(req = blk_fetch_request(rq))) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		if (!__blk_end_request_cur(req, res))
			req = NULL;
	}

	if (req)
		__blk_end_request_all(req, -EIO);

	spin_unlock_irq(rq->queue_lock);

	return 0;
}

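/* Block-layer request function: just kick the per-device worker thread. */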
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_dev *dev = rq->queuedata;
	wake_up_process(dev->thread);
}

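/*
 * Take references on the underlying MTD device and on the translation
 * layer's module before calling its optional open() hook.
 */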
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;
	int ret = -ENODEV;

	if (!get_mtd_device(NULL, dev->mtd->index))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		put_mtd_device(dev->mtd);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

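/* Drop the references taken in blktrans_open(), unless release() fails. */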
static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;
	int ret = 0;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		put_mtd_device(dev->mtd);
		module_put(tr->owner);
	}

	return ret;
}

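/* Defer geometry queries to the translation layer, if it provides one. */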
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

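/* Only BLKFLSBUF is handled here; it maps to the optional flush() hook. */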
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

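/* block_device_operations shared by all translation-layer disks. */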
static const struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.locked_ioctl	= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

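/*
 * Register one translated block device: pick a free device number, set
 * up the gendisk, request queue and worker thread, then add the disk.
 * Must be called with mtd_table_mutex held; the trylock below only
 * asserts that.
 */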
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;
	struct gendisk *gd;
	int ret;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26))
		goto error1;

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_init(&new->lock);
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	set_capacity(gd, (new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

	if (!new->rq)
		goto error3;

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	if (tr->discard)
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
					new->rq);

	gd->queue = new->rq;

	/* Create processing thread */
	/* TODO: workqueue ? */
	new->thread = kthread_run(mtd_blktrans_thread, new,
			"%s%d", tr->name, new->mtd->index);
	if (IS_ERR(new->thread)) {
		ret = PTR_ERR(new->thread);
		goto error4;
	}
	gd->driverfs_dev = &new->mtd->dev;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);
	return 0;
error4:
	blk_cleanup_queue(new->rq);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	kfree(new);
	return ret;
}

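/*
 * Tear down a translated block device: unlink it, remove the gendisk so
 * no new requests arrive, stop the worker thread and free the queue.
 * Must be called with mtd_table_mutex held.
 */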
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	/* Stop new requests from arriving */
	del_gendisk(old->disk);

	/* Stop the thread */
	kthread_stop(old->thread);

	blk_cleanup_queue(old->rq);
	return 0;
}

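/*
 * MTD notifier hooks: propagate MTD device add/remove events to every
 * registered translation layer.
 */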
static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;
	struct mtd_blktrans_dev *dev, *next;

	list_for_each_entry(tr, &blktrans_majors, list)
		list_for_each_entry_safe(dev, next, &tr->devs, list)
			if (dev->mtd == mtd)
				tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct mtd_blktrans_ops *tr;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each_entry(tr, &blktrans_majors, list)
		tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

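/*
 * Register a translation layer: claim its block major, add it to
 * blktrans_majors and offer it every MTD device already present.
 */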
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);

	return 0;
}

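/*
 * Unregister a translation layer and remove every block device it
 * created.
 */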
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");