/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);

struct mtd_blkcore_priv {
        struct task_struct *thread;
        struct request_queue *rq;
        spinlock_t queue_lock;
};

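/*
 * Handle a single request for the given translation layer device.
 * Returns 1 if the whole transfer succeeded and 0 on any failure; the
 * worker thread passes that value straight to end_request(), so 1 marks
 * the request "uptodate" and 0 fails it back to the block layer.
 */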
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

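        /*
         * The block layer counts in 512-byte sectors; convert the start
         * sector and length into units of the translation layer's block
         * size.  tr->blkshift is ffs(tr->blksize) - 1, i.e. log2 of the
         * block size for power-of-two sizes, so with a 512-byte blksize
         * block == sector, and with a 1024-byte blksize block == sector/2.
         */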
        block = req->sector << 9 >> tr->blkshift;
        nsect = req->current_nr_sectors << 9 >> tr->blkshift;

        buf = req->buffer;

        if (!blk_fs_request(req))
                return 0;

        if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
                return 0;

        switch(rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
                                return 0;
                return 1;

        case WRITE:
                if (!tr->writesect)
                        return 0;

                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return 0;
                return 1;

        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
                return 0;
        }
}

static int mtd_blktrans_thread(void *arg)
{
        struct mtd_blktrans_ops *tr = arg;
        struct request_queue *rq = tr->blkcore_priv->rq;

        /* we might get involved when memory gets low, so use PF_MEMALLOC */
        current->flags |= PF_MEMALLOC;

        spin_lock_irq(rq->queue_lock);
        while (!kthread_should_stop()) {
                struct request *req;
                struct mtd_blktrans_dev *dev;
                int res = 0;

                req = elv_next_request(rq);

                if (!req) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(rq->queue_lock);
                        schedule();
                        spin_lock_irq(rq->queue_lock);
                        continue;
                }

                dev = req->rq_disk->private_data;
                tr = dev->tr;

                spin_unlock_irq(rq->queue_lock);

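                /*
                 * The queue spinlock is dropped because the request is
                 * serviced synchronously here: do_blktrans_request() calls
                 * into the MTD driver's read/write paths, which can sleep,
                 * and dev->lock is a mutex.  The spinlock is retaken
                 * before end_request() below.
                 */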
                mutex_lock(&dev->lock);
                res = do_blktrans_request(tr, dev, req);
                mutex_unlock(&dev->lock);

                spin_lock_irq(rq->queue_lock);

                end_request(req, res);
        }
        spin_unlock_irq(rq->queue_lock);

        return 0;
}

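/*
 * Request function registered with blk_init_queue().  It runs with the
 * queue lock held, so no I/O is done here; it just wakes the worker
 * thread, which performs the actual transfers from process context
 * where the MTD calls are allowed to sleep.
 */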
static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_ops *tr = rq->queuedata;
        wake_up_process(tr->blkcore_priv->thread);
}


static int blktrans_open(struct inode *i, struct file *f)
{
        struct mtd_blktrans_dev *dev;
        struct mtd_blktrans_ops *tr;
        int ret = -ENODEV;

        dev = i->i_bdev->bd_disk->private_data;
        tr = dev->tr;

        if (!try_module_get(dev->mtd->owner))
                goto out;

        if (!try_module_get(tr->owner))
                goto out_tr;

        /* FIXME: Locking. A hot pluggable device can go away
           (del_mtd_device can be called for it) without its module
           being unloaded. */
        dev->mtd->usecount++;

        ret = 0;
        if (tr->open && (ret = tr->open(dev))) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
        out_tr:
                module_put(tr->owner);
        }
 out:
        return ret;
}

static int blktrans_release(struct inode *i, struct file *f)
{
        struct mtd_blktrans_dev *dev;
        struct mtd_blktrans_ops *tr;
        int ret = 0;

        dev = i->i_bdev->bd_disk->private_data;
        tr = dev->tr;

        if (tr->release)
                ret = tr->release(dev);

        if (!ret) {
                dev->mtd->usecount--;
                module_put(dev->mtd->owner);
                module_put(tr->owner);
        }

        return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

        if (dev->tr->getgeo)
                return dev->tr->getgeo(dev, geo);
        return -ENOTTY;
}

static int blktrans_ioctl(struct inode *inode, struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
        struct mtd_blktrans_ops *tr = dev->tr;

        switch (cmd) {
        case BLKFLSBUF:
                if (tr->flush)
                        return tr->flush(dev);
                /* The core code did the work, we had nothing to do. */
                return 0;
        default:
                return -ENOTTY;
        }
}

static struct block_device_operations mtd_blktrans_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .ioctl          = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct list_head *this;
        int last_devnum = -1;
        struct gendisk *gd;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        list_for_each(this, &tr->devs) {
                struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

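        /*
         * The first minor for this device is devnum << part_bits; keep it
         * within the traditional 256 minors per major.
         */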
        if ((new->devnum << tr->part_bits) > 256) {
                return -EBUSY;
        }

        list_add_tail(&new->list, &tr->devs);
 added:
        mutex_init(&new->lock);
        if (!tr->writesect)
                new->readonly = 1;

        gd = alloc_disk(1 << tr->part_bits);
        if (!gd) {
                list_del(&new->list);
                return -ENOMEM;
        }
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_blktrans_ops;

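        /*
         * Partitionable translation layers get letter-suffixed names in
         * the style of IDE disks (devnum 0 -> "<name>a", devnum 26 ->
         * "<name>aa"); otherwise the device number is appended directly
         * (devnum 0 -> "<name>0").
         */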
        if (tr->part_bits)
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        else
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);

        /* 2.5 has capacity in units of 512 bytes while still
           having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
        set_capacity(gd, (new->size * tr->blksize) >> 9);

        gd->private_data = new;
        new->blkcore_priv = gd;
        gd->queue = tr->blkcore_priv->rq;

        if (new->readonly)
                set_disk_ro(gd, 1);

        add_disk(gd);

        return 0;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        list_del(&old->list);

        del_gendisk(old->blkcore_priv);
        put_disk(old->blkcore_priv);

        return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct list_head *this, *this2, *next;

        list_for_each(this, &blktrans_majors) {
                struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

                list_for_each_safe(this2, next, &tr->devs) {
                        struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
                }
        }
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct list_head *this;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each(this, &blktrans_majors) {
                struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

                tr->add_mtd(tr, mtd);
        }

}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        int ret, i;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from fucking
           us over. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
        if (!tr->blkcore_priv)
                return -ENOMEM;

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }
        spin_lock_init(&tr->blkcore_priv->queue_lock);

        tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
        if (!tr->blkcore_priv->rq) {
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return -ENOMEM;
        }

        tr->blkcore_priv->rq->queuedata = tr;
        blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
        tr->blkshift = ffs(tr->blksize) - 1;

        tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
                        "%sd", tr->name);
        if (IS_ERR(tr->blkcore_priv->thread)) {
                blk_cleanup_queue(tr->blkcore_priv->rq);
                unregister_blkdev(tr->major, tr->name);
                kfree(tr->blkcore_priv);
                mutex_unlock(&mtd_table_mutex);
                return PTR_ERR(tr->blkcore_priv->thread);
        }

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        for (i=0; i<MAX_MTD_DEVICES; i++) {
                if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd_table[i]);
        }

        mutex_unlock(&mtd_table_mutex);

        return 0;
}

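/*
 * Illustrative sketch (assumptions, not part of this driver): a typical
 * translation layer fills in a struct mtd_blktrans_ops and hands it to
 * register_mtd_blktrans(); its add_mtd() callback calls
 * add_mtd_blktrans_dev() for each suitable MTD, and remove_dev() calls
 * del_mtd_blktrans_dev().  The "foo_*" names and FOO_MAJOR below are
 * hypothetical.  Note that a fixed major is needed, since any nonzero
 * return from register_blkdev() is treated as an error above.
 *
 *      static struct mtd_blktrans_ops foo_tr = {
 *              .name           = "foo",
 *              .major          = FOO_MAJOR,
 *              .part_bits      = 0,
 *              .blksize        = 512,
 *              .readsect       = foo_readsect,
 *              .writesect      = foo_writesect,
 *              .add_mtd        = foo_add_mtd,
 *              .remove_dev     = foo_remove_dev,
 *              .owner          = THIS_MODULE,
 *      };
 *
 *      static int __init foo_init(void)
 *      {
 *              return register_mtd_blktrans(&foo_tr);
 *      }
 *      module_init(foo_init);
 */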
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct list_head *this, *next;

        mutex_lock(&mtd_table_mutex);

        /* Clean up the kernel thread */
        kthread_stop(tr->blkcore_priv->thread);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_safe(this, next, &tr->devs) {
                struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
                tr->remove_dev(dev);
        }

        blk_cleanup_queue(tr->blkcore_priv->rq);
        unregister_blkdev(tr->major, tr->name);

        mutex_unlock(&mtd_table_mutex);

        kfree(tr->blkcore_priv);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");