/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/freezer.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

static LIST_HEAD(blktrans_majors);

struct mtd_blkcore_priv {
	struct task_struct *thread;
	struct request_queue *rq;
	spinlock_t queue_lock;
};

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	/* The block layer works in 512-byte sectors; convert the
	   request to the translation layer's block size. */
	block = req->sector << 9 >> tr->blkshift;
	nsect = req->current_nr_sectors << 9 >> tr->blkshift;

	buf = req->buffer;

	if (!blk_fs_request(req))
		return 0;

	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		return 0;

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return 0;
	}
}

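/*
 * Worked example of the sector-to-block conversion in
 * do_blktrans_request(), assuming tr->blksize == 1024 (so
 * tr->blkshift == 10): a request for four 512-byte sectors starting
 * at sector 8 gives block = (8 << 9) >> 10 = 4 and
 * nsect = (4 << 9) >> 10 = 2, i.e. two 1KiB blocks starting at
 * translation-layer block 4.
 */
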
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC;

	spin_lock_irq(rq->queue_lock);
	while (!kthread_should_stop()) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;

		req = elv_next_request(rq);

		if (!req) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			schedule();
			spin_lock_irq(rq->queue_lock);
			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	return 0;
}

static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;

	wake_up_process(tr->blkcore_priv->thread);
}

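/*
 * Note: the request function above never touches the device itself;
 * it only wakes mtd_blktrans_thread(). All actual I/O happens in the
 * kernel thread's process context, which is why the translation
 * layer's readsect()/writesect() handlers are free to sleep.
 */
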
static int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

static int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

static int blktrans_ioctl(struct inode *inode, struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

static struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_for_each(this, &tr->devs) {
		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);

		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	if ((new->devnum << tr->part_bits) > 256) {
		return -EBUSY;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_init(&new->lock);
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * tr->blksize) >> 9);

	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

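/*
 * Naming example for the code above: with part_bits set and name
 * "ftl", devnum 0 becomes "ftla", devnum 25 "ftlz", then devnum 26
 * "ftlaa", devnum 27 "ftlab", and so on. With part_bits == 0 the
 * names are simply "ftl0", "ftl1", ... The set_capacity() call
 * converts new->size (counted in tr->blksize units) into the
 * 512-byte sectors the block layer expects: for example, 2048 blocks
 * of 1024 bytes become 4096 sectors.
 */
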
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct list_head *this, *this2, *next;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		list_for_each_safe(this2, next, &tr->devs) {
			struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

			if (dev->mtd == mtd)
				tr->remove_dev(dev);
		}
	}
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct list_head *this;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		tr->add_mtd(tr, mtd);
	}
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from fucking
	   us over. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
	tr->blkshift = ffs(tr->blksize) - 1;

	tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
			"%sd", tr->name);
	if (IS_ERR(tr->blkcore_priv->thread)) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return PTR_ERR(tr->blkcore_priv->thread);
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i = 0; i < MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	mutex_unlock(&mtd_table_mutex);

	return 0;
}

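/*
 * Minimal usage sketch (not compiled, and not part of this file) of
 * how a translation layer might drive the API above. All "example_"
 * names are hypothetical; the callback signatures and ops fields are
 * those of struct mtd_blktrans_ops in <linux/mtd/blktrans.h> as used
 * by the code in this file.
 */
#if 0
static int example_readsect(struct mtd_blktrans_dev *dev,
			    unsigned long block, char *buf)
{
	size_t retlen;

	/* One 512-byte block read straight from the underlying MTD device */
	if (dev->mtd->read(dev->mtd, (loff_t)block << 9, 512, &retlen, buf))
		return 1;
	return 0;
}

static void example_add_mtd(struct mtd_blktrans_ops *tr,
			    struct mtd_info *mtd)
{
	struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;
	dev->mtd = mtd;
	dev->tr = tr;
	dev->devnum = -1;		/* take the first free number */
	dev->size = mtd->size >> 9;	/* in units of tr->blksize */
	if (add_mtd_blktrans_dev(dev))
		kfree(dev);
}

static void example_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
	kfree(dev);
}

static struct mtd_blktrans_ops example_tr = {
	.name		= "example",
	.major		= 240,	/* hypothetical; local/experimental range */
	.part_bits	= 0,
	.blksize	= 512,
	.readsect	= example_readsect,
	.add_mtd	= example_add_mtd,
	.remove_dev	= example_remove_dev,
	.owner		= THIS_MODULE,
};

/* register_mtd_blktrans(&example_tr) would then create "example0",
 * "example1", ... for each MTD device present. */
#endif
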
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct list_head *this, *next;

	mutex_lock(&mtd_table_mutex);

	/* Clean up the kernel thread */
	kthread_stop(tr->blkcore_priv->thread);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_safe(this, next, &tr->devs) {
		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);

		tr->remove_dev(dev);
	}

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");