2 * Copyright © 2009 - Maxim Levitsky
3 * SmartMedia/xD translation layer
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/random.h>
13 #include <linux/hdreg.h>
14 #include <linux/kthread.h>
15 #include <linux/freezer.h>
16 #include <linux/sysfs.h>
17 #include <linux/bitops.h>
18 #include <linux/mtd/nand_ecc.h>
19 #include "nand/sm_common.h"
24 struct workqueue_struct *cache_flush_workqueue;
26 static int cache_timeout = 1000;
27 module_param(cache_timeout, bool, S_IRUGO);
28 MODULE_PARM_DESC(cache_timeout,
29 "Timeout (in ms) for cache flush (1000 ms default");
32 module_param(debug, int, S_IRUGO | S_IWUSR);
33 MODULE_PARM_DESC(debug, "Debug level (0-2)");
/* ------------------- sysfs attributes ----------------------------------- */
/* Pairs a sysfs attribute with the heap-allocated payload it exposes;
 * sm_attr_show() recovers this wrapper via container_of(dev_attr).
 * (data/len members are declared below this view.) */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
/* sysfs ->show() hook: copy the stored attribute payload into @buf. */
ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
	struct sm_sysfs_attribute *sm_attr =
		container_of(attr, struct sm_sysfs_attribute, dev_attr);
	/* ->data was NUL-terminated at creation time; ->len bytes copied */
	strncpy(buf, sm_attr->data, sm_attr->len);
54 #define NUM_ATTRIBUTES 1
55 #define SM_CIS_VENDOR_OFFSET 0x59
56 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
58 struct attribute_group *attr_group;
59 struct attribute **attributes;
60 struct sm_sysfs_attribute *vendor_attribute;
62 int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET,
63 SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET);
65 char *vendor = kmalloc(vendor_len, GFP_KERNEL);
66 memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len);
67 vendor[vendor_len] = 0;
69 /* Initialize sysfs attributes */
71 kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL);
73 sysfs_attr_init(&vendor_attribute->dev_attr.attr);
75 vendor_attribute->data = vendor;
76 vendor_attribute->len = vendor_len;
77 vendor_attribute->dev_attr.attr.name = "vendor";
78 vendor_attribute->dev_attr.attr.mode = S_IRUGO;
79 vendor_attribute->dev_attr.show = sm_attr_show;
82 /* Create array of pointers to the attributes */
83 attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1),
85 attributes[0] = &vendor_attribute->dev_attr.attr;
87 /* Finally create the attribute group */
88 attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
89 attr_group->attrs = attributes;
/* Free the vendor attribute payload, the attribute array and the
 * attribute group created by sm_create_sysfs_attributes(). */
void sm_delete_sysfs_attributes(struct sm_ftl *ftl)
	struct attribute **attributes = ftl->disk_attributes->attrs;

	/* the attribute array is NULL-terminated */
	for (i = 0; attributes[i] ; i++) {

		struct device_attribute *dev_attr = container_of(attributes[i],
			struct device_attribute, attr);

		struct sm_sysfs_attribute *sm_attr =
			container_of(dev_attr,
				struct sm_sysfs_attribute, dev_attr);

		/* frees the vendor string allocated at creation time */
		kfree(sm_attr->data);

	kfree(ftl->disk_attributes->attrs);
	kfree(ftl->disk_attributes);
/* ----------------------- oob helpers -------------------------------------- */

/* Decode a 10-bit logical block address from the 2-byte on-media field.
 * Layout: lba[0] = 0001 0AAA, lba[1] = AAAA AAAP (P = even-parity bit). */
static int sm_get_lba(uint8_t *lba)
	/* check fixed bits */
	if ((lba[0] & 0xF8) != 0x10)

	/* check parity - endianness doesn't matter for a popcount */
	if (hweight16(*(uint16_t *)lba) & 1)

	/* 3 high bits come from byte 0, 7 low bits from byte 1 */
	return (lba[1] >> 1) | ((lba[0] & 0x07) << 7);
/*
 * Read the LBA associated with a block (from its oob area)
 * returns -1, if block is erased
 * returns -2 if error happens
 */
static int sm_read_lba(struct sm_oob *oob)
	/* a fully erased oob is SM_OOB_SIZE bytes of 0xFF */
	static const uint32_t erased_pattern[4] = {
		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	/* First test for erased block */
	if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))

	/* Now check if both copies of the LBA differ by more than a single
	   bit (one flipped bit between the copies is tolerated) */
	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2;
	if (lba_test && !is_power_of_2(lba_test))

	/* decode the first copy; fall back to the second copy below */
	lba = sm_get_lba(oob->lba_copy1);

	lba = sm_get_lba(oob->lba_copy2);
/* Encode @lba into both on-media LBA copies in the format that
 * sm_get_lba() decodes (fixed 00010 prefix + even overall parity). */
static void sm_write_lba(struct sm_oob *oob, uint16_t lba)
	/* SmartMedia exposes fewer than 1000 LBAs per zone */
	WARN_ON(lba >= 1000);

	tmp[0] = 0x10 | ((lba >> 7) & 0x07);
	tmp[1] = (lba << 1) & 0xFF;

	/* set the parity bit so the 16-bit popcount becomes even */
	if (hweight16(*(uint16_t *)tmp) & 0x01)

	/* both on-media copies are kept identical */
	oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0];
	oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1];
/* Make a linear *physical* mtd offset from (zone, block, byte offset)
 * parts.  Used for the actual read_oob/write_oob/erase calls. */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
	/* offsets must be sector-aligned and inside the device geometry */
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	/* zones are laid out SM_MAX_ZONE_SIZE physical blocks apart */
	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
/* Break a *logical* device offset (as seen by the block layer, see
 * sm_read()/sm_write()) into (zone, block, byte offset) parts.  Each
 * zone logically holds max_lba blocks, which is why this divides by
 * max_lba while sm_mkoffset() multiplies by SM_MAX_ZONE_SIZE: that one
 * works on physical block numbers.  Sets *zone to -1 if out of range. */
static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
	int *zone, int *block, int *boffset)
	*boffset = do_div(offset, ftl->block_size);
	*block = do_div(offset, ftl->max_lba);
	*zone = offset >= ftl->zone_count ? -1 : offset;
/* ---------------------- low level IO ------------------------------------- */

/* Verify/repair one sector using the two per-256-byte-half ECCs stored
 * in the oob.  A nonzero result is treated by sm_read_sector() as an
 * uncorrectable ECC error. */
static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
	/* first 256-byte half, checked against oob->ecc1 */
	__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
	if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)

	buffer += SM_SMALL_PAGE;

	/* second half, checked against oob->ecc2 */
	__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
	if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
/* Reads a sector + oob*/
static int sm_read_sector(struct sm_ftl *ftl,
	int zone, int block, int boffset,
	uint8_t *buffer, struct sm_oob *oob)
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops;
	struct sm_oob tmp_oob;

	/* FTL can contain -1 entries that are by default filled with bits */
	memset(buffer, 0xFF, SM_SECTOR_SIZE);

	/* User might not need the oob, but we do for data verification */

	/* raw mode on small-page NAND so we can run our own ECC above */
	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;

	/* Avoid infinite recursion on CIS reads, sm_recheck_media
	   re-reads the CIS through this very function */
	if (zone == 0 && block == ftl->cis_block && boffset ==

	/* Test if media is stable (give up after 3 attempts) */
	if (try == 3 || sm_recheck_media(ftl))

	/* Unfortunately, oob read will _always_ succeed,
	   despite card removal..... */
	ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors */
	if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
		dbg("read of block %d at zone %d, failed due to error (%d)",

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad" , block, zone);

	/* -EBADMSG from mtd, or a failed software ECC pass on small-page
	   NAND, both count as an uncorrectable read */
	if (ret == -EBADMSG ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
/* Writes a sector to media */
static int sm_write_sector(struct sm_ftl *ftl,
	int zone, int block, int boffset,
	uint8_t *buffer, struct sm_oob *oob)
	struct mtd_oob_ops ops;
	struct mtd_info *mtd = ftl->trans->mtd;

	/* writes must never reach this path on read-only media */
	BUG_ON(ftl->readonly);

	/* block 0 and the CIS block are never rewritten */
	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");

	/* raw mode on small-page NAND: caller pre-computed the ECC bytes */
	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.len = SM_SECTOR_SIZE;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */
	/* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
		dbg("write to block %d at zone %d, failed with error %d",

		sm_recheck_media(ftl);

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
/* ------------------------ block IO ------------------------------------- */

/* Write a block using data and lba, and invalid sector bitmap: sectors
 * whose bit is set in @invalid_bitmap are marked invalid on media
 * instead of carrying data. */
static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf,
	int zone, int block, int lba,
	unsigned long invalid_bitmap)

	/* Initialize the oob with requested values */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	sm_write_lba(&oob, lba);

	for (boffset = 0; boffset < ftl->block_size;
		boffset += SM_SECTOR_SIZE) {

		oob.data_status = 0xFF;

		/* this sector could not be read earlier - flag it invalid */
		if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) {

			sm_printk("sector %d of block at LBA %d of zone %d"
				" coudn't be read, marking it as invalid",
				boffset / SM_SECTOR_SIZE, lba, zone);

		/* compute the two half-sector ECCs ourselves - the write
		   goes out in raw mode on small-page NAND */
		if (ftl->smallpagenand) {
			__nand_calculate_ecc(buf + boffset,
				SM_SMALL_PAGE, oob.ecc1);

			__nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE,
				SM_SMALL_PAGE, oob.ecc2);

		if (!sm_write_sector(ftl, zone, block, boffset,
			buf + boffset, &oob))

	/* If write fails. try to erase the block */
	/* This is safe, because we never write in blocks
	   that contain valuable data.
	   This is intended to repair blocks that are marked
	   as erased, but that aren't fully erased */

	if (sm_erase_block(ftl, zone, block, 0))

	/* erase failed too: give the block up for good */
	sm_mark_block_bad(ftl, zone, block);
/* Mark whole block at offset 'offs' as bad. */
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)

	/* 0xF0 in block_status marks the block bad; rest of oob stays 0xFF */
	memset(&oob, 0xFF, SM_OOB_SIZE);
	oob.block_status = 0xF0;

	/* don't bother if the media itself has become unreadable */
	if (sm_recheck_media(ftl))

	sm_printk("marking block %d of zone %d as bad", block, zone);

	/* We aren't checking the return value, because we don't care */
	/* This also fails on fake xD cards, but I guess these won't expose
	   any bad blocks till they fail completely */
	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
/*
 * Erase a block within a zone
 * If erase succeeds, it updates the free block fifo, otherwise marks
 * the block as bad
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	/* completion is signalled from sm_erase_callback() */
	erase.callback = sm_erase_callback;
	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;
	erase.priv = (u_long)ftl;

	BUG_ON(ftl->readonly);

	/* block 0 and the CIS block must survive any erase */
	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");

	if (mtd->erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",

	/* the erase may complete asynchronously - wait for the callback */
	if (erase.state == MTD_ERASE_PENDING)
		wait_for_completion(&ftl->erase_completion);

	if (erase.state != MTD_ERASE_DONE) {
		sm_printk("erase of block %d in zone %d failed after wait",

	/* block is clean again - return it to the free pool */
	kfifo_in(&zone->free_sectors,
		(const unsigned char *)&block, sizeof(block));

	sm_mark_block_bad(ftl, zone_num, block);
/* mtd erase-done callback: wakes the waiter in sm_erase_block(). */
static void sm_erase_callback(struct erase_info *self)
	struct sm_ftl *ftl = (struct sm_ftl *)self->priv;
	complete(&ftl->erase_completion);
/* Thoroughly test that a block is valid. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)

	/* -3 is a sentinel that no real sm_read_lba() result can equal */
	int lbas[] = { -3, 0, 0, 0 };

	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	   accepted */
	for (boffset = 0; boffset < ftl->block_size;
		boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))

		/* collect each distinct LBA value seen across the sectors */
		test_lba = sm_read_lba(&oob);

		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

	/* If we found three different LBAs, something is fishy */

	/* If the block is sliced (partially erased usually) erase it */
	sm_erase_block(ftl, zone, block, 1);
/* ----------------- media scanning --------------------------------- */

/* Geometry table: { size in MiB, cylinders, heads, sectors } - looked
 * up by media size in sm_get_media_info(). */
static const struct chs_entry chs_table[] = {
	{ 128,  500, 16, 32 },
	{ 256, 1000, 16, 32 },
	{ 512, 1015, 32, 63 },
	{ 1024, 985, 33, 63 },
	{ 2048, 985, 33, 63 },

/* leading bytes of a valid CIS, compared in sm_read_cis() */
static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough */
int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
	int size_in_megs = mtd->size / (1024 * 1024);

	/* ROM media can never be written through this FTL */
	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->smallpagenand = 0;

	switch (size_in_megs) {
	/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
		ftl->zone_size = 256;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

	/* 2 MiB flash SmartMedia (256 byte pages)*/
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
			ftl->zone_size = 256;
			ftl->block_size = 16 * SM_SECTOR_SIZE;

	/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->block_size = 16 * SM_SECTOR_SIZE;

	/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->block_size = 16 * SM_SECTOR_SIZE;

	/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
	   sizes. SmartMedia cards exist up to 128 MiB and have same layout*/
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->block_size = 32 * SM_SECTOR_SIZE;

	/* Test for proper write, erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)

	if (mtd->writesize > SM_SECTOR_SIZE)

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)

	/* We use these functions for IO */
	if (!mtd->read_oob || !mtd->write_oob)

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;

	/* fallback geometry for sizes not present in chs_table */
	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
/* Validate the CIS */
static int sm_read_cis(struct sm_ftl *ftl)

	/* re-read the CIS sector into the buffer kept for this purpose */
	if (sm_read_sector(ftl,
		0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob))

	if (!sm_sector_valid(&oob) || !sm_block_valid(&oob))

	/* compare against the known CIS signature bytes */
	if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
		cis_signature, sizeof(cis_signature))) {
/* Scan the media for the CIS */
static int sm_find_cis(struct sm_ftl *ftl)

	/* Search for first valid block */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))

		if (!sm_block_valid(&oob))

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
		boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))

		if (!sm_sector_valid(&oob))

	/* every sector of the candidate block turned out invalid */
	if (boffset == ftl->block_size)

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	/* the CIS may live in either 256-byte half of the sector */
	cis_found = !sm_read_cis(ftl);

	ftl->cis_page_offset = SM_SMALL_PAGE;
	cis_found = !sm_read_cis(ftl);

	dbg("CIS block found at offset %x",
		block * ftl->block_size +
		boffset + ftl->cis_page_offset);
/* Basic test to determine if the underlying mtd device is functional:
 * re-read the CIS and flip the FTL into unstable/read-only mode on failure. */
static int sm_recheck_media(struct sm_ftl *ftl)
	if (sm_read_cis(ftl)) {

		/* log only on the first failure */
		if (!ftl->unstable) {
			sm_printk("media unstable, not allowing writes");
/* Initialize a FTL zone: scan every block of the zone once to build the
 * LBA -> physical-block table and the free-block fifo. */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
	struct ftl_zone *zone = &ftl->zones[zone_num];

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table (2 bytes per entry, hence the *2) */
	zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)

	/* -1 == LBA unmapped */
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);

	/* Allocate memory for free sectors FIFO (2 bytes per block number) */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))

		/* Test to see if block is erased. It is enough to test
		   first sector, because erase happens in one shot */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
		   very low probability of failure here */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);

		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		   lets leave that to recovery application */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);

		/* If there is no collision,
		   just put the sector in the FTL table */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;

		/* two physical blocks claim the same LBA */
		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
				zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;

		/* If both blocks are valid and share same LBA, it means that
		   they hold different versions of same data. It not
		   known which is more recent, thus just erase one of them
		 */
		sm_printk("both blocks are valid, erasing the later");
		sm_erase_block(ftl, zone_num, block, 1);

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
	   work, but it can still can be (partially) read */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);

	/* Randomize first block we write to (simple wear leveling) */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	/* rotate the fifo: pop entries and push them back */
	len = kfifo_out(&zone->free_sectors,
		(unsigned char *)&block, 2);

	kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
/* Get and automatically initialize an FTL mapping for one zone.
 * Returns an ERR_PTR on initialization failure. */
struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
	struct ftl_zone *zone;

	BUG_ON(zone_num >= ftl->zone_count);
	zone = &ftl->zones[zone_num];

	/* zones are scanned lazily, on first access */
	if (!zone->initialized) {
		error = sm_init_zone(ftl, zone_num);

			return ERR_PTR(error);
/* ----------------- cache handling ------------------------------------------*/

/* Initialize the one block cache */
void sm_cache_init(struct sm_ftl *ftl)
	/* one bit per sector; all sectors start out invalid */
	ftl->cache_data_invalid_bitmap = 0xFFFFFFFF;
	ftl->cache_clean = 1;
	/* -1 == cache currently holds no block */
	ftl->cache_zone = -1;
	ftl->cache_block = -1;
	/*memset(ftl->cache_data, 0xAA, ftl->block_size);*/
/* Put sector in one block cache */
void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
	memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
	/* this sector now holds valid data, and the cache is dirty */
	clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
	ftl->cache_clean = 0;
/* Read a sector from the cache; fails if that sector was never stored. */
int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
	if (test_bit(boffset / SM_SECTOR_SIZE,
		&ftl->cache_data_invalid_bitmap))

	memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE);
/* Write the cache to hardware: read back missing sectors, write the whole
 * block out of place to a fresh free block, retarget the FTL table and
 * erase the old physical block. */
int sm_cache_flush(struct sm_ftl *ftl)
	struct ftl_zone *zone;

	uint16_t write_sector;
	int zone_num = ftl->cache_zone;

	/* nothing to do if no sector was written since the last flush */
	if (ftl->cache_clean)

	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	block_num = zone->lba_to_phys_table[ftl->cache_block];

	/* Try to read all unread areas of the cache block*/
	for_each_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
			clear_bit(sector_num,
				&ftl->cache_data_invalid_bitmap);

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
	   but for such worn out media it isn't worth the trouble,
	 */
	if (kfifo_out(&zone->free_sectors,
		(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");

	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	sm_erase_block(ftl, zone_num, block_num, 1);
/* flush timer, runs cache_timeout ms after the last write */
static void sm_cache_flush_timer(unsigned long data)
	struct sm_ftl *ftl = (struct sm_ftl *)data;
	/* defer to process context: the flush needs to take ftl->mutex */
	queue_work(cache_flush_workqueue, &ftl->flush_work);
/* cache flush work, kicked by timer: flush the cache under the ftl mutex */
static void sm_cache_flush_work(struct work_struct *work)
	struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work);
	mutex_lock(&ftl->mutex);
	mutex_unlock(&ftl->mutex);
/* ---------------- outside interface -------------------------------------- */

/* outside interface: read a sector */
static int sm_read(struct mtd_blktrans_dev *dev,
	unsigned long sect_no, char *buf)
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error = 0, in_cache = 0;
	int zone_num, block, boffset;

	/* 512-byte sectors: logical byte offset = sect_no << 9 */
	sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
		error = PTR_ERR(zone);

	/* Have to look at cache first */
	if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
		if (!sm_cache_get(ftl, buf, boffset))

	/* Translate the block and return if it doesn't exist in the table */
	block = zone->lba_to_phys_table[block];

	/* unmapped blocks read back as all-0xFF */
	memset(buf, 0xFF, SM_SECTOR_SIZE);

	if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) {

	/* update the one-block cache */
	sm_cache_put(ftl, buf, boffset);

	mutex_unlock(&ftl->mutex);
/* outside interface: write a sector (into the one-block cache) */
static int sm_write(struct mtd_blktrans_dev *dev,
	unsigned long sec_no, char *buf)
	struct sm_ftl *ftl = dev->priv;
	struct ftl_zone *zone;
	int error, zone_num, block, boffset;

	BUG_ON(ftl->readonly);
	sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset);

	/* No need in flush thread running now */
	del_timer(&ftl->timer);
	mutex_lock(&ftl->mutex);

	zone = sm_get_zone(ftl, zone_num);
		error = PTR_ERR(zone);

	/* If entry is not in cache, flush it */
	if (ftl->cache_block != block || ftl->cache_zone != zone_num) {

		error = sm_cache_flush(ftl);

		/* retarget the cache at the block being written */
		ftl->cache_block = block;
		ftl->cache_zone = zone_num;

	sm_cache_put(ftl, buf, boffset);

	/* re-arm the delayed flush */
	mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout));
	mutex_unlock(&ftl->mutex);
/* outside interface: flush everything */
static int sm_flush(struct mtd_blktrans_dev *dev)
	struct sm_ftl *ftl = dev->priv;

	/* write out the one-block cache under the ftl mutex */
	mutex_lock(&ftl->mutex);
	retval = sm_cache_flush(ftl);
	mutex_unlock(&ftl->mutex);
1093 /* outside interface: device is released */
1094 static int sm_release(struct mtd_blktrans_dev *dev)
1096 struct sm_ftl *ftl = dev->priv;
1098 mutex_lock(&ftl->mutex);
1099 del_timer_sync(&ftl->timer);
1100 cancel_work_sync(&ftl->flush_work);
1101 sm_cache_flush(ftl);
1102 mutex_unlock(&ftl->mutex);
/* outside interface: get geometry */
static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
	struct sm_ftl *ftl = dev->priv;
	/* report the CHS values chosen in sm_get_media_info() */
	geo->heads = ftl->heads;
	geo->sectors = ftl->sectors;
	geo->cylinders = ftl->cylinders;
/* external interface: main initialization function */
static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
	struct mtd_blktrans_dev *trans;

	/* Allocate & initialize our private structure */
	ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL);

	mutex_init(&ftl->mutex);
	/* delayed cache flush chain: timer -> workqueue -> sm_cache_flush */
	setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
	INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
	init_completion(&ftl->erase_completion);

	/* Read media information */
	if (sm_get_media_info(ftl, mtd)) {
		dbg("found unsupported mtd device, aborting");

	/* Allocate temporary CIS buffer for read retry support */
	ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
	if (!ftl->cis_buffer)

	/* Allocate zone array, it will be initialized on demand */
	ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count,

	/* Allocate the cache (one block) */
	ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);

	if (!ftl->cache_data)

	/* Allocate upper layer structure and initialize it */
	trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL);

	/* device size in 512-byte sectors */
	trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9;
	trans->readonly = ftl->readonly;

	if (sm_find_cis(ftl)) {
		dbg("CIS not found on mtd device, aborting");

	/* NOTE(review): sm_create_sysfs_attributes() can fail (allocations);
	   the result is not checked for NULL here - confirm error handling */
	ftl->disk_attributes = sm_create_sysfs_attributes(ftl);
	trans->disk_attributes = ftl->disk_attributes;

	sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d",
		(int)(mtd->size / (1024 * 1024)), mtd->index);

	dbg("%d zone(s), each consists of %d blocks (+%d spares)",
		ftl->zone_count, ftl->max_lba,
		ftl->zone_size - ftl->max_lba);
	dbg("each block consists of %d bytes",

	/* Register device*/
	if (add_mtd_blktrans_dev(trans)) {
		dbg("error in mtdblktrans layer");

	/* error unwind */
	kfree(ftl->cache_data);

	kfree(ftl->cis_buffer);
/* main interface: device {surprise,} removal */
static void sm_remove_dev(struct mtd_blktrans_dev *dev)
	struct sm_ftl *ftl = dev->priv;

	del_mtd_blktrans_dev(dev);

	/* free per-zone tables and fifos for every initialized zone */
	for (i = 0 ; i < ftl->zone_count; i++) {

		if (!ftl->zones[i].initialized)

		kfree(ftl->zones[i].lba_to_phys_table);
		kfifo_free(&ftl->zones[i].free_sectors);

	sm_delete_sysfs_attributes(ftl);
	kfree(ftl->cis_buffer);

	kfree(ftl->cache_data);
/* mtd block-translation operations implemented by this FTL */
static struct mtd_blktrans_ops sm_ftl_ops = {
	.part_bits = SM_FTL_PARTN_BITS,
	.blksize = SM_SECTOR_SIZE,
	.getgeo = sm_getgeo,

	.add_mtd = sm_add_mtd,
	.remove_dev = sm_remove_dev,

	.readsect = sm_read,
	.writesect = sm_write,

	.release = sm_release,

	.owner = THIS_MODULE,
1257 static __init int sm_module_init(void)
1260 cache_flush_workqueue = create_freezeable_workqueue("smflush");
1262 if (IS_ERR(cache_flush_workqueue))
1263 return PTR_ERR(cache_flush_workqueue);
1265 error = register_mtd_blktrans(&sm_ftl_ops);
1267 destroy_workqueue(cache_flush_workqueue);
/* Module exit: tear down the flush workqueue and unregister the FTL. */
static void __exit sm_module_exit(void)
	destroy_workqueue(cache_flush_workqueue);
	deregister_mtd_blktrans(&sm_ftl_ops);
/* standard module entry/exit hookup and metadata */
module_init(sm_module_init);
module_exit(sm_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer");