/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>

#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0
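/* Re-try a failed word program this many times before reporting an error */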
#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);

static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif

#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
		mtd->write = cfi_amdstd_write_buffers;
	}
}

/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
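	/*
	 * Note: the first five bytes (the "PRI" signature plus the major and
	 * minor version) are laid out identically in both PRI formats, which
	 * is why only the bytes after them are wiped and re-derived here.
	 */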
	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}

static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}

/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}

static void fixup_sst39vf(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}

static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;
}

static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}

static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}
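/*
 * The two fixups above rely on the CFI erase-region encoding: bits 0-15 of
 * EraseRegionInfo hold (number of erase blocks - 1) and bits 16-31 hold
 * (erase block size / 256 bytes), so flipping bit 6 of the block count
 * switches a region between 64 and 128 sectors.
 */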
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * common as well.  This table picks all the cases where
	 * we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}

struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		unsigned char bootloc;
		__u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
			 * see: http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_r20.pdf, page 19
			 *      http://www.amd.com/us-en/assets/content_type/DownloadableAssets/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i < cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
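/*
 * For reference: a map driver normally reaches this code indirectly.  It
 * calls do_map_probe() with "cfi_probe", which identifies the chip and
 * dispatches to cfi_cmdset_0002() (or one of the aliases above) based on
 * the primary vendor command set ID.  A hypothetical caller:
 *
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &my_map);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */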
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI) ? "CFI" : "JEDEC", cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	// debug
	for (i=0; i<mtd->numeraseregions; i++) {
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i, mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}

/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips,
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}

/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips,
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
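/*
 * Both helpers above rely on the AMD-style status scheme: while a program
 * or erase is running, DQ6 toggles on every read, so two back-to-back reads
 * that compare equal mean the operation has completed (or been suspended).
 */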
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we're trying
				 * to use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * of xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY) {
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if (signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}

static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
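/*
 * Word programming, below, follows the standard AMD command sequence: two
 * unlock cycles (0xAA to addr_unlock1, 0x55 to addr_unlock2), the Program
 * command (0xA0), then the datum written to the target address.  Completion
 * is detected by toggle-bit polling via chip_ready()/chip_good().
 */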
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have max write times of a few hundred usecs).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip doesn't have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if (signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
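/*
 * Buffered writes, below, hand the chip up to wbufsize bytes at once: after
 * the two unlock cycles, Write to Buffer (0x25) and a word count are sent
 * to the sector address, the data words follow, and Write Buffer Program
 * Confirm (0x29) starts the actual programming.
 */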
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
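/*
 * Sector erase, below, uses the same six-cycle unlock sequence as chip
 * erase, except that the final cycle writes 0x30 to the sector address
 * itself instead of 0x10 to the unlock address.
 */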
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
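/*
 * cfi_varsize_frob() walks every erase block that intersects [ofs, ofs+len)
 * and invokes the given worker (do_atmel_lock/do_atmel_unlock above, or
 * do_erase_oneblock) once per block with that block's address and size.
 */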
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}

static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}

/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}

static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}
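/*
 * Called via the mtd_chip_driver hook when the map driver releases the
 * device (map_destroy()); undoes what cfi_cmdset_0002()/cfi_amdstd_setup()
 * registered and allocated.
 */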
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");