2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
9 * 2_by_8 routines added by Simon Munton
11 * 4_by_16 work by Carolyn J. Smith
13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
28 #include <asm/byteorder.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
36 #include <linux/of_platform.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/cfi.h>
40 #include <linux/mtd/xip.h>
42 #define AMD_BOOTLOC_BUG
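/* Build-time switch: set to 1 to always program word-by-word and skip the buffered write path */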
43 #define FORCE_WORD_WRITE 0
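/* Number of times a failed word program is retried before giving up */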
45 #define MAX_WORD_RETRIES 3
47 #define SST49LF004B 0x0060
48 #define SST49LF040B 0x0050
49 #define SST49LF008A 0x005a
50 #define AT49BV6416 0x00d6
52 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
53 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
54 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
55 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
56 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
57 static void cfi_amdstd_sync (struct mtd_info *);
58 static int cfi_amdstd_suspend (struct mtd_info *);
59 static void cfi_amdstd_resume (struct mtd_info *);
60 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
61 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
63 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
64 size_t *retlen, const u_char *buf);
66 static void cfi_amdstd_destroy(struct mtd_info *);
68 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
69 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
71 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
72 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
75 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
76 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
78 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
79 static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
80 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
82 static struct mtd_chip_driver cfi_amdstd_chipdrv = {
83 .probe = NULL, /* Not usable directly */
84 .destroy = cfi_amdstd_destroy,
85 .name = "cfi_cmdset_0002",
90 /* #define DEBUG_CFI_FEATURES */
93 #ifdef DEBUG_CFI_FEATURES
94 static void cfi_tell_features(struct cfi_pri_amdstd *extp)
96 const char* erase_suspend[3] = {
97 "Not supported", "Read only", "Read/write"
99 const char* top_bottom[6] = {
100 "No WP", "8x8KiB sectors at top & bottom, no WP",
101 "Bottom boot", "Top boot",
102 "Uniform, Bottom WP", "Uniform, Top WP"
105 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
106 printk(" Address sensitive unlock: %s\n",
107 (extp->SiliconRevision & 1) ? "Not required" : "Required");
109 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
110 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
112 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
114 if (extp->BlkProt == 0)
115 printk(" Block protection: Not supported\n");
117 printk(" Block protection: %d sectors per group\n", extp->BlkProt);
120 printk(" Temporary block unprotect: %s\n",
121 extp->TmpBlkUnprotect ? "Supported" : "Not supported");
122 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
123 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
124 printk(" Burst mode: %s\n",
125 extp->BurstMode ? "Supported" : "Not supported");
126 if (extp->PageMode == 0)
127 printk(" Page mode: Not supported\n");
129 printk(" Page mode: %d word page\n", extp->PageMode << 2);
131 printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
132 extp->VppMin >> 4, extp->VppMin & 0xf);
133 printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
134 extp->VppMax >> 4, extp->VppMax & 0xf);
136 if (extp->TopBottom < ARRAY_SIZE(top_bottom))
137 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
139 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
143 #ifdef AMD_BOOTLOC_BUG
144 /* Wheee. Bring me the head of someone at AMD. */
145 static void fixup_amd_bootblock(struct mtd_info *mtd)
147 struct map_info *map = mtd->priv;
148 struct cfi_private *cfi = map->fldrv_priv;
149 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
150 __u8 major = extp->MajorVersion;
151 __u8 minor = extp->MinorVersion;
153 if (((major << 8) | minor) < 0x3131) {
154 /* CFI version 1.0 => don't trust bootloc */
156 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
157 map->name, cfi->mfr, cfi->id);
159 /* AFAICS all 29LV400 with a bottom boot block have a device ID
160 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
161 * These were badly detected as they have the 0x80 bit set
162 * so treat them as a special case.
164 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
166 /* Macronix added CFI to their 2nd generation
167 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
	 * Fujitsu, Spansion, EON, ESI and older Macronix)
	 * has CFI.
	 *
171 * Therefore also check the manufacturer.
172 * This reduces the risk of false detection due to
173 * the 8-bit device ID.
175 (cfi->mfr == CFI_MFR_MACRONIX)) {
176 pr_debug("%s: Macronix MX29LV400C with bottom boot block"
177 " detected\n", map->name);
178 extp->TopBottom = 2; /* bottom boot */
180 if (cfi->id & 0x80) {
181 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
182 extp->TopBottom = 3; /* top boot */
184 extp->TopBottom = 2; /* bottom boot */
187 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
188 " deduced %s from Device ID\n", map->name, major, minor,
189 extp->TopBottom == 2 ? "bottom" : "top");
194 static void fixup_use_write_buffers(struct mtd_info *mtd)
196 struct map_info *map = mtd->priv;
197 struct cfi_private *cfi = map->fldrv_priv;
198 if (cfi->cfiq->BufWriteTimeoutTyp) {
199 pr_debug("Using buffer write method\n" );
200 mtd->_write = cfi_amdstd_write_buffers;
204 /* Atmel chips don't use the same PRI format as AMD chips */
205 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
207 struct map_info *map = mtd->priv;
208 struct cfi_private *cfi = map->fldrv_priv;
209 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
210 struct cfi_pri_atmel atmel_pri;
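	/* Keep the first 5 bytes (the "PRI" signature plus major/minor version)
	 * and clear the rest of the AMD extended table before translating the
	 * Atmel-specific fields below */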
212 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
213 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
215 if (atmel_pri.Features & 0x02)
216 extp->EraseSuspend = 2;
218 /* Some chips got it backwards... */
219 if (cfi->id == AT49BV6416) {
220 if (atmel_pri.BottomBoot)
225 if (atmel_pri.BottomBoot)
231 /* burst write mode not supported */
232 cfi->cfiq->BufWriteTimeoutTyp = 0;
233 cfi->cfiq->BufWriteTimeoutMax = 0;
236 static void fixup_use_secsi(struct mtd_info *mtd)
238 /* Setup for chips with a secsi area */
239 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
240 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
243 static void fixup_use_erase_chip(struct mtd_info *mtd)
245 struct map_info *map = mtd->priv;
246 struct cfi_private *cfi = map->fldrv_priv;
247 if ((cfi->cfiq->NumEraseRegions == 1) &&
248 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
249 mtd->_erase = cfi_amdstd_erase_chip;
255 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
258 static void fixup_use_atmel_lock(struct mtd_info *mtd)
260 mtd->_lock = cfi_atmel_lock;
261 mtd->_unlock = cfi_atmel_unlock;
262 mtd->flags |= MTD_POWERUP_LOCK;
265 static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
267 struct map_info *map = mtd->priv;
268 struct cfi_private *cfi = map->fldrv_priv;
271 * These flashes report two separate eraseblock regions based on the
272 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
276 cfi->cfiq->NumEraseRegions = 1;
279 static void fixup_sst39vf(struct mtd_info *mtd)
281 struct map_info *map = mtd->priv;
282 struct cfi_private *cfi = map->fldrv_priv;
284 fixup_old_sst_eraseregion(mtd);
286 cfi->addr_unlock1 = 0x5555;
287 cfi->addr_unlock2 = 0x2AAA;
290 static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
292 struct map_info *map = mtd->priv;
293 struct cfi_private *cfi = map->fldrv_priv;
295 fixup_old_sst_eraseregion(mtd);
297 cfi->addr_unlock1 = 0x555;
298 cfi->addr_unlock2 = 0x2AA;
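	/* These parts use 0x50 as the sector erase command instead of the default 0x30 */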
300 cfi->sector_erase_cmd = CMD(0x50);
303 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
305 struct map_info *map = mtd->priv;
306 struct cfi_private *cfi = map->fldrv_priv;
308 fixup_sst39vf_rev_b(mtd);
311 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
312 * it should report a size of 8KBytes (0x0020*256).
314 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
315 pr_warning("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", mtd->name);
318 static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
320 struct map_info *map = mtd->priv;
321 struct cfi_private *cfi = map->fldrv_priv;
323 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
324 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
325 pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
329 static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
331 struct map_info *map = mtd->priv;
332 struct cfi_private *cfi = map->fldrv_priv;
334 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
335 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
336 pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
340 static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
342 struct map_info *map = mtd->priv;
343 struct cfi_private *cfi = map->fldrv_priv;
346 * S29NS512P flash uses more than 8bits to report number of sectors,
347 * which is not permitted by CFI.
349 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
350 pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
353 /* Used to fix CFI-Tables of chips without Extended Query Tables */
354 static struct cfi_fixup cfi_nopri_fixup_table[] = {
355 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
356 { CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
357 { CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
358 { CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
359 { CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
360 { CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
361 { CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
362 { CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
366 static struct cfi_fixup cfi_fixup_table[] = {
367 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
368 #ifdef AMD_BOOTLOC_BUG
369 { CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
370 { CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
371 { CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
373 { CFI_MFR_AMD, 0x0050, fixup_use_secsi },
374 { CFI_MFR_AMD, 0x0053, fixup_use_secsi },
375 { CFI_MFR_AMD, 0x0055, fixup_use_secsi },
376 { CFI_MFR_AMD, 0x0056, fixup_use_secsi },
377 { CFI_MFR_AMD, 0x005C, fixup_use_secsi },
378 { CFI_MFR_AMD, 0x005F, fixup_use_secsi },
379 { CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
380 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
381 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
382 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
383 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
384 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
385 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
386 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
387 { CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
388 #if !FORCE_WORD_WRITE
389 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
393 static struct cfi_fixup jedec_fixup_table[] = {
394 { CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
395 { CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
396 { CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
400 static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common. It is likely that the device IDs are
	 * as well. This table picks out all cases where we
	 * know that is the case.
406 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
407 { CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
412 static void cfi_fixup_major_minor(struct cfi_private *cfi,
413 struct cfi_pri_amdstd *extp)
415 if (cfi->mfr == CFI_MFR_SAMSUNG) {
416 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
417 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
419 * Samsung K8P2815UQB and K8D6x16UxM chips
420 * report major=0 / minor=0.
421 * K8D3x16UxC chips report major=3 / minor=3.
423 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
424 " Extended Query version to 1.%c\n",
426 extp->MajorVersion = '1';
431 * SST 38VF640x chips report major=0xFF / minor=0xFF.
433 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
434 extp->MajorVersion = '1';
435 extp->MinorVersion = '0';
439 static int is_m29ew(struct cfi_private *cfi)
441 if (cfi->mfr == CFI_MFR_INTEL &&
442 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
443 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
449 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
450 * Some revisions of the M29EW suffer from erase suspend hang ups. In
451 * particular, it can occur when the sequence
452 * Erase Confirm -> Suspend -> Program -> Resume
453 * causes a lockup due to internal timing issues. The consequence is that the
454 * erase cannot be resumed without inserting a dummy command after programming
455 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
456 * that writes an F0 command code before the RESUME command.
458 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
461 struct cfi_private *cfi = map->fldrv_priv;
462 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
464 map_write(map, CMD(0xF0), adr);
468 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
470 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
471 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
472 * command is issued after an ERASE RESUME operation without waiting for a
473 * minimum delay. The result is that once the ERASE seems to be completed
474 * (no bits are toggling), the contents of the Flash memory block on which
475 * the erase was ongoing could be inconsistent with the expected values
476 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
477 * values), causing a consequent failure of the ERASE operation.
478 * The occurrence of this issue could be high, especially when file system
479 * operations on the Flash are intensive. As a result, it is recommended
480 * that a patch be applied. Intensive file system operations can cause many
481 * calls to the garbage routine to free Flash space (also by erasing physical
482 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
483 * commands can occur. The problem disappears when a delay is inserted after
484 * the RESUME command by using the udelay() function available in Linux.
485 * The DELAY value must be tuned based on the customer's platform.
486 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
489 * We have chosen 500µs because this latency is acceptable.
491 static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
	 * Resolving the Delay After Resume issue, see Micron TN-13-07.
	 * The worst-case delay must be 500µs, but 30-50µs should be OK as well.
501 struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
503 struct cfi_private *cfi = map->fldrv_priv;
504 struct device_node __maybe_unused *np = map->device_node;
505 struct mtd_info *mtd;
508 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
512 mtd->type = MTD_NORFLASH;
514 /* Fill in the default mtd operations */
515 mtd->_erase = cfi_amdstd_erase_varsize;
516 mtd->_write = cfi_amdstd_write_words;
517 mtd->_read = cfi_amdstd_read;
518 mtd->_sync = cfi_amdstd_sync;
519 mtd->_suspend = cfi_amdstd_suspend;
520 mtd->_resume = cfi_amdstd_resume;
521 mtd->flags = MTD_CAP_NORFLASH;
522 mtd->name = map->name;
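	/* MaxBufWriteSize is log2 of the per-chip write buffer size in bytes;
	 * scale by the interleave to get the buffer size seen through this map */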
524 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
526 pr_debug("MTD %s(): write buffer size %d\n", __func__,
529 mtd->_panic_write = cfi_amdstd_panic_write;
530 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
532 if (cfi->cfi_mode==CFI_MODE_CFI){
533 unsigned char bootloc;
534 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
535 struct cfi_pri_amdstd *extp;
537 extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
540 * It's a real CFI chip, not one for which the probe
541 * routine faked a CFI structure.
543 cfi_fixup_major_minor(cfi, extp);
546 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
547 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
548 * http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
549 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
550 * http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
552 if (extp->MajorVersion != '1' ||
553 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
554 printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
555 "version %c.%c (%#02x/%#02x).\n",
556 extp->MajorVersion, extp->MinorVersion,
557 extp->MajorVersion, extp->MinorVersion);
563 printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
564 extp->MajorVersion, extp->MinorVersion);
566 /* Install our own private info structure */
567 cfi->cmdset_priv = extp;
569 /* Apply cfi device specific fixups */
570 cfi_fixup(mtd, cfi_fixup_table);
572 #ifdef DEBUG_CFI_FEATURES
573 /* Tell the user about it in lots of lovely detail */
574 cfi_tell_features(extp);
578 if (np && of_property_read_bool(
579 np, "use-advanced-sector-protection")
580 && extp->BlkProtUnprot == 8) {
581 printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
582 mtd->_lock = cfi_ppb_lock;
583 mtd->_unlock = cfi_ppb_unlock;
584 mtd->_is_locked = cfi_ppb_is_locked;
588 bootloc = extp->TopBottom;
589 if ((bootloc < 2) || (bootloc > 5)) {
590 printk(KERN_WARNING "%s: CFI contains unrecognised boot "
591 "bank location (%d). Assuming bottom.\n",
596 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
597 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
599 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
600 int j = (cfi->cfiq->NumEraseRegions-1)-i;
603 swap = cfi->cfiq->EraseRegionInfo[i];
604 cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
605 cfi->cfiq->EraseRegionInfo[j] = swap;
608 /* Set the default CFI lock/unlock addresses */
609 cfi->addr_unlock1 = 0x555;
610 cfi->addr_unlock2 = 0x2aa;
612 cfi_fixup(mtd, cfi_nopri_fixup_table);
614 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
620 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
621 /* Apply jedec specific fixups */
622 cfi_fixup(mtd, jedec_fixup_table);
624 /* Apply generic fixups */
625 cfi_fixup(mtd, fixup_table);
627 for (i=0; i< cfi->numchips; i++) {
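	/* CFI gives typical timeouts as powers of two: program times in
	 * microseconds, block erase time in milliseconds */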
628 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
629 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
630 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
631 cfi->chips[i].ref_point_counter = 0;
632 init_waitqueue_head(&(cfi->chips[i].wq));
635 map->fldrv = &cfi_amdstd_chipdrv;
637 return cfi_amdstd_setup(mtd);
639 struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
640 struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
641 EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
642 EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
643 EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
645 static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
647 struct map_info *map = mtd->priv;
648 struct cfi_private *cfi = map->fldrv_priv;
649 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
650 unsigned long offset = 0;
653 printk(KERN_NOTICE "number of %s chips: %d\n",
654 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
655 /* Select the correct geometry setup */
656 mtd->size = devsize * cfi->numchips;
658 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
659 mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
660 * mtd->numeraseregions, GFP_KERNEL);
661 if (!mtd->eraseregions)
664 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
665 unsigned long ernum, ersize;
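		/* The upper 16 bits of EraseRegionInfo hold the block size in
		 * 256-byte units, the low 16 bits the number of blocks minus one */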
666 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
667 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
669 if (mtd->erasesize < ersize) {
670 mtd->erasesize = ersize;
672 for (j=0; j<cfi->numchips; j++) {
673 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
674 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
675 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
677 offset += (ersize * ernum);
679 if (offset != devsize) {
681 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
685 __module_get(THIS_MODULE);
686 register_reboot_notifier(&mtd->reboot_notifier);
690 kfree(mtd->eraseregions);
692 kfree(cfi->cmdset_priv);
698 * Return true if the chip is ready.
700 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
701 * non-suspended sector) and is indicated by no toggle bits toggling.
703 * Note that anything more complicated than checking if no bits are toggling
704 * (including checking DQ5 for an error status) is tricky to get working
705 * correctly and is therefore not done (particularly with interleaved chips
706 * as each chip must be checked independently of the others).
708 static int __xipram chip_ready(struct map_info *map, unsigned long addr)
712 d = map_read(map, addr);
713 t = map_read(map, addr);
715 return map_word_equal(map, d, t);
719 * Return true if the chip is ready and has the correct value.
721 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
722 * non-suspended sector) and it is indicated by no bits toggling.
 * Errors are indicated by toggling bits, or by bits held at the wrong value.
727 * Note that anything more complicated than checking if no bits are toggling
728 * (including checking DQ5 for an error status) is tricky to get working
729 * correctly and is therefore not done (particularly with interleaved chips
730 * as each chip must be checked independently of the others).
733 static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
737 oldd = map_read(map, addr);
738 curd = map_read(map, addr);
740 return map_word_equal(map, oldd, curd) &&
741 map_word_equal(map, curd, expected);
744 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
746 DECLARE_WAITQUEUE(wait, current);
747 struct cfi_private *cfi = map->fldrv_priv;
749 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
752 timeo = jiffies + HZ;
754 switch (chip->state) {
758 if (chip_ready(map, adr))
761 if (time_after(jiffies, timeo)) {
762 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
765 mutex_unlock(&chip->mutex);
767 mutex_lock(&chip->mutex);
768 /* Someone else might have been playing with it. */
778 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
779 !(mode == FL_READY || mode == FL_POINT ||
780 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
783 /* We could check to see if we're trying to access the sector
784 * that is currently being erased. However, no user will try
785 * anything like that so we just wait for the timeout. */
788 /* It's harmless to issue the Erase-Suspend and Erase-Resume
789 * commands when the erase algorithm isn't in progress. */
790 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
791 chip->oldstate = FL_ERASING;
792 chip->state = FL_ERASE_SUSPENDING;
793 chip->erase_suspended = 1;
795 if (chip_ready(map, adr))
798 if (time_after(jiffies, timeo)) {
799 /* Should have suspended the erase by now.
800 * Send an Erase-Resume command as either
801 * there was an error (so leave the erase
			 * routine to recover from it) or we are trying to
803 * use the erase-in-progress sector. */
804 put_chip(map, chip, adr);
805 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
809 mutex_unlock(&chip->mutex);
811 mutex_lock(&chip->mutex);
812 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
813 So we can just loop here. */
815 chip->state = FL_READY;
818 case FL_XIP_WHILE_ERASING:
819 if (mode != FL_READY && mode != FL_POINT &&
820 (!cfip || !(cfip->EraseSuspend&2)))
822 chip->oldstate = chip->state;
823 chip->state = FL_READY;
827 /* The machine is rebooting */
831 /* Only if there's no operation suspended... */
832 if (mode == FL_READY && chip->oldstate == FL_READY)
837 set_current_state(TASK_UNINTERRUPTIBLE);
838 add_wait_queue(&chip->wq, &wait);
839 mutex_unlock(&chip->mutex);
841 remove_wait_queue(&chip->wq, &wait);
842 mutex_lock(&chip->mutex);
848 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
850 struct cfi_private *cfi = map->fldrv_priv;
852 switch(chip->oldstate) {
854 cfi_fixup_m29ew_erase_suspend(map,
855 chip->in_progress_block_addr);
856 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
857 cfi_fixup_m29ew_delay_after_resume(cfi);
858 chip->oldstate = FL_READY;
859 chip->state = FL_ERASING;
862 case FL_XIP_WHILE_ERASING:
863 chip->state = chip->oldstate;
864 chip->oldstate = FL_READY;
871 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
876 #ifdef CONFIG_MTD_XIP
 * No interrupt whatsoever can be serviced while the flash isn't in array
880 * mode. This is ensured by the xip_disable() and xip_enable() functions
881 * enclosing any code path where the flash is known not to be in array mode.
882 * And within a XIP disabled code path, only functions marked with __xipram
883 * may be called and nothing else (it's a good thing to inspect generated
884 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
889 static void xip_disable(struct map_info *map, struct flchip *chip,
892 /* TODO: chips with no XIP use should ignore and return */
893 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
897 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
900 struct cfi_private *cfi = map->fldrv_priv;
902 if (chip->state != FL_POINT && chip->state != FL_READY) {
903 map_write(map, CMD(0xf0), adr);
904 chip->state = FL_READY;
906 (void) map_read(map, adr);
912 * When a delay is required for the flash operation to complete, the
913 * xip_udelay() function is polling for both the given timeout and pending
914 * (but still masked) hardware interrupts. Whenever there is an interrupt
915 * pending then the flash erase operation is suspended, array mode restored
916 * and interrupts unmasked. Task scheduling might also happen at that
917 * point. The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
921 * Warning: this function _will_ fool interrupt latency tracing tools.
924 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
925 unsigned long adr, int usec)
927 struct cfi_private *cfi = map->fldrv_priv;
928 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
929 map_word status, OK = CMD(0x80);
930 unsigned long suspended, start = xip_currtime();
935 if (xip_irqpending() && extp &&
936 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
937 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
939 * Let's suspend the erase operation when supported.
940 * Note that we currently don't try to suspend
941 * interleaved chips if there is already another
942 * operation suspended (imagine what happens
943 * when one chip was already done with the current
944 * operation while another chip suspended it, then
			 * we resume the whole thing at once). Yes, it
			 * can happen!
948 map_write(map, CMD(0xb0), adr);
949 usec -= xip_elapsed_since(start);
950 suspended = xip_currtime();
952 if (xip_elapsed_since(suspended) > 100000) {
954 * The chip doesn't want to suspend
955 * after waiting for 100 msecs.
956 * This is a critical error but there
957 * is not much we can do here.
961 status = map_read(map, adr);
962 } while (!map_word_andequal(map, status, OK, OK));
964 /* Suspend succeeded */
965 oldstate = chip->state;
966 if (!map_word_bitsset(map, status, CMD(0x40)))
968 chip->state = FL_XIP_WHILE_ERASING;
969 chip->erase_suspended = 1;
970 map_write(map, CMD(0xf0), adr);
971 (void) map_read(map, adr);
974 mutex_unlock(&chip->mutex);
979 * We're back. However someone else might have
980 * decided to go write to the chip if we are in
			 * a suspended erase state. If so let's wait
			 * until it's done.
984 mutex_lock(&chip->mutex);
985 while (chip->state != FL_XIP_WHILE_ERASING) {
986 DECLARE_WAITQUEUE(wait, current);
987 set_current_state(TASK_UNINTERRUPTIBLE);
988 add_wait_queue(&chip->wq, &wait);
989 mutex_unlock(&chip->mutex);
991 remove_wait_queue(&chip->wq, &wait);
992 mutex_lock(&chip->mutex);
994 /* Disallow XIP again */
997 /* Correct Erase Suspend Hangups for M29EW */
998 cfi_fixup_m29ew_erase_suspend(map, adr);
999 /* Resume the write or erase operation */
1000 map_write(map, cfi->sector_erase_cmd, adr);
1001 chip->state = oldstate;
1002 start = xip_currtime();
1003 } else if (usec >= 1000000/HZ) {
1005 * Try to save on CPU power when waiting delay
1006 * is at least a system timer tick period.
1007 * No need to be extremely accurate here.
1011 status = map_read(map, adr);
1012 } while (!map_word_andequal(map, status, OK, OK)
1013 && xip_elapsed_since(start) < usec);
1016 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
1019 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1020 * the flash is actively programming or erasing since we have to poll for
1021 * the operation to complete anyway. We can't do that in a generic way with
1022 * a XIP setup so do it before the actual flash operation in this case
1023 * and stub it out from INVALIDATE_CACHE_UDELAY.
1025 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1026 INVALIDATE_CACHED_RANGE(map, from, size)
1028 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1029 UDELAY(map, chip, adr, usec)
1034 * Activating this XIP support changes the way the code works a bit. For
1035 * example the code to suspend the current process when concurrent access
1036 * happens is never executed because xip_udelay() will always return with the
1037 * same chip state as it was entered with. This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple of
1039 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
1040 * The queueing and scheduling are always happening within xip_udelay().
1042 * Similarly, get_chip() and put_chip() just happen to always be executed
1043 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
1044 * is in array mode, therefore never executing many cases therein and not
1045 * causing any problem with XIP.
1050 #define xip_disable(map, chip, adr)
1051 #define xip_enable(map, chip, adr)
1052 #define XIP_INVAL_CACHED_RANGE(x...)
1054 #define UDELAY(map, chip, adr, usec) \
1056 mutex_unlock(&chip->mutex); \
1058 mutex_lock(&chip->mutex); \
1061 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1063 mutex_unlock(&chip->mutex); \
1064 INVALIDATE_CACHED_RANGE(map, adr, len); \
1066 mutex_lock(&chip->mutex); \
1071 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1073 unsigned long cmd_addr;
1074 struct cfi_private *cfi = map->fldrv_priv;
1079 /* Ensure cmd read/writes are aligned. */
1080 cmd_addr = adr & ~(map_bankwidth(map)-1);
1082 mutex_lock(&chip->mutex);
1083 ret = get_chip(map, chip, cmd_addr, FL_READY);
1085 mutex_unlock(&chip->mutex);
1089 if (chip->state != FL_POINT && chip->state != FL_READY) {
1090 map_write(map, CMD(0xf0), cmd_addr);
1091 chip->state = FL_READY;
1094 map_copy_from(map, buf, adr, len);
1096 put_chip(map, chip, cmd_addr);
1098 mutex_unlock(&chip->mutex);
1103 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1105 struct map_info *map = mtd->priv;
1106 struct cfi_private *cfi = map->fldrv_priv;
1111 /* ofs: offset within the first chip that the first read should start */
1112 chipnum = (from >> cfi->chipshift);
1113 ofs = from - (chipnum << cfi->chipshift);
1116 unsigned long thislen;
1118 if (chipnum >= cfi->numchips)
1121 if ((len + ofs -1) >> cfi->chipshift)
1122 thislen = (1<<cfi->chipshift) - ofs;
1126 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1141 static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1143 DECLARE_WAITQUEUE(wait, current);
1144 unsigned long timeo = jiffies + HZ;
1145 struct cfi_private *cfi = map->fldrv_priv;
1148 mutex_lock(&chip->mutex);
1150 if (chip->state != FL_READY){
1151 set_current_state(TASK_UNINTERRUPTIBLE);
1152 add_wait_queue(&chip->wq, &wait);
1154 mutex_unlock(&chip->mutex);
1157 remove_wait_queue(&chip->wq, &wait);
1158 timeo = jiffies + HZ;
1165 chip->state = FL_READY;
1167 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1168 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1169 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1171 map_copy_from(map, buf, adr, len);
1173 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1174 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1175 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1176 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1179 mutex_unlock(&chip->mutex);
1184 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1186 struct map_info *map = mtd->priv;
1187 struct cfi_private *cfi = map->fldrv_priv;
1192 /* ofs: offset within the first chip that the first read should start */
1193 /* 8 secsi bytes per chip */
1198 unsigned long thislen;
1200 if (chipnum >= cfi->numchips)
1203 if ((len + ofs -1) >> 3)
1204 thislen = (1<<3) - ofs;
1208 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1223 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
1225 struct cfi_private *cfi = map->fldrv_priv;
1226 unsigned long timeo = jiffies + HZ;
1228 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
 * have a max write time of a few hundred usec). However, we should
1230 * use the maximum timeout value given by the chip at probe time
 * instead. Unfortunately, struct flchip does not have a field for the
 * maximum timeout, only for the typical one, which can be far too short
 * depending on the conditions. The ' + 1' is to avoid having a
1234 * timeout of 0 jiffies if HZ is smaller than 1000.
1236 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1243 mutex_lock(&chip->mutex);
1244 ret = get_chip(map, chip, adr, FL_WRITING);
1246 mutex_unlock(&chip->mutex);
1250 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1251 __func__, adr, datum.x[0] );
1254 * Check for a NOP for the case when the datum to write is already
1255 * present - it saves time and works around buggy chips that corrupt
1256 * data at other locations when 0xff is written to a location that
1257 * already contains 0xff.
1259 oldd = map_read(map, adr);
1260 if (map_word_equal(map, oldd, datum)) {
1261 pr_debug("MTD %s(): NOP\n",
1266 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1268 xip_disable(map, chip, adr);
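	/* Standard AMD/Fujitsu word program sequence: two unlock cycles,
	 * the 0xA0 program command, then the data word itself */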
1270 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1271 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1272 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1273 map_write(map, datum, adr);
1274 chip->state = FL_WRITING;
1276 INVALIDATE_CACHE_UDELAY(map, chip,
1277 adr, map_bankwidth(map),
1278 chip->word_write_time);
1280 /* See comment above for timeout value. */
1281 timeo = jiffies + uWriteTimeout;
1283 if (chip->state != FL_WRITING) {
1284 /* Someone's suspended the write. Sleep */
1285 DECLARE_WAITQUEUE(wait, current);
1287 set_current_state(TASK_UNINTERRUPTIBLE);
1288 add_wait_queue(&chip->wq, &wait);
1289 mutex_unlock(&chip->mutex);
1291 remove_wait_queue(&chip->wq, &wait);
1292 timeo = jiffies + (HZ / 2); /* FIXME */
1293 mutex_lock(&chip->mutex);
1297 if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
1298 xip_enable(map, chip, adr);
1299 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1300 xip_disable(map, chip, adr);
1304 if (chip_ready(map, adr))
1307 /* Latency issues. Drop the lock, wait a while and retry */
1308 UDELAY(map, chip, adr, 1);
1310 /* Did we succeed? */
1311 if (!chip_good(map, adr, datum)) {
1312 /* reset on all failures. */
1313 map_write( map, CMD(0xF0), chip->start );
1314 /* FIXME - should have reset delay before continuing */
1316 if (++retry_cnt <= MAX_WORD_RETRIES)
1321 xip_enable(map, chip, adr);
1323 chip->state = FL_READY;
1325 put_chip(map, chip, adr);
1326 mutex_unlock(&chip->mutex);
1332 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1333 size_t *retlen, const u_char *buf)
1335 struct map_info *map = mtd->priv;
1336 struct cfi_private *cfi = map->fldrv_priv;
1339 unsigned long ofs, chipstart;
1340 DECLARE_WAITQUEUE(wait, current);
1342 chipnum = to >> cfi->chipshift;
1343 ofs = to - (chipnum << cfi->chipshift);
1344 chipstart = cfi->chips[chipnum].start;
1346 /* If it's not bus-aligned, do the first byte write */
1347 if (ofs & (map_bankwidth(map)-1)) {
1348 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1349 int i = ofs - bus_ofs;
1354 mutex_lock(&cfi->chips[chipnum].mutex);
1356 if (cfi->chips[chipnum].state != FL_READY) {
1357 set_current_state(TASK_UNINTERRUPTIBLE);
1358 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1360 mutex_unlock(&cfi->chips[chipnum].mutex);
1363 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1367 /* Load 'tmp_buf' with old contents of flash */
1368 tmp_buf = map_read(map, bus_ofs+chipstart);
1370 mutex_unlock(&cfi->chips[chipnum].mutex);
1372 /* Number of bytes to copy from buffer */
1373 n = min_t(int, len, map_bankwidth(map)-i);
1375 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1377 ret = do_write_oneword(map, &cfi->chips[chipnum],
1387 if (ofs >> cfi->chipshift) {
1390 if (chipnum == cfi->numchips)
1395 /* We are now aligned, write as much as possible */
1396 while(len >= map_bankwidth(map)) {
1399 datum = map_word_load(map, buf);
1401 ret = do_write_oneword(map, &cfi->chips[chipnum],
1406 ofs += map_bankwidth(map);
1407 buf += map_bankwidth(map);
1408 (*retlen) += map_bankwidth(map);
1409 len -= map_bankwidth(map);
1411 if (ofs >> cfi->chipshift) {
1414 if (chipnum == cfi->numchips)
1416 chipstart = cfi->chips[chipnum].start;
1420 /* Write the trailing bytes if any */
1421 if (len & (map_bankwidth(map)-1)) {
1425 mutex_lock(&cfi->chips[chipnum].mutex);
1427 if (cfi->chips[chipnum].state != FL_READY) {
1428 set_current_state(TASK_UNINTERRUPTIBLE);
1429 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1431 mutex_unlock(&cfi->chips[chipnum].mutex);
1434 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1438 tmp_buf = map_read(map, ofs + chipstart);
1440 mutex_unlock(&cfi->chips[chipnum].mutex);
1442 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1444 ret = do_write_oneword(map, &cfi->chips[chipnum],
1457 * FIXME: interleaved mode not tested, and probably not supported!
1459 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1460 unsigned long adr, const u_char *buf,
1463 struct cfi_private *cfi = map->fldrv_priv;
1464 unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
1466 unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
1468 unsigned long cmd_adr;
1475 mutex_lock(&chip->mutex);
1476 ret = get_chip(map, chip, adr, FL_WRITING);
1478 mutex_unlock(&chip->mutex);
1482 datum = map_word_load(map, buf);
1484 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
1485 __func__, adr, datum.x[0] );
1487 XIP_INVAL_CACHED_RANGE(map, adr, len);
1489 xip_disable(map, chip, cmd_adr);
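	/* Buffered program sequence: two unlock cycles, 0x25 write-to-buffer at
	 * the sector address, the word count minus one, the data words, then
	 * the 0x29 confirm command */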
1491 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1492 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1494 /* Write Buffer Load */
1495 map_write(map, CMD(0x25), cmd_adr);
1497 chip->state = FL_WRITING_TO_BUFFER;
1499 /* Write length of data to come */
1500 words = len / map_bankwidth(map);
1501 map_write(map, CMD(words - 1), cmd_adr);
1504 while(z < words * map_bankwidth(map)) {
1505 datum = map_word_load(map, buf);
1506 map_write(map, datum, adr + z);
1508 z += map_bankwidth(map);
1509 buf += map_bankwidth(map);
1511 z -= map_bankwidth(map);
1515 /* Write Buffer Program Confirm: GO GO GO */
1516 map_write(map, CMD(0x29), cmd_adr);
1517 chip->state = FL_WRITING;
1519 INVALIDATE_CACHE_UDELAY(map, chip,
1520 adr, map_bankwidth(map),
1521 chip->word_write_time);
1523 timeo = jiffies + uWriteTimeout;
1526 if (chip->state != FL_WRITING) {
1527 /* Someone's suspended the write. Sleep */
1528 DECLARE_WAITQUEUE(wait, current);
1530 set_current_state(TASK_UNINTERRUPTIBLE);
1531 add_wait_queue(&chip->wq, &wait);
1532 mutex_unlock(&chip->mutex);
1534 remove_wait_queue(&chip->wq, &wait);
1535 timeo = jiffies + (HZ / 2); /* FIXME */
1536 mutex_lock(&chip->mutex);
1540 if (time_after(jiffies, timeo) && !chip_ready(map, adr))
1543 if (chip_ready(map, adr)) {
1544 xip_enable(map, chip, adr);
1548 /* Latency issues. Drop the lock, wait a while and retry */
1549 UDELAY(map, chip, adr, 1);
1553 * Recovery from write-buffer programming failures requires
1554 * the write-to-buffer-reset sequence. Since the last part
1555 * of the sequence also works as a normal reset, we can run
1556 * the same commands regardless of why we are here.
1558 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
1560 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1561 cfi->device_type, NULL);
1562 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1563 cfi->device_type, NULL);
1564 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
1565 cfi->device_type, NULL);
1566 xip_enable(map, chip, adr);
1567 /* FIXME - should have reset delay before continuing */
1569 printk(KERN_WARNING "MTD %s(): software timeout, address:0x%.8lx.\n",
1574 chip->state = FL_READY;
1576 put_chip(map, chip, adr);
1577 mutex_unlock(&chip->mutex);
1583 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
1584 size_t *retlen, const u_char *buf)
1586 struct map_info *map = mtd->priv;
1587 struct cfi_private *cfi = map->fldrv_priv;
1588 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1593 chipnum = to >> cfi->chipshift;
1594 ofs = to - (chipnum << cfi->chipshift);
1596 /* If it's not bus-aligned, do the first word write */
1597 if (ofs & (map_bankwidth(map)-1)) {
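		/* Number of bytes needed to reach the next bus-width boundary */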
1598 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
1599 if (local_len > len)
1601 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1602 local_len, retlen, buf);
1609 if (ofs >> cfi->chipshift) {
1612 if (chipnum == cfi->numchips)
	/* The write buffer is worth using only if there is more than one word to write... */
1618 while (len >= map_bankwidth(map) * 2) {
1619 /* We must not cross write block boundaries */
1620 int size = wbufsize - (ofs & (wbufsize-1));
1624 if (size % map_bankwidth(map))
1625 size -= size % map_bankwidth(map);
1627 ret = do_write_buffer(map, &cfi->chips[chipnum],
1637 if (ofs >> cfi->chipshift) {
1640 if (chipnum == cfi->numchips)
1646 size_t retlen_dregs = 0;
1648 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
1649 len, &retlen_dregs, buf);
1651 *retlen += retlen_dregs;
1659 * Wait for the flash chip to become ready to write data
1661 * This is only called during the panic_write() path. When panic_write()
1662 * is called, the kernel is in the process of a panic, and will soon be
1663 * dead. Therefore we don't take any locks, and attempt to get access
1664 * to the chip as soon as possible.
1666 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
1669 struct cfi_private *cfi = map->fldrv_priv;
1674 * If the driver thinks the chip is idle, and no toggle bits
1675 * are changing, then the chip is actually idle for sure.
1677 if (chip->state == FL_READY && chip_ready(map, adr))
1681 * Try several times to reset the chip and then wait for it
1682 * to become idle. The upper limit of a few milliseconds of
1683 * delay isn't a big problem: the kernel is dying anyway. It
1684 * is more important to save the messages.
1686 while (retries > 0) {
1687 const unsigned long timeo = (HZ / 1000) + 1;
1689 /* send the reset command */
1690 map_write(map, CMD(0xF0), chip->start);
1692 /* wait for the chip to become ready */
1693 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
1694 if (chip_ready(map, adr))
1701 /* the chip never became ready */
1706 * Write out one word of data to a single flash chip during a kernel panic
1708 * This is only called during the panic_write() path. When panic_write()
1709 * is called, the kernel is in the process of a panic, and will soon be
1710 * dead. Therefore we don't take any locks, and attempt to get access
1711 * to the chip as soon as possible.
1713 * The implementation of this routine is intentionally similar to
1714 * do_write_oneword(), in order to ease code maintenance.
1716 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
1717 unsigned long adr, map_word datum)
1719 const unsigned long uWriteTimeout = (HZ / 1000) + 1;
1720 struct cfi_private *cfi = map->fldrv_priv;
1728 ret = cfi_amdstd_panic_wait(map, chip, adr);
1732 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
1733 __func__, adr, datum.x[0]);
1736 * Check for a NOP for the case when the datum to write is already
1737 * present - it saves time and works around buggy chips that corrupt
1738 * data at other locations when 0xff is written to a location that
1739 * already contains 0xff.
1741 oldd = map_read(map, adr);
1742 if (map_word_equal(map, oldd, datum)) {
1743 pr_debug("MTD %s(): NOP\n", __func__);
1750 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1751 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1752 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1753 map_write(map, datum, adr);
1755 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
1756 if (chip_ready(map, adr))
1762 if (!chip_good(map, adr, datum)) {
1763 /* reset on all failures. */
1764 map_write(map, CMD(0xF0), chip->start);
1765 /* FIXME - should have reset delay before continuing */
1767 if (++retry_cnt <= MAX_WORD_RETRIES)
1779 * Write out some data during a kernel panic
1781 * This is used by the mtdoops driver to save the dying messages from a
1782 * kernel which has panic'd.
1784 * This routine ignores all of the locking used throughout the rest of the
1785 * driver, in order to ensure that the data gets written out no matter what
1786 * state this driver (and the flash chip itself) was in when the kernel crashed.
1788 * The implementation of this routine is intentionally similar to
1789 * cfi_amdstd_write_words(), in order to ease code maintenance.
1791 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
1792 size_t *retlen, const u_char *buf)
1794 struct map_info *map = mtd->priv;
1795 struct cfi_private *cfi = map->fldrv_priv;
1796 unsigned long ofs, chipstart;
1800 chipnum = to >> cfi->chipshift;
1801 ofs = to - (chipnum << cfi->chipshift);
1802 chipstart = cfi->chips[chipnum].start;
1804 /* If it's not bus aligned, do the first byte write */
1805 if (ofs & (map_bankwidth(map) - 1)) {
1806 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
1807 int i = ofs - bus_ofs;
1811 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
1815 /* Load 'tmp_buf' with old contents of flash */
1816 tmp_buf = map_read(map, bus_ofs + chipstart);
1818 /* Number of bytes to copy from buffer */
1819 n = min_t(int, len, map_bankwidth(map) - i);
1821 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1823 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1833 if (ofs >> cfi->chipshift) {
1836 if (chipnum == cfi->numchips)
1841 /* We are now aligned, write as much as possible */
1842 while (len >= map_bankwidth(map)) {
1845 datum = map_word_load(map, buf);
1847 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1852 ofs += map_bankwidth(map);
1853 buf += map_bankwidth(map);
1854 (*retlen) += map_bankwidth(map);
1855 len -= map_bankwidth(map);
1857 if (ofs >> cfi->chipshift) {
1860 if (chipnum == cfi->numchips)
1863 chipstart = cfi->chips[chipnum].start;
1867 /* Write the trailing bytes if any */
1868 if (len & (map_bankwidth(map) - 1)) {
1871 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
1875 tmp_buf = map_read(map, ofs + chipstart);
1877 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1879 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
1892 * Handle devices with one erase region, that only implement
1893 * the chip erase command.
1895 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1897 struct cfi_private *cfi = map->fldrv_priv;
1898 unsigned long timeo = jiffies + HZ;
1899 unsigned long int adr;
1900 DECLARE_WAITQUEUE(wait, current);
1903 adr = cfi->addr_unlock1;
1905 mutex_lock(&chip->mutex);
1906 ret = get_chip(map, chip, adr, FL_WRITING);
1908 mutex_unlock(&chip->mutex);
1912 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
1913 __func__, chip->start );
1915 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
1917 xip_disable(map, chip, adr);
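	/* Six-cycle chip erase sequence: unlock, unlock, 0x80 erase setup,
	 * unlock, unlock, 0x10 chip erase */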
1919 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1920 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1921 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1922 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1923 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1924 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1926 chip->state = FL_ERASING;
1927 chip->erase_suspended = 0;
1928 chip->in_progress_block_addr = adr;
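	/* Wait roughly half the typical erase time (erase_time is in ms, the
	 * delay argument is in microseconds) before starting to poll */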
1930 INVALIDATE_CACHE_UDELAY(map, chip,
1932 chip->erase_time*500);
1934 timeo = jiffies + (HZ*20);
1937 if (chip->state != FL_ERASING) {
1938 /* Someone's suspended the erase. Sleep */
1939 set_current_state(TASK_UNINTERRUPTIBLE);
1940 add_wait_queue(&chip->wq, &wait);
1941 mutex_unlock(&chip->mutex);
1943 remove_wait_queue(&chip->wq, &wait);
1944 mutex_lock(&chip->mutex);
1947 if (chip->erase_suspended) {
1948 /* This erase was suspended and resumed.
1949 Adjust the timeout */
1950 timeo = jiffies + (HZ*20); /* FIXME */
1951 chip->erase_suspended = 0;
1954 if (chip_ready(map, adr))
1957 if (time_after(jiffies, timeo)) {
1958 printk(KERN_WARNING "MTD %s(): software timeout\n",
1963 /* Latency issues. Drop the lock, wait a while and retry */
1964 UDELAY(map, chip, adr, 1000000/HZ);
1966 /* Did we succeed? */
1967 if (!chip_good(map, adr, map_word_ff(map))) {
1968 /* reset on all failures. */
1969 map_write( map, CMD(0xF0), chip->start );
1970 /* FIXME - should have reset delay before continuing */
1975 chip->state = FL_READY;
1976 xip_enable(map, chip, adr);
1978 put_chip(map, chip, adr);
1979 mutex_unlock(&chip->mutex);
1985 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
1987 struct cfi_private *cfi = map->fldrv_priv;
1988 unsigned long timeo = jiffies + HZ;
1989 DECLARE_WAITQUEUE(wait, current);
1994 mutex_lock(&chip->mutex);
1995 ret = get_chip(map, chip, adr, FL_ERASING);
1997 mutex_unlock(&chip->mutex);
2001 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2004 XIP_INVAL_CACHED_RANGE(map, adr, len);
2006 xip_disable(map, chip, adr);
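	/* Sector erase: two unlock cycles, 0x80 erase setup, two more unlock
	 * cycles, then the sector erase command written to the sector address */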
2008 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2009 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2010 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2011 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2012 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2013 map_write(map, cfi->sector_erase_cmd, adr);
2015 chip->state = FL_ERASING;
2016 chip->erase_suspended = 0;
2017 chip->in_progress_block_addr = adr;
2019 INVALIDATE_CACHE_UDELAY(map, chip,
2021 chip->erase_time*500);
2023 timeo = jiffies + (HZ*20);
2026 if (chip->state != FL_ERASING) {
2027 /* Someone's suspended the erase. Sleep */
2028 set_current_state(TASK_UNINTERRUPTIBLE);
2029 add_wait_queue(&chip->wq, &wait);
2030 mutex_unlock(&chip->mutex);
2032 remove_wait_queue(&chip->wq, &wait);
2033 mutex_lock(&chip->mutex);
2036 if (chip->erase_suspended) {
2037 /* This erase was suspended and resumed.
2038 Adjust the timeout */
2039 timeo = jiffies + (HZ*20); /* FIXME */
2040 chip->erase_suspended = 0;
2043 if (chip_ready(map, adr)) {
2044 xip_enable(map, chip, adr);
2048 if (time_after(jiffies, timeo)) {
2049 xip_enable(map, chip, adr);
2050 printk(KERN_WARNING "MTD %s(): software timeout\n",
2055 /* Latency issues. Drop the lock, wait a while and retry */
2056 UDELAY(map, chip, adr, 1000000/HZ);
2058 /* Did we succeed? */
2059 if (!chip_good(map, adr, map_word_ff(map))) {
2060 /* reset on all failures. */
2061 map_write( map, CMD(0xF0), chip->start );
2062 /* FIXME - should have reset delay before continuing */
2067 chip->state = FL_READY;
2069 put_chip(map, chip, adr);
2070 mutex_unlock(&chip->mutex);
2075 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2077 unsigned long ofs, len;
2083 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2087 instr->state = MTD_ERASE_DONE;
2088 mtd_erase_callback(instr);
2094 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2096 struct map_info *map = mtd->priv;
2097 struct cfi_private *cfi = map->fldrv_priv;
2100 if (instr->addr != 0)
2103 if (instr->len != mtd->size)
2106 ret = do_erase_chip(map, &cfi->chips[0]);
2110 instr->state = MTD_ERASE_DONE;
2111 mtd_erase_callback(instr);
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	pr_debug("MTD %s(): UNLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
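/*
 * These two wrappers are not installed unconditionally: a fixup applied
 * during probe (fixup_use_atmel_lock(), earlier in this file) hooks them up
 * as mtd->_lock / mtd->_unlock for Atmel parts such as the AT49BV6416,
 * whose sectors power up locked and are locked/unlocked with the 0x40/0x70
 * commands used above.
 */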
/*
 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
 */

struct ppb_lock {
	struct flchip *chip;
	loff_t offset;
	int locked;
};

#define MAX_SECTORS			512

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *)1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *)2)
#define DO_XXLOCK_ONEBLOCK_GETLOCK	((void *)3)
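/*
 * do_ppb_xxlock() below is shared by the lock, unlock and status paths; the
 * thunk pointer passed through cfi_varsize_frob() selects which of the three
 * operations to perform.  MAX_SECTORS bounds the scratch table of ppb_lock
 * entries that cfi_ppb_unlock() uses to remember which sectors were locked
 * before the (all-sector) PPB unlock, so they can be re-locked afterwards.
 */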
static int __maybe_unused do_ppb_xxlock(struct map_info *map,
					struct flchip *chip,
					unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* PPB entry command */
	cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		chip->state = FL_LOCKING;
		map_write(map, CMD(0xA0), chip->start + adr);
		map_write(map, CMD(0x00), chip->start + adr);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		/*
		 * Unlocking of one specific sector is not supported, so we
		 * have to unlock all sectors of this device instead
		 */
		chip->state = FL_UNLOCKING;
		map_write(map, CMD(0x80), chip->start);
		map_write(map, CMD(0x30), chip->start);
	} else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
		chip->state = FL_JEDEC_QUERY;
		/* Return locked status: 0->locked, 1->unlocked */
		ret = !cfi_read_query(map, adr);
	} else
		BUG();

	/*
	 * Wait for some time as unlocking of all sectors takes quite long
	 */
	timeo = jiffies + msecs_to_jiffies(2000);	/* 2s max (un)locking */
	for (;;) {
		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
			ret = -EIO;
			break;
		}

		UDELAY(map, chip, adr, 1);
	}

	/* Exit BC commands */
	map_write(map, CMD(0x90), chip->start);
	map_write(map, CMD(0x00), chip->start);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	mutex_unlock(&chip->mutex);

	return ret;
}
static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
				       uint64_t len)
{
	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
				DO_XXLOCK_ONEBLOCK_LOCK);
}
static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
					 uint64_t len)
{
	struct mtd_erase_region_info *regions = mtd->eraseregions;
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct ppb_lock *sect;
	unsigned long adr;
	loff_t offset;
	uint64_t length;
	int chipnum;
	int i;
	int sectors;
	int ret;

	/*
	 * PPB unlocking always unlocks all sectors of the flash chip.
	 * We need to re-lock all previously locked sectors. So lets
	 * first check the locking status of all sectors and save
	 * it for future use.
	 */
	sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
	if (!sect)
		return -ENOMEM;

	/*
	 * This code to walk all sectors is a slightly modified version
	 * of the cfi_varsize_frob() code.
	 */
	i = 0;
	chipnum = 0;
	adr = 0;
	sectors = 0;
	offset = 0;
	length = mtd->size;

	while (length) {
		int size = regions[i].erasesize;

		/*
		 * Only test sectors that shall not be unlocked. The other
		 * sectors shall be unlocked, so lets keep their locking
		 * status at "unlocked" (locked=0) for the final re-locking.
		 */
		if ((adr < ofs) || (adr >= (ofs + len))) {
			sect[sectors].chip = &cfi->chips[chipnum];
			sect[sectors].offset = offset;
			sect[sectors].locked = do_ppb_xxlock(
				map, &cfi->chips[chipnum], adr, 0,
				DO_XXLOCK_ONEBLOCK_GETLOCK);
		}

		adr += size;
		offset += size;
		length -= size;

		if (offset == regions[i].offset + size * regions[i].numblocks)
			i++;

		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}

		sectors++;
		if (sectors >= MAX_SECTORS) {
			printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
			       MAX_SECTORS);
			kfree(sect);
			return -EINVAL;
		}
	}

	/* Now unlock the whole chip */
	ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
			       DO_XXLOCK_ONEBLOCK_UNLOCK);
	if (ret) {
		kfree(sect);
		return ret;
	}

	/*
	 * PPB unlocking always unlocks all sectors of the flash chip.
	 * We need to re-lock all previously locked sectors.
	 */
	for (i = 0; i < sectors; i++) {
		if (sect[i].locked)
			do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
				      DO_XXLOCK_ONEBLOCK_LOCK);
	}

	kfree(sect);
	return ret;
}
static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
					    uint64_t len)
{
	return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
				DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
}
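/*
 * Example (not part of the driver): if the setup code has installed the PPB
 * handlers as this mtd's _lock/_unlock/_is_locked operations, user space
 * reaches them through the standard MTD character-device ioctls.  A sketch,
 * assuming fd is an open /dev/mtdN and using struct erase_info_user from
 * mtd-abi.h:
 *
 *	struct erase_info_user ei = { .start = ofs, .length = len };
 *	ioctl(fd, MEMUNLOCK, &ei);	// -> mtd_unlock() -> cfi_ppb_unlock()
 *	ioctl(fd, MEMLOCK, &ei);	// -> mtd_lock()   -> cfi_ppb_lock()
 */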
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}
/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}
static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}
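/*
 * The notifier above is attached to mtd->reboot_notifier (with
 * .notifier_call = cfi_amdstd_reboot) and registered when the MTD is set up
 * earlier in this file, so that a reboot drops the chips back into read
 * array mode; cfi_amdstd_destroy() below unregisters it again on driver
 * unload.
 */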
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");