/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.165 2005/02/05 02:06:15 nico Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
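/* Buffered writes program up to a full write buffer per command cycle and
 * are typically much faster than word-at-a-time programming, so forcing
 * word writes is mainly useful when debugging the buffer path. */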
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080	0x002F
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
				  size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};
/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);
	}

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);
	}

	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}
static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
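/* Per the CFI geometry query, each EraseRegionInfo word encodes the
 * region's block count minus one in its low 16 bits and the block size
 * in 256-byte units in its high 16 bits; the 0x3e written above thus
 * advertises 63 erase blocks for the second region. */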
static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
	}
}
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device IDs are
	 * common as well.  This table picks up all the cases
	 * where we know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
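		/* EraseRegionInfo keeps the block size divided by 256 in its
		 * high half, so ((info >> 8) & ~0xff) recovers the size in
		 * bytes; the low half holds the block count minus one, hence
		 * the +1 above. */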
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
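		/* chipshift is log2 of the chip size, so carving the chip
		 * into numparts equal partitions just subtracts
		 * log2(numparts); __ffs() yields that log2 on the assumption
		 * that numparts is a power of two. */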
		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       __FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
		       map->name, cfi->numchips, cfi->interleave,
		       newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
/*
 *  *********** CHIP ACCESS FUNCTIONS  ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
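	/* Intel status register layout: SR.7 = WSM ready, SR.6 = erase
	 * suspended, SR.5 = erase error, SR.4 = program error, SR.3 = Vpp
	 * low, SR.2 = program suspended, SR.1 = block locked.  CMD(0x80)
	 * thus tests SR.7 replicated across every interleaved chip. */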
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
		/*
		 * OK. We have the possibility of contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Also, configuring MTD CFI
 * support to a single buswidth and a single interleave is recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when the wait delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * an XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Activating this XIP support changes the way the code works a bit. For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no need to
 * worry about the presence of add_wait_queue() or schedule() calls from
 * within a couple of xip_disable()'d areas of code, like in
 * do_erase_oneblock for example.  The queueing and scheduling are always
 * happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where the flash
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return 0;
}
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) from POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram cfi_intelext_read_prot_reg (struct mtd_info *mtd,
						loff_t from, size_t len,
						size_t *retlen,
						u_char *buf,
						int base_offst, int reg_sz)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int count = len;
	int chip_num, offst;
	int ret;

	chip_num = ((unsigned int)from/reg_sz);
	offst = from - (reg_sz*chip_num)+base_offst;

	while (count) {
		/* Calculate which chip & protection register offset we need */

		if (chip_num >= cfi->numchips)
			goto out;

		chip = &cfi->chips[chip_num];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
		if (ret) {
			spin_unlock(chip->mutex);
			return (len-count)?:ret;
		}

		xip_disable(map, chip, chip->start);

		if (chip->state != FL_JEDEC_QUERY) {
			map_write(map, CMD(0x90), chip->start);
			chip->state = FL_JEDEC_QUERY;
		}

		while (count && ((offst-base_offst) < reg_sz)) {
			*buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
			buf++;
			offst += ofs_factor;
			count--;
		}

		xip_enable(map, chip, chip->start);
		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		/* Move on to the next chip */
		chip_num++;
		offst = base_offst;
	}

 out:
	*retlen = len-count;
	return 0;
}
static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;

	/* Check that we actually have some protection registers */
	if(!extp || !(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=(1<<extp->FactProtRegSize);
	reg_sz=(1<<extp->UserProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;

	/* Check that we actually have some protection registers */
	if(!extp || !(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=0;
	reg_sz=(1<<extp->FactProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, CMD(0x40), adr);
	map_write(map, datum, adr);
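	/* 0x40 is the Word Program setup command; the write of datum that
	 * follows it supplies both the target address and the data, and
	 * programming then runs in the background while we poll status. */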
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;
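	/* The adjustments above tune the nominal programming delay
	 * adaptively: it is shortened when the chip was already done on the
	 * first status poll, and lengthened when extra polling rounds were
	 * needed. */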
	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);
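		/* 0xE8 is the Write to Buffer setup command.  The chip
		 * answers with the extended status register, whose bit 7
		 * (buffer available) lines up with the 0x80 ready mask
		 * polled just below. */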
		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr );
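	/* The buffer-program word count is encoded as N-1.  With len an
	 * exact multiple of the bus width, bytes == 0 and words - 1 is
	 * sent; a trailing partial word makes the total words + 1, so
	 * sending plain "words" is again N-1. */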
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		z++;
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);
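	/* Block erase is a two-cycle command: 0x20 (Erase Setup) at any
	 * address within the block, then 0xD0 (Erase Confirm) to start it. */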
	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, adr, chip->erase_time*1000/2);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
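		/* 0x3a = SR.5|SR.4|SR.3|SR.1: erase error, program error,
		 * Vpp low and block locked -- the error conditions decoded
		 * individually below. */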
		unsigned char chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				put_chip(map, chip, adr);
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	} else {
		xip_enable(map, chip, adr);
	}

 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}
int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	xip_disable(map, chip, adr+(2*ofs_factor));
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
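	/* 0x60 is the Set/Clear Block Lock Bits setup command; it must be
	 * followed by 0x01 to lock or 0xD0 to unlock the addressed block. */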
	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	spin_unlock(chip->mutex);
	UDELAY(map, chip, adr, 1000000/HZ);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, 0);
#endif

	return ret;
}
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
			 ofs, len, 0);
#endif

	return ret;
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

static int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
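	/* The Intel Standard command set (ID 0x0003) is close enough to the
	 * extended set that the same implementation serves both, so it is
	 * registered under that name too. */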
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);

	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");