]> git.karo-electronics.de Git - mv-sheeva.git/blob - drivers/mtd/chips/cfi_cmdset_0001.c
[MTD] Make OTP actually work.
[mv-sheeva.git] / drivers / mtd / chips / cfi_cmdset_0001.c
1 /*
2  * Common Flash Interface support:
3  *   Intel Extended Vendor Command Set (ID 0x0001)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * $Id: cfi_cmdset_0001.c,v 1.168 2005/02/17 20:34:59 nico Exp $
8  *
9  * 
10  * 10/10/2000   Nicolas Pitre <nico@cam.org>
11  *      - completely revamped method functions so they are aware and
12  *        independent of the flash geometry (buswidth, interleave, etc.)
13  *      - scalability vs code size is completely set at compile-time
14  *        (see include/linux/mtd/cfi.h for selection)
15  *      - optimized write buffer method
16  * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
17  *      - reworked lock/unlock/erase support for var size flash
18  */
19
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
24 #include <linux/init.h>
25 #include <asm/io.h>
26 #include <asm/byteorder.h>
27
28 #include <linux/errno.h>
29 #include <linux/slab.h>
30 #include <linux/delay.h>
31 #include <linux/interrupt.h>
32 #include <linux/mtd/xip.h>
33 #include <linux/mtd/map.h>
34 #include <linux/mtd/mtd.h>
35 #include <linux/mtd/compatmac.h>
36 #include <linux/mtd/cfi.h>
37
38 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
40
41 // debugging, turns off buffer write mode if set to 1
42 #define FORCE_WORD_WRITE 0
43
44 #define MANUFACTURER_INTEL      0x0089
45 #define I82802AB        0x00ad
46 #define I82802AC        0x00ac
47 #define MANUFACTURER_ST         0x0020
48 #define M50LPW080       0x002F
49
50 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
51 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
52 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
53 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
54 static void cfi_intelext_sync (struct mtd_info *);
55 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
56 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
57 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
58 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
59 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
60 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
61 static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
62                                             struct otp_info *, size_t);
63 static int cfi_intelext_get_user_prot_info (struct mtd_info *,
64                                             struct otp_info *, size_t);
65 static int cfi_intelext_suspend (struct mtd_info *);
66 static void cfi_intelext_resume (struct mtd_info *);
67
68 static void cfi_intelext_destroy(struct mtd_info *);
69
70 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
71
72 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
73 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
74
75 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
76                      size_t *retlen, u_char **mtdbuf);
77 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
78                         size_t len);
79
80 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
81 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
82 #include "fwh_lock.h"
83
84
85
86 /*
87  *  *********** SETUP AND PROBE BITS  ***********
88  */
89
90 static struct mtd_chip_driver cfi_intelext_chipdrv = {
91         .probe          = NULL, /* Not usable directly */
92         .destroy        = cfi_intelext_destroy,
93         .name           = "cfi_cmdset_0001",
94         .module         = THIS_MODULE
95 };
96
97 /* #define DEBUG_LOCK_BITS */
98 /* #define DEBUG_CFI_FEATURES */
99
100 #ifdef DEBUG_CFI_FEATURES
/*
 * Dump the Intel/Sharp extended query (PRI) table in human-readable
 * form.  Debug aid only, compiled in when DEBUG_CFI_FEATURES is
 * defined above.  Bits without a known name are reported generically
 * so new feature bits are still visible.
 */
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	/* Bits 0-9 were named above; anything else is unrecognized. */
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	/* Only bit 0 has a defined meaning here. */
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	/* Voltages are encoded BCD-style: high nibble volts, low nibble
	   tenths of a volt. */
	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
141 #endif
142
143 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
144 /* Some Intel Strata Flash prior to FPO revision C has bugs in this area */ 
145 static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
146 {
147         struct map_info *map = mtd->priv;
148         struct cfi_private *cfi = map->fldrv_priv;
149         struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
150
151         printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
152                             "erase on write disabled.\n");
153         extp->SuspendCmdSupport &= ~1;
154 }
155 #endif
156
157 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
158 static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
159 {
160         struct map_info *map = mtd->priv;
161         struct cfi_private *cfi = map->fldrv_priv;
162         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
163
164         if (cfip && (cfip->FeatureSupport&4)) {
165                 cfip->FeatureSupport &= ~4;
166                 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
167         }
168 }
169 #endif
170
171 static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
172 {
173         struct map_info *map = mtd->priv;
174         struct cfi_private *cfi = map->fldrv_priv;
175         
176         cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
177         cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
178 }
179
180 static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
181 {
182         struct map_info *map = mtd->priv;
183         struct cfi_private *cfi = map->fldrv_priv;
184         
185         /* Note this is done after the region info is endian swapped */
186         cfi->cfiq->EraseRegionInfo[1] =
187                 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
188 };
189
190 static void fixup_use_point(struct mtd_info *mtd, void *param)
191 {
192         struct map_info *map = mtd->priv;
193         if (!mtd->point && map_is_linear(map)) {
194                 mtd->point   = cfi_intelext_point;
195                 mtd->unpoint = cfi_intelext_unpoint;
196         }
197 }
198
199 static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
200 {
201         struct map_info *map = mtd->priv;
202         struct cfi_private *cfi = map->fldrv_priv;
203         if (cfi->cfiq->BufWriteTimeoutTyp) {
204                 printk(KERN_INFO "Using buffer write method\n" );
205                 mtd->write = cfi_intelext_write_buffers;
206         }
207 }
208
/* Fixups applied to CFI-probed chips, terminated by the all-zero
 * sentinel entry.  The #ifdef-guarded entries are only active when the
 * corresponding workaround macro is enabled at the top of this file. */
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL }, 
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};
223
/* Fixups for JEDEC-probed chips: these firmware-hub parts use the
 * lock/unlock scheme provided by fwh_lock.h. */
static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
/* Generic fixups applied after both of the tables above, regardless of
 * probe mode. */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is likely that the device ids are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
239
/*
 * Read and byteswap the Intel/Sharp extended query (PRI) table at @adr.
 *
 * cfi_read_pri() only fetches @extp_size bytes.  Version 1.3 tables
 * carry variable-length protection-register, burst-read and partition
 * records in extp->extra[], so we walk those records; whenever the
 * buffer turns out to be too small to contain the next record we free
 * it and retry from "again" with the enlarged size.  A 4KiB cap guards
 * against a corrupt table requesting an absurd size.
 *
 * Returns NULL on failure; on success the caller owns (and must kfree)
 * the returned buffer.
 */
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info (the first otpinfo record is
		   part of struct cfi_pri_intelext itself, hence -1) */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* One variable-length regioninfo record per partition
		   region; each embeds one blockinfo, so add only the
		   (NumBlockTypes - 1) extra ones. */
		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			/* Buffer too small: re-read with the full size. */
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
300
301 /* This routine is made available to other mtd code via
302  * inter_module_register.  It must only be accessed through
303  * inter_module_get which will bump the use count of this module.  The
304  * addresses passed back in cfi are valid as long as the use count of
305  * this module is non-zero, i.e. between inter_module_get and
306  * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
307  */
/*
 * Entry point for the Intel/Sharp extended command set (ID 0x0001).
 * Allocates an mtd_info, installs the default operations, reads the
 * extended query table when the chip was probed in true CFI mode,
 * applies the fixup tables, and seeds per-chip timing values from the
 * CFI query data.  Returns the mtd_info from cfi_intelext_setup(), or
 * NULL on failure (all allocations are released on the error paths).
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		/* Fixups must run after cmdset_priv is installed since
		   several of them modify the extended query table. */
		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	/* CFI timeout fields are log2 encodings; the shift recovers the
	   typical write/erase time per chip. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
382
383 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
384 {
385         struct map_info *map = mtd->priv;
386         struct cfi_private *cfi = map->fldrv_priv;
387         unsigned long offset = 0;
388         int i,j;
389         unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
390
391         //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
392
393         mtd->size = devsize * cfi->numchips;
394
395         mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
396         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) 
397                         * mtd->numeraseregions, GFP_KERNEL);
398         if (!mtd->eraseregions) { 
399                 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
400                 goto setup_err;
401         }
402         
403         for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
404                 unsigned long ernum, ersize;
405                 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
406                 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
407
408                 if (mtd->erasesize < ersize) {
409                         mtd->erasesize = ersize;
410                 }
411                 for (j=0; j<cfi->numchips; j++) {
412                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
413                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
414                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
415                 }
416                 offset += (ersize * ernum);
417         }
418
419         if (offset != devsize) {
420                 /* Argh */
421                 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
422                 goto setup_err;
423         }
424
425         for (i=0; i<mtd->numeraseregions;i++){
426                 printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
427                        i,mtd->eraseregions[i].offset,
428                        mtd->eraseregions[i].erasesize,
429                        mtd->eraseregions[i].numblocks);
430         }
431
432 #ifdef CONFIG_MTD_OTP
433         mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
434         mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
435         mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
436         mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
437         mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
438         mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
439 #endif
440
441         /* This function has the potential to distort the reality
442            a bit and therefore should be called last. */
443         if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
444                 goto setup_err;
445
446         __module_get(THIS_MODULE);
447         return mtd;
448
449  setup_err:
450         if(mtd) {
451                 if(mtd->eraseregions)
452                         kfree(mtd->eraseregions);
453                 kfree(mtd);
454         }
455         kfree(cfi->cmdset_priv);
456         return NULL;
457 }
458
/*
 * Re-shape the cfi_private structure for chips that expose multiple
 * hardware partitions (PRI v1.3 with FeatureSupport bit 9 set): each
 * partition becomes its own virtual flchip, all partitions of one
 * physical chip sharing a flchip_shared arbiter.  Replaces *pcfi (and
 * map->fldrv_priv) with the new structure and frees the old one.
 * Returns 0 on success or a negative errno.
 */
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash ships.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatent code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Walk extp->extra[] with the same record layout that
		   read_pri_intelext() validated. */

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		/* A partition smaller than one erase block means the
		   reported partition count cannot be right. */
		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		/* One shared arbiter per *physical* chip. */
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
562
563 /*
564  *  *********** CHIP ACCESS FUNCTIONS ***********
565  */
566
/*
 * Acquire the right to use @chip for the operation described by @mode
 * (FL_READY, FL_POINT, FL_WRITING, FL_ERASING, FL_OTP_WRITE, ...).
 * Must be called with chip->mutex held; the mutex may be dropped and
 * retaken while waiting.  Returns 0 once the chip is ready for us, or
 * -EIO if it never becomes ready within the timeout.
 *
 * When hardware partitions share one physical chip (chip->priv set),
 * write/erase authority is arbitrated first through the flchip_shared
 * structure.  An erase in progress may be suspended to service this
 * request if the extended query table says the chip supports it.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	/* Status register: 0x80 = ready; 0x01 here is used as the
	   partition-write-in-progress indication checked below. */
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			/* Recurse on the contender while holding its mutex
			   instead of ours — lock order matters here. */
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}
		/* fall through: chip is ready */

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		/* Only suspend the erase if the chip supports erase
		   suspend at all, and only for reads/points — or for
		   writes when program-after-erase-suspend is allowed. */
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.  */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through: otherwise wait like everyone else */

	default:
	sleep:
		/* Chip is busy with something we cannot preempt: sleep on
		   the chip's waitqueue until put_chip() wakes us, then
		   restart with a fresh timeout. */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
736
/*
 * put_chip() - release a chip previously claimed with get_chip().
 *
 * Hands write/erase "ownership" back across partitions that share the
 * same physical die (tracked via chip->priv), resumes an erase that was
 * suspended on our behalf, drops VPP when appropriate, and wakes any
 * sleepers on the chip's wait queue.  Called with chip->mutex held.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;	/* referenced by the CMD() macro */

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				/* recurse: the loaner may itself have state to resume */
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			spin_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		spin_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the 
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS 
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow 
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we 
		   do. */
		map_write(map, CMD(0xd0), adr);		/* resume the suspended erase */
		map_write(map, CMD(0x70), adr);		/* back to read-status mode */
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
810
811 #ifdef CONFIG_MTD_XIP
812
813 /*
814  * No interrupt what so ever can be serviced while the flash isn't in array
815  * mode.  This is ensured by the xip_disable() and xip_enable() functions
816  * enclosing any code path where the flash is known not to be in array mode.
817  * And within a XIP disabled code path, only functions marked with __xipram
818  * may be called and nothing else (it's a good thing to inspect generated
819  * assembly to make sure inline functions were actually inlined and that gcc
820  * didn't emit calls to its own support functions). Also configuring MTD CFI
821  * support to a single buswidth and a single interleave is also recommended.
822  * Note that not only IRQs are disabled but the preemption count is also
823  * increased to prevent other locking primitives (namely spin_unlock) from
824  * decrementing the preempt count to zero and scheduling the CPU away while
825  * not in array mode.
826  */
827
/*
 * Enter a no-XIP section: until xip_enable() is called, no code may
 * execute from the flash.  IRQs are masked and the preempt count raised
 * so that spin_unlock() cannot schedule us away while the chip is out
 * of array mode.  The ordering (map_read first) matters: the mapping
 * must be faulted in before we commit to not executing from flash.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	preempt_disable();
	local_irq_disable();
}
836
/*
 * Leave a no-XIP section: put the flash back into array (read) mode if
 * it is not already there, refill the instruction prefetch queue, then
 * unmask IRQs and drop the preempt count raised by xip_disable().
 * The 0xff write must precede local_irq_enable() — an interrupt taken
 * while the chip is in status mode would fetch garbage opcodes.
 */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;	/* used by CMD() */
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);		/* 0xff = read array mode */
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
	preempt_enable();
}
850
851 /*
852  * When a delay is required for the flash operation to complete, the
853  * xip_udelay() function is polling for both the given timeout and pending
854  * (but still masked) hardware interrupts.  Whenever there is an interrupt
855  * pending then the flash erase or write operation is suspended, array mode
856  * restored and interrupts unmasked.  Task scheduling might also happen at that
857  * point.  The CPU eventually returns from the interrupt or the call to
858  * schedule() and the suspended flash operation is resumed for the remaining
859  * of the delay period.
860  *
861  * Warning: this function _will_ fool interrupt latency tracing tools.
862  */
863
static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);	/* SR.7 set = state machine ready */
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		/* Only suspend when an IRQ is actually pending, the chip
		   advertises suspend support for the current operation
		   (FeatureSupport bits), and no other operation is already
		   suspended on an interleaved chip. */
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);		/* suspend */
			map_write(map, CMD(0x70), adr);		/* read status register */
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				/* SR.6 not set: the erase completed instead of
				   suspending — nothing to resume later */
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				/* SR.2 not set: the write completed instead of
				   suspending — nothing to resume later */
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);		/* array mode: XIP is safe again */
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");	/* flush instruction prefetch */
			local_irq_enable();
			preempt_enable();
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			preempt_disable();
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				preempt_enable();
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				preempt_disable();
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);		/* resume */
			map_write(map, CMD(0x70), adr);		/* read status register */
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}
964
965 #define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
966
967 /*
968  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
969  * the flash is actively programming or erasing since we have to poll for
970  * the operation to complete anyway.  We can't do that in a generic way with
971  * a XIP setup so do it before the actual flash operation in this case.
972  */
973 #undef INVALIDATE_CACHED_RANGE
974 #define INVALIDATE_CACHED_RANGE(x...)
975 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
976         do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
977
978 /*
979  * Extra notes:
980  *
981  * Activating this XIP support changes the way the code works a bit.  For
982  * example the code to suspend the current process when concurrent access
983  * happens is never executed because xip_udelay() will always return with the
984  * same chip state as it was entered with.  This is why there is no care for
985  * the presence of add_wait_queue() or schedule() calls from within a couple
986  * xip_disable()'d  areas of code, like in do_erase_oneblock for example.
987  * The queueing and scheduling are always happening within xip_udelay().
988  *
989  * Similarly, get_chip() and put_chip() just happen to always be executed
990  * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
991  * is in array mode, therefore never executing many cases therein and not
992  * causing any problem with XIP.
993  */
994
995 #else
996
/* Non-XIP configuration: the XIP bracketing hooks compile away to nothing */
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

/* Plain polled delay when XIP support is not compiled in */
#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

/* Pre-operation cache invalidation is only needed for the XIP build */
#define XIP_INVAL_CACHED_RANGE(x...)
1003
1004 #endif
1005
1006 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1007 {
1008         unsigned long cmd_addr;
1009         struct cfi_private *cfi = map->fldrv_priv;
1010         int ret = 0;
1011
1012         adr += chip->start;
1013
1014         /* Ensure cmd read/writes are aligned. */ 
1015         cmd_addr = adr & ~(map_bankwidth(map)-1); 
1016
1017         spin_lock(chip->mutex);
1018
1019         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1020
1021         if (!ret) {
1022                 if (chip->state != FL_POINT && chip->state != FL_READY)
1023                         map_write(map, CMD(0xff), cmd_addr);
1024
1025                 chip->state = FL_POINT;
1026                 chip->ref_point_counter++;
1027         }
1028         spin_unlock(chip->mutex);
1029
1030         return ret;
1031 }
1032
1033 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
1034 {
1035         struct map_info *map = mtd->priv;
1036         struct cfi_private *cfi = map->fldrv_priv;
1037         unsigned long ofs;
1038         int chipnum;
1039         int ret = 0;
1040
1041         if (!map->virt || (from + len > mtd->size))
1042                 return -EINVAL;
1043         
1044         *mtdbuf = (void *)map->virt + from;
1045         *retlen = 0;
1046
1047         /* Now lock the chip(s) to POINT state */
1048
1049         /* ofs: offset within the first chip that the first read should start */
1050         chipnum = (from >> cfi->chipshift);
1051         ofs = from - (chipnum << cfi->chipshift);
1052
1053         while (len) {
1054                 unsigned long thislen;
1055
1056                 if (chipnum >= cfi->numchips)
1057                         break;
1058
1059                 if ((len + ofs -1) >> cfi->chipshift)
1060                         thislen = (1<<cfi->chipshift) - ofs;
1061                 else
1062                         thislen = len;
1063
1064                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1065                 if (ret)
1066                         break;
1067
1068                 *retlen += thislen;
1069                 len -= thislen;
1070                 
1071                 ofs = 0;
1072                 chipnum++;
1073         }
1074         return 0;
1075 }
1076
1077 static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
1078 {
1079         struct map_info *map = mtd->priv;
1080         struct cfi_private *cfi = map->fldrv_priv;
1081         unsigned long ofs;
1082         int chipnum;
1083
1084         /* Now unlock the chip(s) POINT state */
1085
1086         /* ofs: offset within the first chip that the first read should start */
1087         chipnum = (from >> cfi->chipshift);
1088         ofs = from - (chipnum <<  cfi->chipshift);
1089
1090         while (len) {
1091                 unsigned long thislen;
1092                 struct flchip *chip;
1093
1094                 chip = &cfi->chips[chipnum];
1095                 if (chipnum >= cfi->numchips)
1096                         break;
1097
1098                 if ((len + ofs -1) >> cfi->chipshift)
1099                         thislen = (1<<cfi->chipshift) - ofs;
1100                 else
1101                         thislen = len;
1102
1103                 spin_lock(chip->mutex);
1104                 if (chip->state == FL_POINT) {
1105                         chip->ref_point_counter--;
1106                         if(chip->ref_point_counter == 0)
1107                                 chip->state = FL_READY;
1108                 } else
1109                         printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */
1110
1111                 put_chip(map, chip, chip->start);
1112                 spin_unlock(chip->mutex);
1113
1114                 len -= thislen;
1115                 ofs = 0;
1116                 chipnum++;
1117         }
1118 }
1119
1120 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1121 {
1122         unsigned long cmd_addr;
1123         struct cfi_private *cfi = map->fldrv_priv;
1124         int ret;
1125
1126         adr += chip->start;
1127
1128         /* Ensure cmd read/writes are aligned. */ 
1129         cmd_addr = adr & ~(map_bankwidth(map)-1); 
1130
1131         spin_lock(chip->mutex);
1132         ret = get_chip(map, chip, cmd_addr, FL_READY);
1133         if (ret) {
1134                 spin_unlock(chip->mutex);
1135                 return ret;
1136         }
1137
1138         if (chip->state != FL_POINT && chip->state != FL_READY) {
1139                 map_write(map, CMD(0xff), cmd_addr);
1140
1141                 chip->state = FL_READY;
1142         }
1143
1144         map_copy_from(map, buf, adr, len);
1145
1146         put_chip(map, chip, cmd_addr);
1147
1148         spin_unlock(chip->mutex);
1149         return 0;
1150 }
1151
1152 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1153 {
1154         struct map_info *map = mtd->priv;
1155         struct cfi_private *cfi = map->fldrv_priv;
1156         unsigned long ofs;
1157         int chipnum;
1158         int ret = 0;
1159
1160         /* ofs: offset within the first chip that the first read should start */
1161         chipnum = (from >> cfi->chipshift);
1162         ofs = from - (chipnum <<  cfi->chipshift);
1163
1164         *retlen = 0;
1165
1166         while (len) {
1167                 unsigned long thislen;
1168
1169                 if (chipnum >= cfi->numchips)
1170                         break;
1171
1172                 if ((len + ofs -1) >> cfi->chipshift)
1173                         thislen = (1<<cfi->chipshift) - ofs;
1174                 else
1175                         thislen = len;
1176
1177                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1178                 if (ret)
1179                         break;
1180
1181                 *retlen += thislen;
1182                 len -= thislen;
1183                 buf += thislen;
1184                 
1185                 ofs = 0;
1186                 chipnum++;
1187         }
1188         return ret;
1189 }
1190
/*
 * Program one bus-width word at chip-relative @adr.  @mode selects the
 * command set: FL_WRITING issues 0x40 (word program), FL_OTP_WRITE
 * issues 0xc0 (protection register program).  Handles write-suspend by
 * sleeping until the operation is resumed.  Returns 0 on success,
 * -EINVAL for an unknown mode, -EIO on timeout, -EROFS if the status
 * register reports a locked block.
 */
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK, write_cmd;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);		/* SR.7 set = state machine ready */
	switch (mode) {
	case FL_WRITING:   write_cmd = CMD(0x40); break;
	case FL_OTP_WRITE: write_cmd = CMD(0xc0); break;
	default: return -EINVAL;
	}

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);		/* program setup */
	map_write(map, datum, adr);		/* the data word itself */
	chip->state = mode;

	/* Wait out the typical programming time before polling */
	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;
		
		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
	/* Adapt the per-word delay estimate: shrink it (never to zero) if
	   the first poll already succeeded, grow it if we had to poll
	   more than once. */
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1) 
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}
1290
1291
1292 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1293 {
1294         struct map_info *map = mtd->priv;
1295         struct cfi_private *cfi = map->fldrv_priv;
1296         int ret = 0;
1297         int chipnum;
1298         unsigned long ofs;
1299
1300         *retlen = 0;
1301         if (!len)
1302                 return 0;
1303
1304         chipnum = to >> cfi->chipshift;
1305         ofs = to  - (chipnum << cfi->chipshift);
1306
1307         /* If it's not bus-aligned, do the first byte write */
1308         if (ofs & (map_bankwidth(map)-1)) {
1309                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1310                 int gap = ofs - bus_ofs;
1311                 int n;
1312                 map_word datum;
1313
1314                 n = min_t(int, len, map_bankwidth(map)-gap);
1315                 datum = map_word_ff(map);
1316                 datum = map_word_load_partial(map, datum, buf, gap, n);
1317
1318                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1319                                                bus_ofs, datum, FL_WRITING);
1320                 if (ret) 
1321                         return ret;
1322
1323                 len -= n;
1324                 ofs += n;
1325                 buf += n;
1326                 (*retlen) += n;
1327
1328                 if (ofs >> cfi->chipshift) {
1329                         chipnum ++; 
1330                         ofs = 0;
1331                         if (chipnum == cfi->numchips)
1332                                 return 0;
1333                 }
1334         }
1335         
1336         while(len >= map_bankwidth(map)) {
1337                 map_word datum = map_word_load(map, buf);
1338
1339                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1340                                        ofs, datum, FL_WRITING);
1341                 if (ret)
1342                         return ret;
1343
1344                 ofs += map_bankwidth(map);
1345                 buf += map_bankwidth(map);
1346                 (*retlen) += map_bankwidth(map);
1347                 len -= map_bankwidth(map);
1348
1349                 if (ofs >> cfi->chipshift) {
1350                         chipnum ++; 
1351                         ofs = 0;
1352                         if (chipnum == cfi->numchips)
1353                                 return 0;
1354                 }
1355         }
1356
1357         if (len & (map_bankwidth(map)-1)) {
1358                 map_word datum;
1359
1360                 datum = map_word_ff(map);
1361                 datum = map_word_load_partial(map, datum, buf, 0, len);
1362
1363                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1364                                        ofs, datum, FL_WRITING);
1365                 if (ret) 
1366                         return ret;
1367                 
1368                 (*retlen) += len;
1369         }
1370
1371         return 0;
1372 }
1373
1374
1375 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, 
1376                                     unsigned long adr, const u_char *buf, int len)
1377 {
1378         struct cfi_private *cfi = map->fldrv_priv;
1379         map_word status, status_OK;
1380         unsigned long cmd_adr, timeo;
1381         int wbufsize, z, ret=0, bytes, words;
1382
1383         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1384         adr += chip->start;
1385         cmd_adr = adr & ~(wbufsize-1);
1386         
1387         /* Let's determine this according to the interleave only once */
1388         status_OK = CMD(0x80);
1389
1390         spin_lock(chip->mutex);
1391         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1392         if (ret) {
1393                 spin_unlock(chip->mutex);
1394                 return ret;
1395         }
1396
1397         XIP_INVAL_CACHED_RANGE(map, adr, len);
1398         ENABLE_VPP(map);
1399         xip_disable(map, chip, cmd_adr);
1400
1401         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1402            [...], the device will not accept any more Write to Buffer commands". 
1403            So we must check here and reset those bits if they're set. Otherwise
1404            we're just pissing in the wind */
1405         if (chip->state != FL_STATUS)
1406                 map_write(map, CMD(0x70), cmd_adr);
1407         status = map_read(map, cmd_adr);
1408         if (map_word_bitsset(map, status, CMD(0x30))) {
1409                 xip_enable(map, chip, cmd_adr);
1410                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1411                 xip_disable(map, chip, cmd_adr);
1412                 map_write(map, CMD(0x50), cmd_adr);
1413                 map_write(map, CMD(0x70), cmd_adr);
1414         }
1415
1416         chip->state = FL_WRITING_TO_BUFFER;
1417
1418         z = 0;
1419         for (;;) {
1420                 map_write(map, CMD(0xe8), cmd_adr);
1421
1422                 status = map_read(map, cmd_adr);
1423                 if (map_word_andequal(map, status, status_OK, status_OK))
1424                         break;
1425
1426                 spin_unlock(chip->mutex);
1427                 UDELAY(map, chip, cmd_adr, 1);
1428                 spin_lock(chip->mutex);
1429
1430                 if (++z > 20) {
1431                         /* Argh. Not ready for write to buffer */
1432                         map_word Xstatus;
1433                         map_write(map, CMD(0x70), cmd_adr);
1434                         chip->state = FL_STATUS;
1435                         Xstatus = map_read(map, cmd_adr);
1436                         /* Odd. Clear status bits */
1437                         map_write(map, CMD(0x50), cmd_adr);
1438                         map_write(map, CMD(0x70), cmd_adr);
1439                         xip_enable(map, chip, cmd_adr);
1440                         printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
1441                                status.x[0], Xstatus.x[0]);
1442                         ret = -EIO;
1443                         goto out;
1444                 }
1445         }
1446
1447         /* Write length of data to come */
1448         bytes = len & (map_bankwidth(map)-1);
1449         words = len / map_bankwidth(map);
1450         map_write(map, CMD(words - !bytes), cmd_adr );
1451
1452         /* Write data */
1453         z = 0;
1454         while(z < words * map_bankwidth(map)) {
1455                 map_word datum = map_word_load(map, buf);
1456                 map_write(map, datum, adr+z);
1457
1458                 z += map_bankwidth(map);
1459                 buf += map_bankwidth(map);
1460         }
1461
1462         if (bytes) {
1463                 map_word datum;
1464
1465                 datum = map_word_ff(map);
1466                 datum = map_word_load_partial(map, datum, buf, 0, bytes);
1467                 map_write(map, datum, adr+z);
1468         }
1469
1470         /* GO GO GO */
1471         map_write(map, CMD(0xd0), cmd_adr);
1472         chip->state = FL_WRITING;
1473
1474         spin_unlock(chip->mutex);
1475         INVALIDATE_CACHED_RANGE(map, adr, len);
1476         UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
1477         spin_lock(chip->mutex);
1478
1479         timeo = jiffies + (HZ/2);
1480         z = 0;
1481         for (;;) {
1482                 if (chip->state != FL_WRITING) {
1483                         /* Someone's suspended the write. Sleep */
1484                         DECLARE_WAITQUEUE(wait, current);
1485                         set_current_state(TASK_UNINTERRUPTIBLE);
1486                         add_wait_queue(&chip->wq, &wait);
1487                         spin_unlock(chip->mutex);
1488                         schedule();
1489                         remove_wait_queue(&chip->wq, &wait);
1490                         timeo = jiffies + (HZ / 2); /* FIXME */
1491                         spin_lock(chip->mutex);
1492                         continue;
1493                 }
1494
1495                 status = map_read(map, cmd_adr);
1496                 if (map_word_andequal(map, status, status_OK, status_OK))
1497                         break;
1498
1499                 /* OK Still waiting */
1500                 if (time_after(jiffies, timeo)) {
1501                         chip->state = FL_STATUS;
1502                         xip_enable(map, chip, cmd_adr);
1503                         printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
1504                         ret = -EIO;
1505                         goto out;
1506                 }
1507                 
1508                 /* Latency issues. Drop the lock, wait a while and retry */
1509                 spin_unlock(chip->mutex);
1510                 UDELAY(map, chip, cmd_adr, 1);
1511                 z++;
1512                 spin_lock(chip->mutex);
1513         }
1514         if (!z) {
1515                 chip->buffer_write_time--;
1516                 if (!chip->buffer_write_time)
1517                         chip->buffer_write_time++;
1518         }
1519         if (z > 1) 
1520                 chip->buffer_write_time++;
1521
1522         /* Done and happy. */
1523         chip->state = FL_STATUS;
1524
1525         /* check for lock bit */
1526         if (map_word_bitsset(map, status, CMD(0x02))) {
1527                 /* clear status */
1528                 map_write(map, CMD(0x50), cmd_adr);
1529                 /* put back into read status register mode */
1530                 map_write(map, CMD(0x70), adr);
1531                 ret = -EROFS;
1532         }
1533
1534         xip_enable(map, chip, cmd_adr);
1535  out:   put_chip(map, chip, cmd_adr);
1536         spin_unlock(chip->mutex);
1537         return ret;
1538 }
1539
/*
 * Buffered write entry point: split [to, to+len) into chunks that never
 * cross a write-buffer boundary, and hand each chunk to do_write_buffer().
 * An unaligned head is handled via the word-write path first.
 */
static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to, 
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* Write-buffer size of one chip, scaled up by the interleave */
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	/* Which chip does the write start in, and at what offset within it */
	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		/* The word-write path accounts for these bytes in *retlen */
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		/* Did the head write run off the end of this chip? */
		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum], 
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		/* Advance to the next chip when this one is exhausted */
		if (ofs >> cfi->chipshift) {
			chipnum ++; 
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}
1603
1604 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1605                                       unsigned long adr, int len, void *thunk)
1606 {
1607         struct cfi_private *cfi = map->fldrv_priv;
1608         map_word status, status_OK;
1609         unsigned long timeo;
1610         int retries = 3;
1611         DECLARE_WAITQUEUE(wait, current);
1612         int ret = 0;
1613
1614         adr += chip->start;
1615
1616         /* Let's determine this according to the interleave only once */
1617         status_OK = CMD(0x80);
1618
1619  retry:
1620         spin_lock(chip->mutex);
1621         ret = get_chip(map, chip, adr, FL_ERASING);
1622         if (ret) {
1623                 spin_unlock(chip->mutex);
1624                 return ret;
1625         }
1626
1627         XIP_INVAL_CACHED_RANGE(map, adr, len);
1628         ENABLE_VPP(map);
1629         xip_disable(map, chip, adr);
1630
1631         /* Clear the status register first */
1632         map_write(map, CMD(0x50), adr);
1633
1634         /* Now erase */
1635         map_write(map, CMD(0x20), adr);
1636         map_write(map, CMD(0xD0), adr);
1637         chip->state = FL_ERASING;
1638         chip->erase_suspended = 0;
1639
1640         spin_unlock(chip->mutex);
1641         INVALIDATE_CACHED_RANGE(map, adr, len);
1642         UDELAY(map, chip, adr, chip->erase_time*1000/2);
1643         spin_lock(chip->mutex);
1644
1645         /* FIXME. Use a timer to check this, and return immediately. */
1646         /* Once the state machine's known to be working I'll do that */
1647
1648         timeo = jiffies + (HZ*20);
1649         for (;;) {
1650                 if (chip->state != FL_ERASING) {
1651                         /* Someone's suspended the erase. Sleep */
1652                         set_current_state(TASK_UNINTERRUPTIBLE);
1653                         add_wait_queue(&chip->wq, &wait);
1654                         spin_unlock(chip->mutex);
1655                         schedule();
1656                         remove_wait_queue(&chip->wq, &wait);
1657                         spin_lock(chip->mutex);
1658                         continue;
1659                 }
1660                 if (chip->erase_suspended) {
1661                         /* This erase was suspended and resumed.
1662                            Adjust the timeout */
1663                         timeo = jiffies + (HZ*20); /* FIXME */
1664                         chip->erase_suspended = 0;
1665                 }
1666
1667                 status = map_read(map, adr);
1668                 if (map_word_andequal(map, status, status_OK, status_OK))
1669                         break;
1670                 
1671                 /* OK Still waiting */
1672                 if (time_after(jiffies, timeo)) {
1673                         map_word Xstatus;
1674                         map_write(map, CMD(0x70), adr);
1675                         chip->state = FL_STATUS;
1676                         Xstatus = map_read(map, adr);
1677                         /* Clear status bits */
1678                         map_write(map, CMD(0x50), adr);
1679                         map_write(map, CMD(0x70), adr);
1680                         xip_enable(map, chip, adr);
1681                         printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
1682                                adr, status.x[0], Xstatus.x[0]);
1683                         ret = -EIO;
1684                         goto out;
1685                 }
1686                 
1687                 /* Latency issues. Drop the lock, wait a while and retry */
1688                 spin_unlock(chip->mutex);
1689                 UDELAY(map, chip, adr, 1000000/HZ);
1690                 spin_lock(chip->mutex);
1691         }
1692
1693         /* We've broken this before. It doesn't hurt to be safe */
1694         map_write(map, CMD(0x70), adr);
1695         chip->state = FL_STATUS;
1696         status = map_read(map, adr);
1697
1698         /* check for lock bit */
1699         if (map_word_bitsset(map, status, CMD(0x3a))) {
1700                 unsigned char chipstatus;
1701
1702                 /* Reset the error bits */
1703                 map_write(map, CMD(0x50), adr);
1704                 map_write(map, CMD(0x70), adr);
1705                 xip_enable(map, chip, adr);
1706
1707                 chipstatus = status.x[0];
1708                 if (!map_word_equal(map, status, CMD(chipstatus))) {
1709                         int i, w;
1710                         for (w=0; w<map_words(map); w++) {
1711                                 for (i = 0; i<cfi_interleave(cfi); i++) {
1712                                         chipstatus |= status.x[w] >> (cfi->device_type * 8);
1713                                 }
1714                         }
1715                         printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
1716                                status.x[0], chipstatus);
1717                 }
1718
1719                 if ((chipstatus & 0x30) == 0x30) {
1720                         printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
1721                         ret = -EIO;
1722                 } else if (chipstatus & 0x02) {
1723                         /* Protection bit set */
1724                         ret = -EROFS;
1725                 } else if (chipstatus & 0x8) {
1726                         /* Voltage */
1727                         printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
1728                         ret = -EIO;
1729                 } else if (chipstatus & 0x20) {
1730                         if (retries--) {
1731                                 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
1732                                 timeo = jiffies + HZ;
1733                                 put_chip(map, chip, adr);
1734                                 spin_unlock(chip->mutex);
1735                                 goto retry;
1736                         }
1737                         printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
1738                         ret = -EIO;
1739                 }
1740         } else {
1741                 xip_enable(map, chip, adr);
1742                 ret = 0;
1743         }
1744
1745  out:   put_chip(map, chip, adr);
1746         spin_unlock(chip->mutex);
1747         return ret;
1748 }
1749
1750 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1751 {
1752         unsigned long ofs, len;
1753         int ret;
1754
1755         ofs = instr->addr;
1756         len = instr->len;
1757
1758         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1759         if (ret)
1760                 return ret;
1761
1762         instr->state = MTD_ERASE_DONE;
1763         mtd_erase_callback(instr);
1764         
1765         return 0;
1766 }
1767
/*
 * Wait for all chips to become idle, then release them.  get_chip() blocks
 * until any in-progress operation finishes, so by the time every chip has
 * been claimed in FL_SYNCING state all pending writes/erases are done.
 */
static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		/* Blocks until the chip is free, then marks it ours */
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change - 
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	/* i is one past the last chip visited above, so this walks back
	 * over exactly the chips we may have claimed. */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		
		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}
1807
1808 #ifdef DEBUG_LOCK_BITS
/*
 * Debug helper (DEBUG_LOCK_BITS only): dump the block lock status register
 * for one erase block.  Switches the chip into read-identifier (0x90) mode
 * and reads the word at offset 2 (scaled by interleave * device width)
 * within the block, which holds the lock configuration.
 */
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	xip_disable(map, chip, adr+(2*ofs_factor));
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
1826 #endif
1827
1828 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1829 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1830
1831 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1832                                        unsigned long adr, int len, void *thunk)
1833 {
1834         struct cfi_private *cfi = map->fldrv_priv;
1835         map_word status, status_OK;
1836         unsigned long timeo = jiffies + HZ;
1837         int ret;
1838
1839         adr += chip->start;
1840
1841         /* Let's determine this according to the interleave only once */
1842         status_OK = CMD(0x80);
1843
1844         spin_lock(chip->mutex);
1845         ret = get_chip(map, chip, adr, FL_LOCKING);
1846         if (ret) {
1847                 spin_unlock(chip->mutex);
1848                 return ret;
1849         }
1850
1851         ENABLE_VPP(map);
1852         xip_disable(map, chip, adr);
1853         
1854         map_write(map, CMD(0x60), adr);
1855         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1856                 map_write(map, CMD(0x01), adr);
1857                 chip->state = FL_LOCKING;
1858         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1859                 map_write(map, CMD(0xD0), adr);
1860                 chip->state = FL_UNLOCKING;
1861         } else
1862                 BUG();
1863
1864         spin_unlock(chip->mutex);
1865         UDELAY(map, chip, adr, 1000000/HZ);
1866         spin_lock(chip->mutex);
1867
1868         /* FIXME. Use a timer to check this, and return immediately. */
1869         /* Once the state machine's known to be working I'll do that */
1870
1871         timeo = jiffies + (HZ*20);
1872         for (;;) {
1873
1874                 status = map_read(map, adr);
1875                 if (map_word_andequal(map, status, status_OK, status_OK))
1876                         break;
1877                 
1878                 /* OK Still waiting */
1879                 if (time_after(jiffies, timeo)) {
1880                         map_word Xstatus;
1881                         map_write(map, CMD(0x70), adr);
1882                         chip->state = FL_STATUS;
1883                         Xstatus = map_read(map, adr);
1884                         xip_enable(map, chip, adr);
1885                         printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
1886                                status.x[0], Xstatus.x[0]);
1887                         put_chip(map, chip, adr);
1888                         spin_unlock(chip->mutex);
1889                         return -EIO;
1890                 }
1891                 
1892                 /* Latency issues. Drop the lock, wait a while and retry */
1893                 spin_unlock(chip->mutex);
1894                 UDELAY(map, chip, adr, 1);
1895                 spin_lock(chip->mutex);
1896         }
1897         
1898         /* Done and happy. */
1899         chip->state = FL_STATUS;
1900         xip_enable(map, chip, adr);
1901         put_chip(map, chip, adr);
1902         spin_unlock(chip->mutex);
1903         return 0;
1904 }
1905
1906 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1907 {
1908         int ret;
1909
1910 #ifdef DEBUG_LOCK_BITS
1911         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1912                __FUNCTION__, ofs, len);
1913         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1914                 ofs, len, 0);
1915 #endif
1916
1917         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock, 
1918                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1919         
1920 #ifdef DEBUG_LOCK_BITS
1921         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1922                __FUNCTION__, ret);
1923         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1924                 ofs, len, 0);
1925 #endif
1926
1927         return ret;
1928 }
1929
1930 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1931 {
1932         int ret;
1933
1934 #ifdef DEBUG_LOCK_BITS
1935         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
1936                __FUNCTION__, ofs, len);
1937         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1938                 ofs, len, 0);
1939 #endif
1940
1941         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1942                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1943         
1944 #ifdef DEBUG_LOCK_BITS
1945         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1946                __FUNCTION__, ret);
1947         cfi_varsize_frob(mtd, do_printlockstatus_oneblock, 
1948                 ofs, len, 0);
1949 #endif
1950         
1951         return ret;
1952 }
1953
1954 #ifdef CONFIG_MTD_OTP
1955
1956 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, 
1957                         u_long data_offset, u_char *buf, u_int size,
1958                         u_long prot_offset, u_int groupno, u_int groupsize);
1959
/*
 * Read from the OTP protection registers of one chip.  The registers are
 * only visible in read-identifier (0x90) mode, so switch into it before
 * copying.  The register addresses alias the flash array in the bus
 * mapping, so the CPU cache is invalidated both before the read (to avoid
 * serving stale array-mode data) and after (to avoid caching OTP data).
 */
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	if (map->inval_cache)
		map->inval_cache(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		/* Enter read-identifier mode so the OTP area is mapped in */
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	if (map->inval_cache)
		map->inval_cache(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	spin_unlock(chip->mutex);
	return 0;
}
1994
1995 static int
1996 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1997              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1998 {
1999         int ret;
2000
2001         while (size) {
2002                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2003                 int gap = offset - bus_ofs;
2004                 int n = min_t(int, size, map_bankwidth(map)-gap);
2005                 map_word datum = map_word_ff(map);
2006
2007                 datum = map_word_load_partial(map, datum, buf, gap, n);
2008                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2009                 if (ret) 
2010                         return ret;
2011
2012                 offset += n;
2013                 buf += n;
2014                 size -= n;
2015         }
2016
2017         return 0;
2018 }
2019
2020 static int
2021 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2022             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2023 {
2024         struct cfi_private *cfi = map->fldrv_priv;
2025         map_word datum;
2026
2027         /* make sure area matches group boundaries */
2028         if (size != grpsz)
2029                 return -EXDEV;
2030
2031         datum = map_word_ff(map);
2032         datum = map_word_clr(map, datum, CMD(1 << grpno));
2033         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2034 }
2035
/*
 * Walk the OTP protection regions of every physical chip and apply
 * @action (do_otp_read / do_otp_write / do_otp_lock) to the portion
 * selected by [from, from+len).  @user_regs chooses between the
 * user-programmable and the factory-programmed register groups.
 *
 * Special case: when @action is NULL, @buf is filled with struct otp_info
 * records describing each group instead (used by the get_*_prot_info
 * entry points).
 */
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;

	for (chip_num = 0; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			/* CFI values are per-device; scale them to bus
			   addresses/sizes for interleaved maps. Data starts
			   one device word past the lock register. */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					/* NOTE(review): len is size_t, so this
					   subtraction can wrap and "len <= 0"
					   only fires on exact equality —
					   confirm callers always pass
					   len > sizeof(struct otp_info). */
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					/* Read the lock register to report
					   this group's locked state */
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0,  0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					/* A cleared bit means locked */
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					/* Requested range starts past this
					   group: skip it entirely */
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
2157
2158 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2159                                            size_t len, size_t *retlen,
2160                                             u_char *buf)
2161 {
2162         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2163                                      buf, do_otp_read, 0);
2164 }
2165
2166 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2167                                            size_t len, size_t *retlen,
2168                                             u_char *buf)
2169 {
2170         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2171                                      buf, do_otp_read, 1);
2172 }
2173
2174 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2175                                             size_t len, size_t *retlen,
2176                                              u_char *buf)
2177 {
2178         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2179                                      buf, do_otp_write, 1);
2180 }
2181
2182 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2183                                            loff_t from, size_t len)
2184 {
2185         size_t retlen;
2186         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2187                                      NULL, do_otp_lock, 1);
2188 }
2189
2190 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, 
2191                                            struct otp_info *buf, size_t len)
2192 {
2193         size_t retlen;
2194         int ret;
2195
2196         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2197         return ret ? : retlen;
2198 }
2199
2200 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2201                                            struct otp_info *buf, size_t len)
2202 {
2203         size_t retlen;
2204         int ret;
2205
2206         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2207         return ret ? : retlen;
2208 }
2209
2210 #endif
2211
2212 static int cfi_intelext_suspend(struct mtd_info *mtd)
2213 {
2214         struct map_info *map = mtd->priv;
2215         struct cfi_private *cfi = map->fldrv_priv;
2216         int i;
2217         struct flchip *chip;
2218         int ret = 0;
2219
2220         for (i=0; !ret && i<cfi->numchips; i++) {
2221                 chip = &cfi->chips[i];
2222
2223                 spin_lock(chip->mutex);
2224
2225                 switch (chip->state) {
2226                 case FL_READY:
2227                 case FL_STATUS:
2228                 case FL_CFI_QUERY:
2229                 case FL_JEDEC_QUERY:
2230                         if (chip->oldstate == FL_READY) {
2231                                 chip->oldstate = chip->state;
2232                                 chip->state = FL_PM_SUSPENDED;
2233                                 /* No need to wake_up() on this state change - 
2234                                  * as the whole point is that nobody can do anything
2235                                  * with the chip now anyway.
2236                                  */
2237                         } else {
2238                                 /* There seems to be an operation pending. We must wait for it. */
2239                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2240                                 ret = -EAGAIN;
2241                         }
2242                         break;
2243                 default:
2244                         /* Should we actually wait? Once upon a time these routines weren't
2245                            allowed to. Or should we return -EAGAIN, because the upper layers
2246                            ought to have already shut down anything which was using the device
2247                            anyway? The latter for now. */
2248                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
2249                         ret = -EAGAIN;
2250                 case FL_PM_SUSPENDED:
2251                         break;
2252                 }
2253                 spin_unlock(chip->mutex);
2254         }
2255
2256         /* Unlock the chips again */
2257
2258         if (ret) {
2259                 for (i--; i >=0; i--) {
2260                         chip = &cfi->chips[i];
2261                         
2262                         spin_lock(chip->mutex);
2263                         
2264                         if (chip->state == FL_PM_SUSPENDED) {
2265                                 /* No need to force it into a known state here,
2266                                    because we're returning failure, and it didn't
2267                                    get power cycled */
2268                                 chip->state = chip->oldstate;
2269                                 chip->oldstate = FL_READY;
2270                                 wake_up(&chip->wq);
2271                         }
2272                         spin_unlock(chip->mutex);
2273                 }
2274         } 
2275         
2276         return ret;
2277 }
2278
2279 static void cfi_intelext_resume(struct mtd_info *mtd)
2280 {
2281         struct map_info *map = mtd->priv;
2282         struct cfi_private *cfi = map->fldrv_priv;
2283         int i;
2284         struct flchip *chip;
2285
2286         for (i=0; i<cfi->numchips; i++) {
2287         
2288                 chip = &cfi->chips[i];
2289
2290                 spin_lock(chip->mutex);
2291                 
2292                 /* Go to known state. Chip may have been power cycled */
2293                 if (chip->state == FL_PM_SUSPENDED) {
2294                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2295                         chip->oldstate = chip->state = FL_READY;
2296                         wake_up(&chip->wq);
2297                 }
2298
2299                 spin_unlock(chip->mutex);
2300         }
2301 }
2302
2303 static void cfi_intelext_destroy(struct mtd_info *mtd)
2304 {
2305         struct map_info *map = mtd->priv;
2306         struct cfi_private *cfi = map->fldrv_priv;
2307         kfree(cfi->cmdset_priv);
2308         kfree(cfi->cfiq);
2309         kfree(cfi->chips[0].priv);
2310         kfree(cfi);
2311         kfree(mtd->eraseregions);
2312 }
2313
/* Inter-module names this driver registers under.  Both the 0x0001 and
 * 0x0003 names are provided; init below binds them to the same probe
 * function, cfi_cmdset_0001. */
static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";
2316
static int __init cfi_intelext_init(void)
{
	/* Register the probe entry point under both command-set names.
	 * Note both deliberately point at cfi_cmdset_0001: the 0x0003
	 * set is serviced by the same implementation. */
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}
2323
static void __exit cfi_intelext_exit(void)
{
	/* Drop both inter-module registrations made in init. */
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}
2329
/* Standard module hookup and metadata. */
module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");