drivers/mtd/chips/cfi_cmdset_0001.c
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 * 	- completely revamped method functions so they are aware and
 * 	  independent of the flash geometry (buswidth, interleave, etc.)
 * 	- scalability vs code size is completely set at compile-time
 * 	  (see include/linux/mtd/cfi.h for selection)
 * 	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 * 	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 * 	- auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

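/* Device IDs of chips that get special treatment in the fixup tables below */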
/* Intel chips */
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define PF38F4476	0x881c
/* STMicroelectronics chips */
#define M50LPW080	0x002F
#define M50FLW080A	0x0080
#define M50FLW080B	0x0081
/* Atmel chips */
#define AT49BV640D	0x02de
#define AT49BV640DT	0x02db

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
					    struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, void **virt, resource_size_t *phys);
static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"


/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

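	/*
	 * Save the Atmel-format table, then clear everything past the
	 * first 5 bytes (the "PRI" signature plus major/minor version,
	 * which both formats share) before rebuilding it in Intel layout.
	 */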
	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}

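/*
 * The AT49BV640D(T) powers up with all blocks locked: advertise the
 * "instant individual block locking" feature bit and mark the device
 * so that blocks are automatically unlocked on boot and resume.
 */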
static void fixup_at49bv640dx_lock(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	cfip->FeatureSupport |= (1 << 5);
	mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
			    "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
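	/* Bits 15-0 hold the number of blocks minus 1: force the second
	 * region to 0x3e + 1 = 63 blocks. */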
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
		mtd->writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock, NULL },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks up all the cases where we
	 * know that to be so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};

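/*
 * The PF38F4476 apparently misreports its Extended Query minor version;
 * treat the '3' it reports as '1' so the table is parsed correctly.
 */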
static void cfi_fixup_major_minor(struct cfi_private *cfi,
						struct cfi_pri_intelext *extp)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
			cfi->id == PF38F4476 && extp->MinorVersion == '3')
		extp->MinorVersion = '1';
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;
	unsigned int extp_size = sizeof(*extp);

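	/*
	 * The extended table has a variable-sized, version-dependent
	 * tail.  Read the fixed part first, then walk the tail; whenever
	 * a field runs past the bytes read so far, grow extp_size and
	 * re-read the whole table from the chip (see need_more below).
	 */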
 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	cfi_fixup_major_minor(cfi, extp);

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n",  extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) *
			      sizeof(struct cfi_intelext_otpinfo);
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		int nb_parts, i;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
			need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->is_locked = cfi_intelext_is_locked;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

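	/*
	 * CFI encodes timeouts as powers of two: the typical word/buffer
	 * write time is 2^Typ microseconds and the typical block erase
	 * time is 2^Typ milliseconds, while each Max field is a further
	 * left-shift applied to the typical value.
	 */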
	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
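/*
 * The Intel Standard (0x0003) and Intel Performance (0x0200) command
 * sets are close enough to 0x0001 that the same implementation serves
 * all three; hence the aliases below.
 */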
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
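		/* Each CFI erase region descriptor packs the block size
		 * in 256-byte units into bits 31-16 and the number of
		 * blocks minus 1 into bits 15-0, so (info >> 8) & ~0xff
		 * yields the block size in bytes. */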
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i,(unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) *
		       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
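		/* Each hardware partition then becomes its own virtual
		 * chip covering 2^partshift bytes. */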
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
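/*
 * chip_ready() is called with chip->mutex held.  It returns 0 once the
 * chip can service an operation of the given mode, suspending an erase
 * in progress if necessary.  A return of -EAGAIN means the mutex was
 * dropped and retaken, so the caller must re-evaluate the chip state.
 */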
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* Fall through */
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;


		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.  */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* Fall through */
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}
	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}

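/*
 * put_chip() releases the ownership taken by get_chip(): shared
 * write/erase ownership is handed back to a partition with a suspended
 * operation if there is one, a suspended erase is resumed, and anyone
 * sleeping on the chip's wait queue is woken up.
 */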
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

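/*
 * Poll the status register until the operation completes or the
 * worst-case CFI timeout expires.  The cache invalidation is done up
 * front (with the chip mutex dropped) since the flash is busy anyway;
 * waiting uses msleep() while the remaining typical delay is at least
 * a timer tick, then falls back to 1 us busy-wait steps.
 */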
1210 static int inval_cache_and_wait_for_operation(
1211                 struct map_info *map, struct flchip *chip,
1212                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1213                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1214 {
1215         struct cfi_private *cfi = map->fldrv_priv;
1216         map_word status, status_OK = CMD(0x80);
1217         int chip_state = chip->state;
1218         unsigned int timeo, sleep_time, reset_timeo;
1219
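        /* Cache invalidation can be slow, so do it without holding the
           chip mutex; the flash is busy completing the operation in the
           meantime anyway. */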
1220         mutex_unlock(&chip->mutex);
1221         if (inval_len)
1222                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1223         mutex_lock(&chip->mutex);
1224
1225         timeo = chip_op_time_max;
1226         if (!timeo)
1227                 timeo = 500000;
1228         reset_timeo = timeo;
1229         sleep_time = chip_op_time / 2;
1230
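        /*
         * Poll SR.7 (WSM ready) until the operation completes.  Sleep
         * first for half the typical operation time, then a timer tick
         * at a time; busy-wait in 1us steps once less than a tick
         * remains.
         */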
1231         for (;;) {
1232                 status = map_read(map, cmd_adr);
1233                 if (map_word_andequal(map, status, status_OK, status_OK))
1234                         break;
1235
1236                 if (!timeo) {
1237                         map_write(map, CMD(0x70), cmd_adr);
1238                         chip->state = FL_STATUS;
1239                         return -ETIME;
1240                 }
1241
1242                 /* OK, still waiting. Drop the lock, wait a while, and retry. */
1243                 mutex_unlock(&chip->mutex);
1244                 if (sleep_time >= 1000000/HZ) {
1245                         /*
1246                          * The remaining delay is long enough to be
1247                          * spent in a sleeping delay rather than
1248                          * busy-waiting.
1249                          */
1250                         msleep(sleep_time/1000);
1251                         timeo -= sleep_time;
1252                         sleep_time = 1000000/HZ;
1253                 } else {
1254                         udelay(1);
1255                         cond_resched();
1256                         timeo--;
1257                 }
1258                 mutex_lock(&chip->mutex);
1259
1260                 while (chip->state != chip_state) {
1261                         /* Someone's suspended the operation: sleep */
1262                         DECLARE_WAITQUEUE(wait, current);
1263                         set_current_state(TASK_UNINTERRUPTIBLE);
1264                         add_wait_queue(&chip->wq, &wait);
1265                         mutex_unlock(&chip->mutex);
1266                         schedule();
1267                         remove_wait_queue(&chip->wq, &wait);
1268                         mutex_lock(&chip->mutex);
1269                 }
1270                 if (chip->erase_suspended && chip_state == FL_ERASING) {
1271                         /* Erase suspend occurred while sleeping: reset the timeout */
1272                         timeo = reset_timeo;
1273                         chip->erase_suspended = 0;
1274                 }
1275                 if (chip->write_suspended && chip_state == FL_WRITING) {
1276                         /* Write suspend occurred while sleeping: reset the timeout */
1277                         timeo = reset_timeo;
1278                         chip->write_suspended = 0;
1279                 }
1280         }
1281
1282         /* Done and happy. */
1283         chip->state = FL_STATUS;
1284         return 0;
1285 }
1286
1287 #endif
1288
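/* Wait for completion only; there is no cached range to invalidate. */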
1289 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1290         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1291
1292
1293 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1294 {
1295         unsigned long cmd_addr;
1296         struct cfi_private *cfi = map->fldrv_priv;
1297         int ret = 0;
1298
1299         adr += chip->start;
1300
1301         /* Ensure cmd read/writes are aligned. */
1302         cmd_addr = adr & ~(map_bankwidth(map)-1);
1303
1304         mutex_lock(&chip->mutex);
1305
1306         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1307
1308         if (!ret) {
1309                 if (chip->state != FL_POINT && chip->state != FL_READY)
1310                         map_write(map, CMD(0xff), cmd_addr);
1311
1312                 chip->state = FL_POINT;
1313                 chip->ref_point_counter++;
1314         }
1315         mutex_unlock(&chip->mutex);
1316
1317         return ret;
1318 }
1319
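/*
 * The MTD point() method: hand callers a direct pointer into the
 * memory-mapped flash for zero-copy reads.  Each chip touched is put in
 * FL_POINT and reference-counted so it stays readable until the
 * matching unpoint().
 */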
1320 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1321                 size_t *retlen, void **virt, resource_size_t *phys)
1322 {
1323         struct map_info *map = mtd->priv;
1324         struct cfi_private *cfi = map->fldrv_priv;
1325         unsigned long ofs, last_end = 0;
1326         int chipnum;
1327         int ret = 0;
1328
1329         if (!map->virt || (from + len > mtd->size))
1330                 return -EINVAL;
1331
1332         /* Now lock the chip(s) to POINT state */
1333
1334         /* ofs: offset within the first chip that the first read should start */
1335         chipnum = (from >> cfi->chipshift);
1336         ofs = from - (chipnum << cfi->chipshift);
1337
1338         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1339         *retlen = 0;
1340         if (phys)
1341                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1342
1343         while (len) {
1344                 unsigned long thislen;
1345
1346                 if (chipnum >= cfi->numchips)
1347                         break;
1348
1349                 /* We cannot point across chips that are virtually disjoint */
1350                 if (!last_end)
1351                         last_end = cfi->chips[chipnum].start;
1352                 else if (cfi->chips[chipnum].start != last_end)
1353                         break;
1354
1355                 if ((len + ofs -1) >> cfi->chipshift)
1356                         thislen = (1<<cfi->chipshift) - ofs;
1357                 else
1358                         thislen = len;
1359
1360                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1361                 if (ret)
1362                         break;
1363
1364                 *retlen += thislen;
1365                 len -= thislen;
1366
1367                 ofs = 0;
1368                 last_end += 1 << cfi->chipshift;
1369                 chipnum++;
1370         }
1371         return 0;
1372 }
1373
1374 static void cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1375 {
1376         struct map_info *map = mtd->priv;
1377         struct cfi_private *cfi = map->fldrv_priv;
1378         unsigned long ofs;
1379         int chipnum;
1380
1381         /* Now unlock the chip(s) POINT state */
1382
1383         /* ofs: offset within the first chip that the first read should start */
1384         chipnum = (from >> cfi->chipshift);
1385         ofs = from - (chipnum <<  cfi->chipshift);
1386
1387         while (len) {
1388                 unsigned long thislen;
1389                 struct flchip *chip;
1390
1391                 if (chipnum >= cfi->numchips)
1392                         break;
1393                 chip = &cfi->chips[chipnum];
1394
1395                 if ((len + ofs -1) >> cfi->chipshift)
1396                         thislen = (1<<cfi->chipshift) - ofs;
1397                 else
1398                         thislen = len;
1399
1400                 mutex_lock(&chip->mutex);
1401                 if (chip->state == FL_POINT) {
1402                         chip->ref_point_counter--;
1403                         if (chip->ref_point_counter == 0)
1404                                 chip->state = FL_READY;
1405                 } else
1406                         printk(KERN_WARNING "%s: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */
1407
1408                 put_chip(map, chip, chip->start);
1409                 mutex_unlock(&chip->mutex);
1410
1411                 len -= thislen;
1412                 ofs = 0;
1413                 chipnum++;
1414         }
1415 }
1416
1417 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1418 {
1419         unsigned long cmd_addr;
1420         struct cfi_private *cfi = map->fldrv_priv;
1421         int ret;
1422
1423         adr += chip->start;
1424
1425         /* Ensure cmd read/writes are aligned. */
1426         cmd_addr = adr & ~(map_bankwidth(map)-1);
1427
1428         mutex_lock(&chip->mutex);
1429         ret = get_chip(map, chip, cmd_addr, FL_READY);
1430         if (ret) {
1431                 mutex_unlock(&chip->mutex);
1432                 return ret;
1433         }
1434
1435         if (chip->state != FL_POINT && chip->state != FL_READY) {
1436                 map_write(map, CMD(0xff), cmd_addr);
1437
1438                 chip->state = FL_READY;
1439         }
1440
1441         map_copy_from(map, buf, adr, len);
1442
1443         put_chip(map, chip, cmd_addr);
1444
1445         mutex_unlock(&chip->mutex);
1446         return 0;
1447 }
1448
1449 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1450 {
1451         struct map_info *map = mtd->priv;
1452         struct cfi_private *cfi = map->fldrv_priv;
1453         unsigned long ofs;
1454         int chipnum;
1455         int ret = 0;
1456
1457         /* ofs: offset within the first chip that the first read should start */
1458         chipnum = (from >> cfi->chipshift);
1459         ofs = from - (chipnum <<  cfi->chipshift);
1460
1461         *retlen = 0;
1462
1463         while (len) {
1464                 unsigned long thislen;
1465
1466                 if (chipnum >= cfi->numchips)
1467                         break;
1468
1469                 if ((len + ofs -1) >> cfi->chipshift)
1470                         thislen = (1<<cfi->chipshift) - ofs;
1471                 else
1472                         thislen = len;
1473
1474                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1475                 if (ret)
1476                         break;
1477
1478                 *retlen += thislen;
1479                 len -= thislen;
1480                 buf += thislen;
1481
1482                 ofs = 0;
1483                 chipnum++;
1484         }
1485         return ret;
1486 }
1487
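/*
 * Program a single bus-width word, either into the main array
 * (FL_WRITING) or into the OTP protection registers (FL_OTP_WRITE).
 */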
1488 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1489                                      unsigned long adr, map_word datum, int mode)
1490 {
1491         struct cfi_private *cfi = map->fldrv_priv;
1492         map_word status, write_cmd;
1493         int ret = 0;
1494
1495         adr += chip->start;
1496
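        /* Select the program opcode: 0x40 is the standard word-program
           command, 0x41 its Performance-family equivalent, and 0xc0
           programs the OTP protection registers. */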
1497         switch (mode) {
1498         case FL_WRITING:
1499                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1500                 break;
1501         case FL_OTP_WRITE:
1502                 write_cmd = CMD(0xc0);
1503                 break;
1504         default:
1505                 return -EINVAL;
1506         }
1507
1508         mutex_lock(&chip->mutex);
1509         ret = get_chip(map, chip, adr, mode);
1510         if (ret) {
1511                 mutex_unlock(&chip->mutex);
1512                 return ret;
1513         }
1514
1515         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1516         ENABLE_VPP(map);
1517         xip_disable(map, chip, adr);
1518         map_write(map, write_cmd, adr);
1519         map_write(map, datum, adr);
1520         chip->state = mode;
1521
1522         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1523                                    adr, map_bankwidth(map),
1524                                    chip->word_write_time,
1525                                    chip->word_write_time_max);
1526         if (ret) {
1527                 xip_enable(map, chip, adr);
1528                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1529                 goto out;
1530         }
1531
1532         /* check for errors */
1533         status = map_read(map, adr);
1534         if (map_word_bitsset(map, status, CMD(0x1a))) {
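                /* 0x1a = SR.4 (program error) | SR.3 (VPP low) | SR.1 (block locked) */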
1535                 unsigned long chipstatus = MERGESTATUS(status);
1536
1537                 /* reset status */
1538                 map_write(map, CMD(0x50), adr);
1539                 map_write(map, CMD(0x70), adr);
1540                 xip_enable(map, chip, adr);
1541
1542                 if (chipstatus & 0x02) {
1543                         ret = -EROFS;
1544                 } else if (chipstatus & 0x08) {
1545                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1546                         ret = -EIO;
1547                 } else {
1548                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1549                         ret = -EINVAL;
1550                 }
1551
1552                 goto out;
1553         }
1554
1555         xip_enable(map, chip, adr);
1556  out:   put_chip(map, chip, adr);
1557         mutex_unlock(&chip->mutex);
1558         return ret;
1559 }
1560
1561
1562 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1563 {
1564         struct map_info *map = mtd->priv;
1565         struct cfi_private *cfi = map->fldrv_priv;
1566         int ret = 0;
1567         int chipnum;
1568         unsigned long ofs;
1569
1570         *retlen = 0;
1571         if (!len)
1572                 return 0;
1573
1574         chipnum = to >> cfi->chipshift;
1575         ofs = to  - (chipnum << cfi->chipshift);
1576
1577         /* If it's not bus-aligned, do the first byte write */
1578         if (ofs & (map_bankwidth(map)-1)) {
1579                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1580                 int gap = ofs - bus_ofs;
1581                 int n;
1582                 map_word datum;
1583
1584                 n = min_t(int, len, map_bankwidth(map)-gap);
1585                 datum = map_word_ff(map);
1586                 datum = map_word_load_partial(map, datum, buf, gap, n);
1587
1588                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1589                                                bus_ofs, datum, FL_WRITING);
1590                 if (ret)
1591                         return ret;
1592
1593                 len -= n;
1594                 ofs += n;
1595                 buf += n;
1596                 (*retlen) += n;
1597
1598                 if (ofs >> cfi->chipshift) {
1599                         chipnum++;
1600                         ofs = 0;
1601                         if (chipnum == cfi->numchips)
1602                                 return 0;
1603                 }
1604         }
1605
1606         while(len >= map_bankwidth(map)) {
1607                 map_word datum = map_word_load(map, buf);
1608
1609                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1610                                        ofs, datum, FL_WRITING);
1611                 if (ret)
1612                         return ret;
1613
1614                 ofs += map_bankwidth(map);
1615                 buf += map_bankwidth(map);
1616                 (*retlen) += map_bankwidth(map);
1617                 len -= map_bankwidth(map);
1618
1619                 if (ofs >> cfi->chipshift) {
1620                         chipnum++;
1621                         ofs = 0;
1622                         if (chipnum == cfi->numchips)
1623                                 return 0;
1624                 }
1625         }
1626
1627         if (len & (map_bankwidth(map)-1)) {
1628                 map_word datum;
1629
1630                 datum = map_word_ff(map);
1631                 datum = map_word_load_partial(map, datum, buf, 0, len);
1632
1633                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1634                                        ofs, datum, FL_WRITING);
1635                 if (ret)
1636                         return ret;
1637
1638                 (*retlen) += len;
1639         }
1640
1641         return 0;
1642 }
1643
1644
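/*
 * Buffered programming: issue Write to Buffer (0xe8, or 0xe9 on
 * Performance parts), wait for the buffer to become available, write
 * the word count and the data gathered from the kvec array, then
 * confirm with 0xd0 and poll for completion.
 */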
1645 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1646                                     unsigned long adr, const struct kvec **pvec,
1647                                     unsigned long *pvec_seek, int len)
1648 {
1649         struct cfi_private *cfi = map->fldrv_priv;
1650         map_word status, write_cmd, datum;
1651         unsigned long cmd_adr;
1652         int ret, wbufsize, word_gap, words;
1653         const struct kvec *vec;
1654         unsigned long vec_seek;
1655         unsigned long initial_adr;
1656         int initial_len = len;
1657
1658         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1659         adr += chip->start;
1660         initial_adr = adr;
1661         cmd_adr = adr & ~(wbufsize-1);
1662
1663         /* Let's determine this according to the interleave only once */
1664         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1665
1666         mutex_lock(&chip->mutex);
1667         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1668         if (ret) {
1669                 mutex_unlock(&chip->mutex);
1670                 return ret;
1671         }
1672
1673         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1674         ENABLE_VPP(map);
1675         xip_disable(map, chip, cmd_adr);
1676
1677         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1678            [...], the device will not accept any more Write to Buffer commands".
1679            So we must check here and reset those bits if they're set. Otherwise
1680            we're just pissing in the wind */
1681         if (chip->state != FL_STATUS) {
1682                 map_write(map, CMD(0x70), cmd_adr);
1683                 chip->state = FL_STATUS;
1684         }
1685         status = map_read(map, cmd_adr);
1686         if (map_word_bitsset(map, status, CMD(0x30))) {
1687                 xip_enable(map, chip, cmd_adr);
1688                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1689                 xip_disable(map, chip, cmd_adr);
1690                 map_write(map, CMD(0x50), cmd_adr);
1691                 map_write(map, CMD(0x70), cmd_adr);
1692         }
1693
1694         chip->state = FL_WRITING_TO_BUFFER;
1695         map_write(map, write_cmd, cmd_adr);
1696         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1697         if (ret) {
1698                 /* Argh. Not ready for write to buffer */
1699                 map_word Xstatus = map_read(map, cmd_adr);
1700                 map_write(map, CMD(0x70), cmd_adr);
1701                 chip->state = FL_STATUS;
1702                 status = map_read(map, cmd_adr);
1703                 map_write(map, CMD(0x50), cmd_adr);
1704                 map_write(map, CMD(0x70), cmd_adr);
1705                 xip_enable(map, chip, cmd_adr);
1706                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1707                                 map->name, Xstatus.x[0], status.x[0]);
1708                 goto out;
1709         }
1710
1711         /* Figure out the number of words to write */
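        /* If adr is not bus-aligned, back it up to the previous bus
           boundary and pad the leading bytes with 0xff; programming a
           bit to 1 is a no-op, so the pad leaves those cells untouched. */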
1712         word_gap = (-adr & (map_bankwidth(map)-1));
1713         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1714         if (!word_gap) {
1715                 words--;
1716         } else {
1717                 word_gap = map_bankwidth(map) - word_gap;
1718                 adr -= word_gap;
1719                 datum = map_word_ff(map);
1720         }
1721
1722         /* Write the word count; the device expects it as N-1 */
1723         map_write(map, CMD(words), cmd_adr);
1724
1725         /* Write data */
1726         vec = *pvec;
1727         vec_seek = *pvec_seek;
1728         do {
1729                 int n = map_bankwidth(map) - word_gap;
1730                 if (n > vec->iov_len - vec_seek)
1731                         n = vec->iov_len - vec_seek;
1732                 if (n > len)
1733                         n = len;
1734
1735                 if (!word_gap && len < map_bankwidth(map))
1736                         datum = map_word_ff(map);
1737
1738                 datum = map_word_load_partial(map, datum,
1739                                               vec->iov_base + vec_seek,
1740                                               word_gap, n);
1741
1742                 len -= n;
1743                 word_gap += n;
1744                 if (!len || word_gap == map_bankwidth(map)) {
1745                         map_write(map, datum, adr);
1746                         adr += map_bankwidth(map);
1747                         word_gap = 0;
1748                 }
1749
1750                 vec_seek += n;
1751                 if (vec_seek == vec->iov_len) {
1752                         vec++;
1753                         vec_seek = 0;
1754                 }
1755         } while (len);
1756         *pvec = vec;
1757         *pvec_seek = vec_seek;
1758
1759         /* GO GO GO */
1760         map_write(map, CMD(0xd0), cmd_adr);
1761         chip->state = FL_WRITING;
1762
1763         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1764                                    initial_adr, initial_len,
1765                                    chip->buffer_write_time,
1766                                    chip->buffer_write_time_max);
1767         if (ret) {
1768                 map_write(map, CMD(0x70), cmd_adr);
1769                 chip->state = FL_STATUS;
1770                 xip_enable(map, chip, cmd_adr);
1771                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1772                 goto out;
1773         }
1774
1775         /* check for errors */
1776         status = map_read(map, cmd_adr);
1777         if (map_word_bitsset(map, status, CMD(0x1a))) {
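                /* 0x1a = SR.4 (program error) | SR.3 (VPP low) | SR.1 (block locked) */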
1778                 unsigned long chipstatus = MERGESTATUS(status);
1779
1780                 /* reset status */
1781                 map_write(map, CMD(0x50), cmd_adr);
1782                 map_write(map, CMD(0x70), cmd_adr);
1783                 xip_enable(map, chip, cmd_adr);
1784
1785                 if (chipstatus & 0x02) {
1786                         ret = -EROFS;
1787                 } else if (chipstatus & 0x08) {
1788                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1789                         ret = -EIO;
1790                 } else {
1791                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1792                         ret = -EINVAL;
1793                 }
1794
1795                 goto out;
1796         }
1797
1798         xip_enable(map, chip, cmd_adr);
1799  out:   put_chip(map, chip, cmd_adr);
1800         mutex_unlock(&chip->mutex);
1801         return ret;
1802 }
1803
1804 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1805                                 unsigned long count, loff_t to, size_t *retlen)
1806 {
1807         struct map_info *map = mtd->priv;
1808         struct cfi_private *cfi = map->fldrv_priv;
1809         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1810         int ret = 0;
1811         int chipnum;
1812         unsigned long ofs, vec_seek, i;
1813         size_t len = 0;
1814
1815         for (i = 0; i < count; i++)
1816                 len += vecs[i].iov_len;
1817
1818         *retlen = 0;
1819         if (!len)
1820                 return 0;
1821
1822         chipnum = to >> cfi->chipshift;
1823         ofs = to - (chipnum << cfi->chipshift);
1824         vec_seek = 0;
1825
1826         do {
1827                 /* We must not cross write block boundaries */
1828                 int size = wbufsize - (ofs & (wbufsize-1));
1829
1830                 if (size > len)
1831                         size = len;
1832                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1833                                       ofs, &vecs, &vec_seek, size);
1834                 if (ret)
1835                         return ret;
1836
1837                 ofs += size;
1838                 (*retlen) += size;
1839                 len -= size;
1840
1841                 if (ofs >> cfi->chipshift) {
1842                         chipnum++;
1843                         ofs = 0;
1844                         if (chipnum == cfi->numchips)
1845                                 return 0;
1846                 }
1847
1848                 /* Be nice and reschedule with the chip in a usable state for other
1849                    processes. */
1850                 cond_resched();
1851
1852         } while (len);
1853
1854         return 0;
1855 }
1856
1857 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1858                                        size_t len, size_t *retlen, const u_char *buf)
1859 {
1860         struct kvec vec;
1861
1862         vec.iov_base = (void *) buf;
1863         vec.iov_len = len;
1864
1865         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1866 }
1867
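/*
 * Erase a single block: clear the status register (0x50), issue Block
 * Erase (0x20) plus Confirm (0xd0), then poll for completion and decode
 * any error bits.  A spurious SR.5 failure is retried a few times.
 */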
1868 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1869                                       unsigned long adr, int len, void *thunk)
1870 {
1871         struct cfi_private *cfi = map->fldrv_priv;
1872         map_word status;
1873         int retries = 3;
1874         int ret;
1875
1876         adr += chip->start;
1877
1878  retry:
1879         mutex_lock(&chip->mutex);
1880         ret = get_chip(map, chip, adr, FL_ERASING);
1881         if (ret) {
1882                 mutex_unlock(&chip->mutex);
1883                 return ret;
1884         }
1885
1886         XIP_INVAL_CACHED_RANGE(map, adr, len);
1887         ENABLE_VPP(map);
1888         xip_disable(map, chip, adr);
1889
1890         /* Clear the status register first */
1891         map_write(map, CMD(0x50), adr);
1892
1893         /* Now erase */
1894         map_write(map, CMD(0x20), adr);
1895         map_write(map, CMD(0xD0), adr);
1896         chip->state = FL_ERASING;
1897         chip->erase_suspended = 0;
1898
1899         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1900                                    adr, len,
1901                                    chip->erase_time,
1902                                    chip->erase_time_max);
1903         if (ret) {
1904                 map_write(map, CMD(0x70), adr);
1905                 chip->state = FL_STATUS;
1906                 xip_enable(map, chip, adr);
1907                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1908                 goto out;
1909         }
1910
1911         /* We've broken this before. It doesn't hurt to be safe */
1912         map_write(map, CMD(0x70), adr);
1913         chip->state = FL_STATUS;
1914         status = map_read(map, adr);
1915
1916         /* check for errors */
1917         if (map_word_bitsset(map, status, CMD(0x3a))) {
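                /* 0x3a = SR.5 (erase error) | SR.4 (program error) |
                   SR.3 (VPP low) | SR.1 (block locked); SR.4 and SR.5
                   together mean a bad command sequence */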
1918                 unsigned long chipstatus = MERGESTATUS(status);
1919
1920                 /* Reset the error bits */
1921                 map_write(map, CMD(0x50), adr);
1922                 map_write(map, CMD(0x70), adr);
1923                 xip_enable(map, chip, adr);
1924
1925                 if ((chipstatus & 0x30) == 0x30) {
1926                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1927                         ret = -EINVAL;
1928                 } else if (chipstatus & 0x02) {
1929                         /* Protection bit set */
1930                         ret = -EROFS;
1931                 } else if (chipstatus & 0x08) {
1932                         /* Voltage */
1933                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1934                         ret = -EIO;
1935                 } else if ((chipstatus & 0x20) && retries--) {
1936                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1937                         put_chip(map, chip, adr);
1938                         mutex_unlock(&chip->mutex);
1939                         goto retry;
1940                 } else {
1941                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1942                         ret = -EIO;
1943                 }
1944
1945                 goto out;
1946         }
1947
1948         xip_enable(map, chip, adr);
1949  out:   put_chip(map, chip, adr);
1950         mutex_unlock(&chip->mutex);
1951         return ret;
1952 }
1953
1954 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1955 {
1956         unsigned long ofs, len;
1957         int ret;
1958
1959         ofs = instr->addr;
1960         len = instr->len;
1961
1962         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1963         if (ret)
1964                 return ret;
1965
1966         instr->state = MTD_ERASE_DONE;
1967         mtd_erase_callback(instr);
1968
1969         return 0;
1970 }
1971
1972 static void cfi_intelext_sync (struct mtd_info *mtd)
1973 {
1974         struct map_info *map = mtd->priv;
1975         struct cfi_private *cfi = map->fldrv_priv;
1976         int i;
1977         struct flchip *chip;
1978         int ret = 0;
1979
1980         for (i=0; !ret && i<cfi->numchips; i++) {
1981                 chip = &cfi->chips[i];
1982
1983                 mutex_lock(&chip->mutex);
1984                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1985
1986                 if (!ret) {
1987                         chip->oldstate = chip->state;
1988                         chip->state = FL_SYNCING;
1989                         /* No need to wake_up() on this state change -
1990                          * as the whole point is that nobody can do anything
1991                          * with the chip now anyway.
1992                          */
1993                 }
1994                 mutex_unlock(&chip->mutex);
1995         }
1996
1997         /* Unlock the chips again */
1998
1999         for (i--; i >=0; i--) {
2000                 chip = &cfi->chips[i];
2001
2002                 mutex_lock(&chip->mutex);
2003
2004                 if (chip->state == FL_SYNCING) {
2005                         chip->state = chip->oldstate;
2006                         chip->oldstate = FL_READY;
2007                         wake_up(&chip->wq);
2008                 }
2009                 mutex_unlock(&chip->mutex);
2010         }
2011 }
2012
2013 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2014                                                 struct flchip *chip,
2015                                                 unsigned long adr,
2016                                                 int len, void *thunk)
2017 {
2018         struct cfi_private *cfi = map->fldrv_priv;
2019         int status, ofs_factor = cfi->interleave * cfi->device_type;
2020
2021         adr += chip->start;
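        /* With the Read Identifier command (0x90) issued, the block lock
           status is read from word 2 of the block's query space, hence
           the 2*ofs_factor offset. */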
2022         xip_disable(map, chip, adr+(2*ofs_factor));
2023         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2024         chip->state = FL_JEDEC_QUERY;
2025         status = cfi_read_query(map, adr+(2*ofs_factor));
2026         xip_enable(map, chip, 0);
2027         return status;
2028 }
2029
2030 #ifdef DEBUG_LOCK_BITS
2031 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2032                                                 struct flchip *chip,
2033                                                 unsigned long adr,
2034                                                 int len, void *thunk)
2035 {
2036         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2037                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2038         return 0;
2039 }
2040 #endif
2041
2042 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2043 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2044
2045 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2046                                        unsigned long adr, int len, void *thunk)
2047 {
2048         struct cfi_private *cfi = map->fldrv_priv;
2049         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2050         int udelay;
2051         int ret;
2052
2053         adr += chip->start;
2054
2055         mutex_lock(&chip->mutex);
2056         ret = get_chip(map, chip, adr, FL_LOCKING);
2057         if (ret) {
2058                 mutex_unlock(&chip->mutex);
2059                 return ret;
2060         }
2061
2062         ENABLE_VPP(map);
2063         xip_disable(map, chip, adr);
2064
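        /* Block lock commands: 0x60 (setup) followed by 0x01 to set the
           lock bit, or 0xd0 to clear it. */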
2065         map_write(map, CMD(0x60), adr);
2066         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2067                 map_write(map, CMD(0x01), adr);
2068                 chip->state = FL_LOCKING;
2069         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2070                 map_write(map, CMD(0xD0), adr);
2071                 chip->state = FL_UNLOCKING;
2072         } else
2073                 BUG();
2074
2075         /*
2076          * If Instant Individual Block Locking supported then no need
2077          * to delay.
2078          */
2079         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2080
2081         ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2082         if (ret) {
2083                 map_write(map, CMD(0x70), adr);
2084                 chip->state = FL_STATUS;
2085                 xip_enable(map, chip, adr);
2086                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2087                 goto out;
2088         }
2089
2090         xip_enable(map, chip, adr);
2091 out:    put_chip(map, chip, adr);
2092         mutex_unlock(&chip->mutex);
2093         return ret;
2094 }
2095
2096 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2097 {
2098         int ret;
2099
2100 #ifdef DEBUG_LOCK_BITS
2101         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2102                __func__, ofs, (unsigned long long)len);
2103         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2104                 ofs, len, NULL);
2105 #endif
2106
2107         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2108                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2109
2110 #ifdef DEBUG_LOCK_BITS
2111         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2112                __func__, ret);
2113         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2114                 ofs, len, NULL);
2115 #endif
2116
2117         return ret;
2118 }
2119
2120 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2121 {
2122         int ret;
2123
2124 #ifdef DEBUG_LOCK_BITS
2125         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2126                __func__, ofs, (unsigned long long)len);
2127         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2128                 ofs, len, NULL);
2129 #endif
2130
2131         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2132                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2133
2134 #ifdef DEBUG_LOCK_BITS
2135         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2136                __func__, ret);
2137         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2138                 ofs, len, NULL);
2139 #endif
2140
2141         return ret;
2142 }
2143
2144 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2145                                   uint64_t len)
2146 {
2147         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2148                                 ofs, len, NULL) ? 1 : 0;
2149 }
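
/*
 * Example (hypothetical caller): a map driver that wants the whole
 * device writable after probe could do
 *
 *	mtd->unlock(mtd, 0, mtd->size);
 *
 * which walks every erase block through do_xxlock_oneblock() above.
 */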
2150
2151 #ifdef CONFIG_MTD_OTP
2152
2153 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2154                         u_long data_offset, u_char *buf, u_int size,
2155                         u_long prot_offset, u_int groupno, u_int groupsize);
2156
2157 static int __xipram
2158 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2159             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2160 {
2161         struct cfi_private *cfi = map->fldrv_priv;
2162         int ret;
2163
2164         mutex_lock(&chip->mutex);
2165         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2166         if (ret) {
2167                 mutex_unlock(&chip->mutex);
2168                 return ret;
2169         }
2170
2171         /* let's ensure we're not reading back cached data from array mode */
2172         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2173
2174         xip_disable(map, chip, chip->start);
2175         if (chip->state != FL_JEDEC_QUERY) {
2176                 map_write(map, CMD(0x90), chip->start);
2177                 chip->state = FL_JEDEC_QUERY;
2178         }
2179         map_copy_from(map, buf, chip->start + offset, size);
2180         xip_enable(map, chip, chip->start);
2181
2182         /* then ensure we don't keep OTP data in the cache */
2183         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2184
2185         put_chip(map, chip, chip->start);
2186         mutex_unlock(&chip->mutex);
2187         return 0;
2188 }
2189
2190 static int
2191 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2192              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2193 {
2194         int ret;
2195
2196         while (size) {
2197                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2198                 int gap = offset - bus_ofs;
2199                 int n = min_t(int, size, map_bankwidth(map)-gap);
2200                 map_word datum = map_word_ff(map);
2201
2202                 datum = map_word_load_partial(map, datum, buf, gap, n);
2203                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2204                 if (ret)
2205                         return ret;
2206
2207                 offset += n;
2208                 buf += n;
2209                 size -= n;
2210         }
2211
2212         return 0;
2213 }
2214
2215 static int
2216 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2217             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2218 {
2219         struct cfi_private *cfi = map->fldrv_priv;
2220         map_word datum;
2221
2222         /* make sure area matches group boundaries */
2223         if (size != grpsz)
2224                 return -EXDEV;
2225
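        /* Locking is one-way: clear the group's lock bit in the
           protection lock register (programming can only turn 1s into
           0s). */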
2226         datum = map_word_ff(map);
2227         datum = map_word_clr(map, datum, CMD(1 << grpno));
2228         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2229 }
2230
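/*
 * Walk the OTP protection registers of every real chip, applying the
 * given action (read, write or lock) to the regions overlapping
 * [from, from+len).  A NULL action instead fills buf with otp_info
 * records describing each group.
 */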
2231 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2232                                  size_t *retlen, u_char *buf,
2233                                  otp_op_t action, int user_regs)
2234 {
2235         struct map_info *map = mtd->priv;
2236         struct cfi_private *cfi = map->fldrv_priv;
2237         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2238         struct flchip *chip;
2239         struct cfi_intelext_otpinfo *otp;
2240         u_long devsize, reg_prot_offset, data_offset;
2241         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2242         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2243         int ret;
2244
2245         *retlen = 0;
2246
2247         /* Check that we actually have some OTP registers */
2248         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2249                 return -ENODATA;
2250
2251         /* we need real chips here not virtual ones */
2252         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2253         chip_step = devsize >> cfi->chipshift;
2254         chip_num = 0;
2255
2256         /* Some chips have OTP located in the _top_ partition only.
2257            For example: Intel 28F256L18T (T means top-parameter device) */
2258         if (cfi->mfr == CFI_MFR_INTEL) {
2259                 switch (cfi->id) {
2260                 case 0x880b:
2261                 case 0x880c:
2262                 case 0x880d:
2263                         chip_num = chip_step - 1;
2264                 }
2265         }
2266
2267         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2268                 chip = &cfi->chips[chip_num];
2269                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2270
2271                 /* first OTP region */
2272                 field = 0;
2273                 reg_prot_offset = extp->ProtRegAddr;
2274                 reg_fact_groups = 1;
2275                 reg_fact_size = 1 << extp->FactProtRegSize;
2276                 reg_user_groups = 1;
2277                 reg_user_size = 1 << extp->UserProtRegSize;
2278
2279                 while (len > 0) {
2280                         /* flash geometry fixup */
2281                         data_offset = reg_prot_offset + 1;
2282                         data_offset *= cfi->interleave * cfi->device_type;
2283                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2284                         reg_fact_size *= cfi->interleave;
2285                         reg_user_size *= cfi->interleave;
2286
2287                         if (user_regs) {
2288                                 groups = reg_user_groups;
2289                                 groupsize = reg_user_size;
2290                                 /* skip over factory reg area */
2291                                 groupno = reg_fact_groups;
2292                                 data_offset += reg_fact_groups * reg_fact_size;
2293                         } else {
2294                                 groups = reg_fact_groups;
2295                                 groupsize = reg_fact_size;
2296                                 groupno = 0;
2297                         }
2298
2299                         while (len > 0 && groups > 0) {
2300                                 if (!action) {
2301                                         /*
2302                                          * Special case: if action is NULL
2303                                          * we fill buf with otp_info records.
2304                                          */
2305                                         struct otp_info *otpinfo;
2306                                         map_word lockword;
2307                                         if (len <= sizeof(struct otp_info))
2308                                                 return -ENOSPC;
2309                                         len -= sizeof(struct otp_info);
2310                                         ret = do_otp_read(map, chip,
2311                                                           reg_prot_offset,
2312                                                           (u_char *)&lockword,
2313                                                           map_bankwidth(map),
2314                                                           0, 0,  0);
2315                                         if (ret)
2316                                                 return ret;
2317                                         otpinfo = (struct otp_info *)buf;
2318                                         otpinfo->start = from;
2319                                         otpinfo->length = groupsize;
2320                                         otpinfo->locked =
2321                                            !map_word_bitsset(map, lockword,
2322                                                              CMD(1 << groupno));
2323                                         from += groupsize;
2324                                         buf += sizeof(*otpinfo);
2325                                         *retlen += sizeof(*otpinfo);
2326                                 } else if (from >= groupsize) {
2327                                         from -= groupsize;
2328                                         data_offset += groupsize;
2329                                 } else {
2330                                         int size = groupsize;
2331                                         data_offset += from;
2332                                         size -= from;
2333                                         from = 0;
2334                                         if (size > len)
2335                                                 size = len;
2336                                         ret = action(map, chip, data_offset,
2337                                                      buf, size, reg_prot_offset,
2338                                                      groupno, groupsize);
2339                                         if (ret < 0)
2340                                                 return ret;
2341                                         buf += size;
2342                                         len -= size;
2343                                         *retlen += size;
2344                                         data_offset += size;
2345                                 }
2346                                 groupno++;
2347                                 groups--;
2348                         }
2349
2350                         /* next OTP region */
2351                         if (++field == extp->NumProtectionFields)
2352                                 break;
2353                         reg_prot_offset = otp->ProtRegAddr;
2354                         reg_fact_groups = otp->FactGroups;
2355                         reg_fact_size = 1 << otp->FactProtRegSize;
2356                         reg_user_groups = otp->UserGroups;
2357                         reg_user_size = 1 << otp->UserProtRegSize;
2358                         otp++;
2359                 }
2360         }
2361
2362         return 0;
2363 }
2364
2365 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2366                                            size_t len, size_t *retlen,
2367                                             u_char *buf)
2368 {
2369         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2370                                      buf, do_otp_read, 0);
2371 }
2372
2373 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2374                                            size_t len, size_t *retlen,
2375                                             u_char *buf)
2376 {
2377         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2378                                      buf, do_otp_read, 1);
2379 }
2380
2381 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2382                                             size_t len, size_t *retlen,
2383                                              u_char *buf)
2384 {
2385         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2386                                      buf, do_otp_write, 1);
2387 }
2388
2389 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2390                                            loff_t from, size_t len)
2391 {
2392         size_t retlen;
2393         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2394                                      NULL, do_otp_lock, 1);
2395 }
2396
2397 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2398                                            struct otp_info *buf, size_t len)
2399 {
2400         size_t retlen;
2401         int ret;
2402
2403         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2404         return ret ? : retlen;
2405 }
2406
2407 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2408                                            struct otp_info *buf, size_t len)
2409 {
2410         size_t retlen;
2411         int ret;
2412
2413         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2414         return ret ? : retlen;
2415 }
2416
2417 #endif
2418
2419 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2420 {
2421         struct mtd_erase_region_info *region;
2422         int block, status, i;
2423         unsigned long adr;
2424         size_t len;
2425
2426         for (i = 0; i < mtd->numeraseregions; i++) {
2427                 region = &mtd->eraseregions[i];
2428                 if (!region->lockmap)
2429                         continue;
2430
2431                 for (block = 0; block < region->numblocks; block++){
2432                         len = region->erasesize;
2433                         adr = region->offset + block * len;
2434
2435                         status = cfi_varsize_frob(mtd,
2436                                         do_getlockstatus_oneblock, adr, len, NULL);
2437                         if (status)
2438                                 set_bit(block, region->lockmap);
2439                         else
2440                                 clear_bit(block, region->lockmap);
2441                 }
2442         }
2443 }
2444
2445 static int cfi_intelext_suspend(struct mtd_info *mtd)
2446 {
2447         struct map_info *map = mtd->priv;
2448         struct cfi_private *cfi = map->fldrv_priv;
2449         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2450         int i;
2451         struct flchip *chip;
2452         int ret = 0;
2453
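        /* Power-up-lock parts may come back with every block locked, so
           snapshot the current lock state for resume to restore. */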
2454         if ((mtd->flags & MTD_POWERUP_LOCK)
2455             && extp && (extp->FeatureSupport & (1 << 5)))
2456                 cfi_intelext_save_locks(mtd);
2457
2458         for (i=0; !ret && i<cfi->numchips; i++) {
2459                 chip = &cfi->chips[i];
2460
2461                 mutex_lock(&chip->mutex);
2462
2463                 switch (chip->state) {
2464                 case FL_READY:
2465                 case FL_STATUS:
2466                 case FL_CFI_QUERY:
2467                 case FL_JEDEC_QUERY:
2468                         if (chip->oldstate == FL_READY) {
2469                                 /* place the chip in a known state before suspend */
2470                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2471                                 chip->oldstate = chip->state;
2472                                 chip->state = FL_PM_SUSPENDED;
2473                                 /* No need to wake_up() on this state change -
2474                                  * as the whole point is that nobody can do anything
2475                                  * with the chip now anyway.
2476                                  */
2477                         } else {
2478                                 /* There seems to be an operation pending. We must wait for it. */
2479                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2480                                 ret = -EAGAIN;
2481                         }
2482                         break;
2483                 default:
2484                         /* Should we actually wait? Once upon a time these routines weren't
2485                            allowed to. Or should we return -EAGAIN, because the upper layers
2486                            ought to have already shut down anything which was using the device
2487                            anyway? The latter for now. */
2488                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2489                         ret = -EAGAIN;
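                        /* fall through */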
2490                 case FL_PM_SUSPENDED:
2491                         break;
2492                 }
2493                 mutex_unlock(&chip->mutex);
2494         }
2495
2496         /* Unlock the chips again */
2497
2498         if (ret) {
2499                 for (i--; i >=0; i--) {
2500                         chip = &cfi->chips[i];
2501
2502                         mutex_lock(&chip->mutex);
2503
2504                         if (chip->state == FL_PM_SUSPENDED) {
2505                                 /* No need to force it into a known state here,
2506                                    because we're returning failure, and it didn't
2507                                    get power cycled */
2508                                 chip->state = chip->oldstate;
2509                                 chip->oldstate = FL_READY;
2510                                 wake_up(&chip->wq);
2511                         }
2512                         mutex_unlock(&chip->mutex);
2513                 }
2514         }
2515
2516         return ret;
2517 }
2518
2519 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2520 {
2521         struct mtd_erase_region_info *region;
2522         int block, i;
2523         unsigned long adr;
2524         size_t len;
2525
2526         for (i = 0; i < mtd->numeraseregions; i++) {
2527                 region = &mtd->eraseregions[i];
2528                 if (!region->lockmap)
2529                         continue;
2530
2531                 for (block = 0; block < region->numblocks; block++) {
2532                         len = region->erasesize;
2533                         adr = region->offset + block * len;
2534
2535                         if (!test_bit(block, region->lockmap))
2536                                 cfi_intelext_unlock(mtd, adr, len);
2537                 }
2538         }
2539 }
2540
2541 static void cfi_intelext_resume(struct mtd_info *mtd)
2542 {
2543         struct map_info *map = mtd->priv;
2544         struct cfi_private *cfi = map->fldrv_priv;
2545         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2546         int i;
2547         struct flchip *chip;
2548
2549         for (i=0; i<cfi->numchips; i++) {
2550
2551                 chip = &cfi->chips[i];
2552
2553                 mutex_lock(&chip->mutex);
2554
2555                 /* Go to known state. Chip may have been power cycled */
2556                 if (chip->state == FL_PM_SUSPENDED) {
2557                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2558                         chip->oldstate = chip->state = FL_READY;
2559                         wake_up(&chip->wq);
2560                 }
2561
2562                 mutex_unlock(&chip->mutex);
2563         }
2564
2565         if ((mtd->flags & MTD_POWERUP_LOCK)
2566             && extp && (extp->FeatureSupport & (1 << 5)))
2567                 cfi_intelext_restore_locks(mtd);
2568 }
2569
2570 static int cfi_intelext_reset(struct mtd_info *mtd)
2571 {
2572         struct map_info *map = mtd->priv;
2573         struct cfi_private *cfi = map->fldrv_priv;
2574         int i, ret;
2575
2576         for (i=0; i < cfi->numchips; i++) {
2577                 struct flchip *chip = &cfi->chips[i];
2578
2579                 /* force the completion of any ongoing operation
2580                    and switch to array mode so any bootloader in
2581                    flash is accessible for soft reboot. */
2582                 mutex_lock(&chip->mutex);
2583                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2584                 if (!ret) {
2585                         map_write(map, CMD(0xff), chip->start);
2586                         chip->state = FL_SHUTDOWN;
2587                         put_chip(map, chip, chip->start);
2588                 }
2589                 mutex_unlock(&chip->mutex);
2590         }
2591
2592         return 0;
2593 }
2594
2595 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2596                                void *v)
2597 {
2598         struct mtd_info *mtd;
2599
2600         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2601         cfi_intelext_reset(mtd);
2602         return NOTIFY_DONE;
2603 }
2604
2605 static void cfi_intelext_destroy(struct mtd_info *mtd)
2606 {
2607         struct map_info *map = mtd->priv;
2608         struct cfi_private *cfi = map->fldrv_priv;
2609         struct mtd_erase_region_info *region;
2610         int i;
2611         cfi_intelext_reset(mtd);
2612         unregister_reboot_notifier(&mtd->reboot_notifier);
2613         kfree(cfi->cmdset_priv);
2614         kfree(cfi->cfiq);
2615         kfree(cfi->chips[0].priv);
2616         kfree(cfi);
2617         for (i = 0; i < mtd->numeraseregions; i++) {
2618                 region = &mtd->eraseregions[i];
2619                 /* kfree() tolerates NULL, so no check is needed */
2620                 kfree(region->lockmap);
2621         }
2622         kfree(mtd->eraseregions);
2623 }
2624
2625 MODULE_LICENSE("GPL");
2626 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2627 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2628 MODULE_ALIAS("cfi_cmdset_0003");
2629 MODULE_ALIAS("cfi_cmdset_0200");