/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db

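/*
 * A quick reference for the Intel command-set opcodes used throughout this
 * file: 0xff = read array, 0x70 = read status register, 0x90 = read ID,
 * 0xb0 = program/erase suspend, 0xd0 = resume/confirm, 0x40 = word program,
 * 0xe8 = buffered program, 0x20 = block erase setup, 0x60 = lock setup.
 */
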
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

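/*
 * cfi_fixup() walks a table like the ones below and invokes each entry's
 * fixup handler when the probed chip's manufacturer and device IDs match;
 * CFI_MFR_ANY and CFI_ID_ANY act as wildcards.
 */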
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * as well.  This table is to pick all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};

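/*
 * The PF38F4476 reports extended query version 1.3 but apparently does not
 * carry the extra 1.3 fields, so downgrade it and parse it as a 1.1 table.
 */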
static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

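/*
 * The extended query table is variable length: the amount of data following
 * the fixed header depends on the table version and on per-chip counts.  We
 * therefore read the fixed part first, work out how much extra data it
 * implies, and re-read with a larger buffer until everything fits (bailing
 * out if the table claims to be larger than 4096 bytes).
 */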
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

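        /*
         * CFI timeout fields are log2-encoded: a "typical" field gives
         * 2^Typ microseconds for word/buffer writes and 2^Typ milliseconds
         * for block erase, while the "max" field is a log2 multiplier on
         * the typical value; hence the shifts below.
         */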
        for (i=0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
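
/*
 * Command sets 0x0003 and 0x0200 are handled by the same implementation,
 * hence the aliases below.
 */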
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

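        /*
         * Each EraseRegionInfo word packs (number of blocks - 1) in its low
         * 16 bits and (block size / 256 bytes) in its high 16 bits, which is
         * what the shifting and masking below unpacks.
         */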
        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
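                        /* one bit per block; remembers which blocks to
                           re-unlock on resume for power-up locked parts */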
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
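        /* Extended query 1.3+ with FeatureSupport bit 9 set means the chip
           supports simultaneous operations in multiple hardware partitions. */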
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
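                /* numparts is expected to be a power of two, so
                   __ffs(numparts) is effectively log2(numparts) */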
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;
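
        /*
         * Status register convention (Intel command set): SR.7 (0x80) is
         * the device-ready bit; on multi-partition parts SR.0 apparently
         * reports partition status, which is what the status_PWS checks
         * below rely on.
         */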

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can access the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have the possibility of contention on the
                 * write/erase operations which are global to the real chip
                 * and not per partition.  So let's fight it over in the
                 * partition which currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform the desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own the chip if it is already in
                         * FL_SYNCING state; put the contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to whoever we borrowed it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when the waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}
1186
1187 /*
1188  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1189  * the flash is actively programming or erasing since we have to poll for
1190  * the operation to complete anyway.  We can't do that in a generic way with
1191  * a XIP setup so do it before the actual flash operation in this case
1192  * and stub it out from INVAL_CACHE_AND_WAIT.
1193  */
1194 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1195         INVALIDATE_CACHED_RANGE(map, from, size)
1196
1197 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1198         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1199
1200 #else
1201
1202 #define xip_disable(map, chip, adr)
1203 #define xip_enable(map, chip, adr)
1204 #define XIP_INVAL_CACHED_RANGE(x...)
1205 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1206
1207 static int inval_cache_and_wait_for_operation(
1208                 struct map_info *map, struct flchip *chip,
1209                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1210                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1211 {
1212         struct cfi_private *cfi = map->fldrv_priv;
1213         map_word status, status_OK = CMD(0x80);
1214         int chip_state = chip->state;
1215         unsigned int timeo, sleep_time, reset_timeo;
1216
1217         mutex_unlock(&chip->mutex);
1218         if (inval_len)
1219                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1220         mutex_lock(&chip->mutex);
1221
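        /*
         * Poll budget, in microseconds: use the chip's stated worst-case
         * time for this operation, or fall back to 500 ms when the chip
         * did not provide one.
         */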
1222         timeo = chip_op_time_max;
1223         if (!timeo)
1224                 timeo = 500000;
1225         reset_timeo = timeo;
1226         sleep_time = chip_op_time / 2;
1227
1228         for (;;) {
1229                 if (chip->state != chip_state) {
1230                         /* Someone's suspended the operation: sleep */
1231                         DECLARE_WAITQUEUE(wait, current);
1232                         set_current_state(TASK_UNINTERRUPTIBLE);
1233                         add_wait_queue(&chip->wq, &wait);
1234                         mutex_unlock(&chip->mutex);
1235                         schedule();
1236                         remove_wait_queue(&chip->wq, &wait);
1237                         mutex_lock(&chip->mutex);
1238                         continue;
1239                 }
1240
1241                 status = map_read(map, cmd_adr);
1242                 if (map_word_andequal(map, status, status_OK, status_OK))
1243                         break;
1244
1245                 if (chip->erase_suspended && chip_state == FL_ERASING) {
1246                         /* Erase was suspended while we slept: reset the timeout */
1247                         timeo = reset_timeo;
1248                         chip->erase_suspended = 0;
1249                 }
1250                 if (chip->write_suspended && chip_state == FL_WRITING) {
1251                         /* Write was suspended while we slept: reset the timeout */
1252                         timeo = reset_timeo;
1253                         chip->write_suspended = 0;
1254                 }
1255                 if (!timeo) {
1256                         map_write(map, CMD(0x70), cmd_adr);
1257                         chip->state = FL_STATUS;
1258                         return -ETIME;
1259                 }
1260
1261                 /* OK Still waiting. Drop the lock, wait a while and retry. */
1262                 mutex_unlock(&chip->mutex);
1263                 if (sleep_time >= 1000000/HZ) {
1264                         /*
1265                          * Half of the normal delay still remaining
1266                          * can be performed with a sleeping delay instead
1267                          * of busy waiting.
1268                          */
1269                         msleep(sleep_time/1000);
1270                         timeo -= sleep_time;
1271                         sleep_time = 1000000/HZ;
1272                 } else {
1273                         udelay(1);
1274                         cond_resched();
1275                         timeo--;
1276                 }
1277                 mutex_lock(&chip->mutex);
1278         }
1279
1280         /* Done and happy. */
1281         chip->state = FL_STATUS;
1282         return 0;
1283 }
1284
1285 #endif
1286
1287 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1288         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1289
1290
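/*
 * Switch one chip into array (read) mode for mtd point().  The reference
 * counter lets overlapping point() calls share the FL_POINT state; the
 * chip drops back to FL_READY only when the last user unpoints.
 */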
1291 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1292 {
1293         unsigned long cmd_addr;
1294         struct cfi_private *cfi = map->fldrv_priv;
1295         int ret = 0;
1296
1297         adr += chip->start;
1298
1299         /* Ensure cmd read/writes are aligned. */
1300         cmd_addr = adr & ~(map_bankwidth(map)-1);
1301
1302         mutex_lock(&chip->mutex);
1303
1304         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1305
1306         if (!ret) {
1307                 if (chip->state != FL_POINT && chip->state != FL_READY)
1308                         map_write(map, CMD(0xff), cmd_addr);
1309
1310                 chip->state = FL_POINT;
1311                 chip->ref_point_counter++;
1312         }
1313         mutex_unlock(&chip->mutex);
1314
1315         return ret;
1316 }
1317
1318 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1319                 size_t *retlen, void **virt, resource_size_t *phys)
1320 {
1321         struct map_info *map = mtd->priv;
1322         struct cfi_private *cfi = map->fldrv_priv;
1323         unsigned long ofs, last_end = 0;
1324         int chipnum;
1325         int ret = 0;
1326
1327         if (!map->virt)
1328                 return -EINVAL;
1329
1330         /* Now lock the chip(s) to POINT state */
1331
1332         /* ofs: offset within the first chip that the first read should start */
1333         chipnum = (from >> cfi->chipshift);
1334         ofs = from - (chipnum << cfi->chipshift);
1335
1336         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1337         if (phys)
1338                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1339
1340         while (len) {
1341                 unsigned long thislen;
1342
1343                 if (chipnum >= cfi->numchips)
1344                         break;
1345
1346                 /* We cannot point across chips that are virtually disjoint */
1347                 if (!last_end)
1348                         last_end = cfi->chips[chipnum].start;
1349                 else if (cfi->chips[chipnum].start != last_end)
1350                         break;
1351
1352                 if ((len + ofs - 1) >> cfi->chipshift)
1353                         thislen = (1<<cfi->chipshift) - ofs;
1354                 else
1355                         thislen = len;
1356
1357                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1358                 if (ret)
1359                         break;
1360
1361                 *retlen += thislen;
1362                 len -= thislen;
1363
1364                 ofs = 0;
1365                 last_end += 1 << cfi->chipshift;
1366                 chipnum++;
1367         }
1368         return 0;
1369 }
1370
1371 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1372 {
1373         struct map_info *map = mtd->priv;
1374         struct cfi_private *cfi = map->fldrv_priv;
1375         unsigned long ofs;
1376         int chipnum, err = 0;
1377
1378         /* Now unlock the chip(s) POINT state */
1379
1380         /* ofs: offset within the first chip that the first read should start */
1381         chipnum = (from >> cfi->chipshift);
1382         ofs = from - (chipnum << cfi->chipshift);
1383
1384         while (len && !err) {
1385                 unsigned long thislen;
1386                 struct flchip *chip;
1387
1388                 if (chipnum >= cfi->numchips)
1389                         break;
1390                 chip = &cfi->chips[chipnum];
1391
1392                 if ((len + ofs - 1) >> cfi->chipshift)
1393                         thislen = (1<<cfi->chipshift) - ofs;
1394                 else
1395                         thislen = len;
1396
1397                 mutex_lock(&chip->mutex);
1398                 if (chip->state == FL_POINT) {
1399                         chip->ref_point_counter--;
1400                         if (chip->ref_point_counter == 0)
1401                                 chip->state = FL_READY;
1402                 } else {
1403                         printk(KERN_ERR "%s: Error: unpoint called on a non-pointed region\n", map->name);
1404                         err = -EINVAL;
1405                 }
1406
1407                 put_chip(map, chip, chip->start);
1408                 mutex_unlock(&chip->mutex);
1409
1410                 len -= thislen;
1411                 ofs = 0;
1412                 chipnum++;
1413         }
1414
1415         return err;
1416 }
1417
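/*
 * Read from one chip: issue 0xff (read array) if the chip is not already
 * in array mode, then copy straight out through the map.
 */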
1418 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1419 {
1420         unsigned long cmd_addr;
1421         struct cfi_private *cfi = map->fldrv_priv;
1422         int ret;
1423
1424         adr += chip->start;
1425
1426         /* Ensure cmd read/writes are aligned. */
1427         cmd_addr = adr & ~(map_bankwidth(map)-1);
1428
1429         mutex_lock(&chip->mutex);
1430         ret = get_chip(map, chip, cmd_addr, FL_READY);
1431         if (ret) {
1432                 mutex_unlock(&chip->mutex);
1433                 return ret;
1434         }
1435
1436         if (chip->state != FL_POINT && chip->state != FL_READY) {
1437                 map_write(map, CMD(0xff), cmd_addr);
1438
1439                 chip->state = FL_READY;
1440         }
1441
1442         map_copy_from(map, buf, adr, len);
1443
1444         put_chip(map, chip, cmd_addr);
1445
1446         mutex_unlock(&chip->mutex);
1447         return 0;
1448 }
1449
1450 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1451 {
1452         struct map_info *map = mtd->priv;
1453         struct cfi_private *cfi = map->fldrv_priv;
1454         unsigned long ofs;
1455         int chipnum;
1456         int ret = 0;
1457
1458         /* ofs: offset within the first chip that the first read should start */
1459         chipnum = (from >> cfi->chipshift);
1460         ofs = from - (chipnum << cfi->chipshift);
1461
1462         while (len) {
1463                 unsigned long thislen;
1464
1465                 if (chipnum >= cfi->numchips)
1466                         break;
1467
1468                 if ((len + ofs - 1) >> cfi->chipshift)
1469                         thislen = (1<<cfi->chipshift) - ofs;
1470                 else
1471                         thislen = len;
1472
1473                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1474                 if (ret)
1475                         break;
1476
1477                 *retlen += thislen;
1478                 len -= thislen;
1479                 buf += thislen;
1480
1481                 ofs = 0;
1482                 chipnum++;
1483         }
1484         return ret;
1485 }
1486
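/*
 * Program a single bus-width word.  0x40 is the classic word-program
 * setup (Performance-family chips take 0x41); 0xc0 targets the OTP
 * protection registers instead of the main array.
 */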
1487 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1488                                      unsigned long adr, map_word datum, int mode)
1489 {
1490         struct cfi_private *cfi = map->fldrv_priv;
1491         map_word status, write_cmd;
1492         int ret = 0;
1493
1494         adr += chip->start;
1495
1496         switch (mode) {
1497         case FL_WRITING:
1498                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1499                 break;
1500         case FL_OTP_WRITE:
1501                 write_cmd = CMD(0xc0);
1502                 break;
1503         default:
1504                 return -EINVAL;
1505         }
1506
1507         mutex_lock(&chip->mutex);
1508         ret = get_chip(map, chip, adr, mode);
1509         if (ret) {
1510                 mutex_unlock(&chip->mutex);
1511                 return ret;
1512         }
1513
1514         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1515         ENABLE_VPP(map);
1516         xip_disable(map, chip, adr);
1517         map_write(map, write_cmd, adr);
1518         map_write(map, datum, adr);
1519         chip->state = mode;
1520
1521         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1522                                    adr, map_bankwidth(map),
1523                                    chip->word_write_time,
1524                                    chip->word_write_time_max);
1525         if (ret) {
1526                 xip_enable(map, chip, adr);
1527                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1528                 goto out;
1529         }
1530
1531         /* check for errors */
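        /*
         * 0x1a masks SR.4 (program error), SR.3 (VPP low) and
         * SR.1 (block locked), i.e. everything that can go wrong here.
         */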
1532         status = map_read(map, adr);
1533         if (map_word_bitsset(map, status, CMD(0x1a))) {
1534                 unsigned long chipstatus = MERGESTATUS(status);
1535
1536                 /* reset status */
1537                 map_write(map, CMD(0x50), adr);
1538                 map_write(map, CMD(0x70), adr);
1539                 xip_enable(map, chip, adr);
1540
1541                 if (chipstatus & 0x02) {
1542                         ret = -EROFS;
1543                 } else if (chipstatus & 0x08) {
1544                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1545                         ret = -EIO;
1546                 } else {
1547                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1548                         ret = -EINVAL;
1549                 }
1550
1551                 goto out;
1552         }
1553
1554         xip_enable(map, chip, adr);
1555  out:   put_chip(map, chip, adr);
1556         mutex_unlock(&chip->mutex);
1557         return ret;
1558 }
1559
1560
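/*
 * Word-at-a-time write path: any unaligned head or tail is padded out
 * with 0xff before programming.  Programming can only clear bits, so
 * the 0xff padding leaves the neighbouring flash content untouched.
 */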
1561 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1562 {
1563         struct map_info *map = mtd->priv;
1564         struct cfi_private *cfi = map->fldrv_priv;
1565         int ret = 0;
1566         int chipnum;
1567         unsigned long ofs;
1568
1569         chipnum = to >> cfi->chipshift;
1570         ofs = to - (chipnum << cfi->chipshift);
1571
1572         /* If it's not bus-aligned, do the first byte write */
1573         if (ofs & (map_bankwidth(map)-1)) {
1574                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1575                 int gap = ofs - bus_ofs;
1576                 int n;
1577                 map_word datum;
1578
1579                 n = min_t(int, len, map_bankwidth(map)-gap);
1580                 datum = map_word_ff(map);
1581                 datum = map_word_load_partial(map, datum, buf, gap, n);
1582
1583                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1584                                                bus_ofs, datum, FL_WRITING);
1585                 if (ret)
1586                         return ret;
1587
1588                 len -= n;
1589                 ofs += n;
1590                 buf += n;
1591                 (*retlen) += n;
1592
1593                 if (ofs >> cfi->chipshift) {
1594                         chipnum++;
1595                         ofs = 0;
1596                         if (chipnum == cfi->numchips)
1597                                 return 0;
1598                 }
1599         }
1600
1601         while (len >= map_bankwidth(map)) {
1602                 map_word datum = map_word_load(map, buf);
1603
1604                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1605                                        ofs, datum, FL_WRITING);
1606                 if (ret)
1607                         return ret;
1608
1609                 ofs += map_bankwidth(map);
1610                 buf += map_bankwidth(map);
1611                 (*retlen) += map_bankwidth(map);
1612                 len -= map_bankwidth(map);
1613
1614                 if (ofs >> cfi->chipshift) {
1615                         chipnum++;
1616                         ofs = 0;
1617                         if (chipnum == cfi->numchips)
1618                                 return 0;
1619                 }
1620         }
1621
1622         if (len & (map_bankwidth(map)-1)) {
1623                 map_word datum;
1624
1625                 datum = map_word_ff(map);
1626                 datum = map_word_load_partial(map, datum, buf, 0, len);
1627
1628                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1629                                        ofs, datum, FL_WRITING);
1630                 if (ret)
1631                         return ret;
1632
1633                 (*retlen) += len;
1634         }
1635
1636         return 0;
1637 }
1638
1639
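/*
 * Buffered write: 0xe8 (0xe9 on Performance chips) arms the chip's
 * internal write buffer, then the word count and data follow, and 0xd0
 * confirms and starts the actual program operation.
 */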
1640 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1641                                     unsigned long adr, const struct kvec **pvec,
1642                                     unsigned long *pvec_seek, int len)
1643 {
1644         struct cfi_private *cfi = map->fldrv_priv;
1645         map_word status, write_cmd, datum;
1646         unsigned long cmd_adr;
1647         int ret, wbufsize, word_gap, words;
1648         const struct kvec *vec;
1649         unsigned long vec_seek;
1650         unsigned long initial_adr;
1651         int initial_len = len;
1652
1653         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1654         adr += chip->start;
1655         initial_adr = adr;
1656         cmd_adr = adr & ~(wbufsize-1);
1657
1658         /* Determine the write-buffer command for this interleave only once */
1659         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1660
1661         mutex_lock(&chip->mutex);
1662         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1663         if (ret) {
1664                 mutex_unlock(&chip->mutex);
1665                 return ret;
1666         }
1667
1668         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1669         ENABLE_VPP(map);
1670         xip_disable(map, chip, cmd_adr);
1671
1672         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1673            [...], the device will not accept any more Write to Buffer commands".
1674            So we must check here and reset those bits if they're set. Otherwise
1675            we're just pissing in the wind */
1676         if (chip->state != FL_STATUS) {
1677                 map_write(map, CMD(0x70), cmd_adr);
1678                 chip->state = FL_STATUS;
1679         }
1680         status = map_read(map, cmd_adr);
1681         if (map_word_bitsset(map, status, CMD(0x30))) {
1682                 xip_enable(map, chip, cmd_adr);
1683                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1684                 xip_disable(map, chip, cmd_adr);
1685                 map_write(map, CMD(0x50), cmd_adr);
1686                 map_write(map, CMD(0x70), cmd_adr);
1687         }
1688
1689         chip->state = FL_WRITING_TO_BUFFER;
1690         map_write(map, write_cmd, cmd_adr);
1691         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1692         if (ret) {
1693                 /* Argh. Not ready for write to buffer */
1694                 map_word Xstatus = map_read(map, cmd_adr);
1695                 map_write(map, CMD(0x70), cmd_adr);
1696                 chip->state = FL_STATUS;
1697                 status = map_read(map, cmd_adr);
1698                 map_write(map, CMD(0x50), cmd_adr);
1699                 map_write(map, CMD(0x70), cmd_adr);
1700                 xip_enable(map, chip, cmd_adr);
1701                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1702                                 map->name, Xstatus.x[0], status.x[0]);
1703                 goto out;
1704         }
1705
1706         /* Figure out the number of words to write */
1707         word_gap = (-adr & (map_bankwidth(map)-1));
1708         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1709         if (!word_gap) {
1710                 words--;
1711         } else {
1712                 word_gap = map_bankwidth(map) - word_gap;
1713                 adr -= word_gap;
1714                 datum = map_word_ff(map);
1715         }
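        /*
         * The chip expects the transfer length in words minus one.  When
         * the start is unaligned, the extra leading pad word absorbs the
         * minus one, hence no decrement in that branch.
         */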
1716
1717         /* Write length of data to come */
1718         map_write(map, CMD(words), cmd_adr );
1719         map_write(map, CMD(words), cmd_adr);
1720         /* Write data */
1721         vec = *pvec;
1722         vec_seek = *pvec_seek;
1723         do {
1724                 int n = map_bankwidth(map) - word_gap;
1725                 if (n > vec->iov_len - vec_seek)
1726                         n = vec->iov_len - vec_seek;
1727                 if (n > len)
1728                         n = len;
1729
1730                 if (!word_gap && len < map_bankwidth(map))
1731                         datum = map_word_ff(map);
1732
1733                 datum = map_word_load_partial(map, datum,
1734                                               vec->iov_base + vec_seek,
1735                                               word_gap, n);
1736
1737                 len -= n;
1738                 word_gap += n;
1739                 if (!len || word_gap == map_bankwidth(map)) {
1740                         map_write(map, datum, adr);
1741                         adr += map_bankwidth(map);
1742                         word_gap = 0;
1743                 }
1744
1745                 vec_seek += n;
1746                 if (vec_seek == vec->iov_len) {
1747                         vec++;
1748                         vec_seek = 0;
1749                 }
1750         } while (len);
1751         *pvec = vec;
1752         *pvec_seek = vec_seek;
1753
1754         /* GO GO GO */
1755         map_write(map, CMD(0xd0), cmd_adr);
1756         chip->state = FL_WRITING;
1757
1758         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1759                                    initial_adr, initial_len,
1760                                    chip->buffer_write_time,
1761                                    chip->buffer_write_time_max);
1762         if (ret) {
1763                 map_write(map, CMD(0x70), cmd_adr);
1764                 chip->state = FL_STATUS;
1765                 xip_enable(map, chip, cmd_adr);
1766                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1767                 goto out;
1768         }
1769
1770         /* check for errors */
1771         status = map_read(map, cmd_adr);
1772         if (map_word_bitsset(map, status, CMD(0x1a))) {
1773                 unsigned long chipstatus = MERGESTATUS(status);
1774
1775                 /* reset status */
1776                 map_write(map, CMD(0x50), cmd_adr);
1777                 map_write(map, CMD(0x70), cmd_adr);
1778                 xip_enable(map, chip, cmd_adr);
1779
1780                 if (chipstatus & 0x02) {
1781                         ret = -EROFS;
1782                 } else if (chipstatus & 0x08) {
1783                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1784                         ret = -EIO;
1785                 } else {
1786                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1787                         ret = -EINVAL;
1788                 }
1789
1790                 goto out;
1791         }
1792
1793         xip_enable(map, chip, cmd_adr);
1794  out:   put_chip(map, chip, cmd_adr);
1795         mutex_unlock(&chip->mutex);
1796         return ret;
1797 }
1798
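/*
 * Scatter-gather front end: slice the iovec stream into chunks that
 * never cross a write-buffer boundary and feed them to do_write_buffer()
 * chip by chip.
 */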
1799 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1800                                 unsigned long count, loff_t to, size_t *retlen)
1801 {
1802         struct map_info *map = mtd->priv;
1803         struct cfi_private *cfi = map->fldrv_priv;
1804         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1805         int ret = 0;
1806         int chipnum;
1807         unsigned long ofs, vec_seek, i;
1808         size_t len = 0;
1809
1810         for (i = 0; i < count; i++)
1811                 len += vecs[i].iov_len;
1812
1813         if (!len)
1814                 return 0;
1815
1816         chipnum = to >> cfi->chipshift;
1817         ofs = to - (chipnum << cfi->chipshift);
1818         vec_seek = 0;
1819
1820         do {
1821                 /* We must not cross write block boundaries */
1822                 int size = wbufsize - (ofs & (wbufsize-1));
1823
1824                 if (size > len)
1825                         size = len;
1826                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1827                                       ofs, &vecs, &vec_seek, size);
1828                 if (ret)
1829                         return ret;
1830
1831                 ofs += size;
1832                 (*retlen) += size;
1833                 len -= size;
1834
1835                 if (ofs >> cfi->chipshift) {
1836                         chipnum++;
1837                         ofs = 0;
1838                         if (chipnum == cfi->numchips)
1839                                 return 0;
1840                 }
1841
1842                 /* Be nice and reschedule with the chip in a usable state for other
1843                    processes. */
1844                 cond_resched();
1845
1846         } while (len);
1847
1848         return 0;
1849 }
1850
1851 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1852                                        size_t len, size_t *retlen, const u_char *buf)
1853 {
1854         struct kvec vec;
1855
1856         vec.iov_base = (void *) buf;
1857         vec.iov_len = len;
1858
1859         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1860 }
1861
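/*
 * Erase one block: clear the status register (0x50), issue block erase
 * plus confirm (0x20/0xd0) and poll.  The error mask 0x3a adds SR.5
 * (erase error) to the bits already checked on the write paths.
 */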
1862 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1863                                       unsigned long adr, int len, void *thunk)
1864 {
1865         struct cfi_private *cfi = map->fldrv_priv;
1866         map_word status;
1867         int retries = 3;
1868         int ret;
1869
1870         adr += chip->start;
1871
1872  retry:
1873         mutex_lock(&chip->mutex);
1874         ret = get_chip(map, chip, adr, FL_ERASING);
1875         if (ret) {
1876                 mutex_unlock(&chip->mutex);
1877                 return ret;
1878         }
1879
1880         XIP_INVAL_CACHED_RANGE(map, adr, len);
1881         ENABLE_VPP(map);
1882         xip_disable(map, chip, adr);
1883
1884         /* Clear the status register first */
1885         map_write(map, CMD(0x50), adr);
1886
1887         /* Now erase */
1888         map_write(map, CMD(0x20), adr);
1889         map_write(map, CMD(0xD0), adr);
1890         chip->state = FL_ERASING;
1891         chip->erase_suspended = 0;
1892
1893         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1894                                    adr, len,
1895                                    chip->erase_time,
1896                                    chip->erase_time_max);
1897         if (ret) {
1898                 map_write(map, CMD(0x70), adr);
1899                 chip->state = FL_STATUS;
1900                 xip_enable(map, chip, adr);
1901                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1902                 goto out;
1903         }
1904
1905         /* We've broken this before. It doesn't hurt to be safe */
1906         map_write(map, CMD(0x70), adr);
1907         chip->state = FL_STATUS;
1908         status = map_read(map, adr);
1909
1910         /* check for errors */
1911         if (map_word_bitsset(map, status, CMD(0x3a))) {
1912                 unsigned long chipstatus = MERGESTATUS(status);
1913
1914                 /* Reset the error bits */
1915                 map_write(map, CMD(0x50), adr);
1916                 map_write(map, CMD(0x70), adr);
1917                 xip_enable(map, chip, adr);
1918
1919                 if ((chipstatus & 0x30) == 0x30) {
1920                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1921                         ret = -EINVAL;
1922                 } else if (chipstatus & 0x02) {
1923                         /* Protection bit set */
1924                         ret = -EROFS;
1925                 } else if (chipstatus & 0x8) {
1926                         /* Voltage */
1927                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1928                         ret = -EIO;
1929                 } else if (chipstatus & 0x20 && retries--) {
1930                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1931                         put_chip(map, chip, adr);
1932                         mutex_unlock(&chip->mutex);
1933                         goto retry;
1934                 } else {
1935                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1936                         ret = -EIO;
1937                 }
1938
1939                 goto out;
1940         }
1941
1942         xip_enable(map, chip, adr);
1943  out:   put_chip(map, chip, adr);
1944         mutex_unlock(&chip->mutex);
1945         return ret;
1946 }
1947
1948 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1949 {
1950         unsigned long ofs, len;
1951         int ret;
1952
1953         ofs = instr->addr;
1954         len = instr->len;
1955
1956         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1957         if (ret)
1958                 return ret;
1959
1960         instr->state = MTD_ERASE_DONE;
1961         mtd_erase_callback(instr);
1962
1963         return 0;
1964 }
1965
1966 static void cfi_intelext_sync (struct mtd_info *mtd)
1967 {
1968         struct map_info *map = mtd->priv;
1969         struct cfi_private *cfi = map->fldrv_priv;
1970         int i;
1971         struct flchip *chip;
1972         int ret = 0;
1973
1974         for (i=0; !ret && i<cfi->numchips; i++) {
1975                 chip = &cfi->chips[i];
1976
1977                 mutex_lock(&chip->mutex);
1978                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1979
1980                 if (!ret) {
1981                         chip->oldstate = chip->state;
1982                         chip->state = FL_SYNCING;
1983                         /* No need to wake_up() on this state change -
1984                          * as the whole point is that nobody can do anything
1985                          * with the chip now anyway.
1986                          */
1987                 }
1988                 mutex_unlock(&chip->mutex);
1989         }
1990
1991         /* Unlock the chips again */
1992
1993         for (i--; i >=0; i--) {
1994                 chip = &cfi->chips[i];
1995
1996                 mutex_lock(&chip->mutex);
1997
1998                 if (chip->state == FL_SYNCING) {
1999                         chip->state = chip->oldstate;
2000                         chip->oldstate = FL_READY;
2001                         wake_up(&chip->wq);
2002                 }
2003                 mutex_unlock(&chip->mutex);
2004         }
2005 }
2006
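/*
 * In read-identifier (0x90) mode the word at block base + 2 holds the
 * block lock status: bit 0 is the lock bit, bit 1 the lock-down bit on
 * chips that implement it.
 */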
2007 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2008                                                 struct flchip *chip,
2009                                                 unsigned long adr,
2010                                                 int len, void *thunk)
2011 {
2012         struct cfi_private *cfi = map->fldrv_priv;
2013         int status, ofs_factor = cfi->interleave * cfi->device_type;
2014
2015         adr += chip->start;
2016         xip_disable(map, chip, adr+(2*ofs_factor));
2017         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2018         chip->state = FL_JEDEC_QUERY;
2019         status = cfi_read_query(map, adr+(2*ofs_factor));
2020         xip_enable(map, chip, 0);
2021         return status;
2022 }
2023
2024 #ifdef DEBUG_LOCK_BITS
2025 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2026                                                 struct flchip *chip,
2027                                                 unsigned long adr,
2028                                                 int len, void *thunk)
2029 {
2030         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2031                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2032         return 0;
2033 }
2034 #endif
2035
2036 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2037 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2038
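/*
 * 0x60 arms the block lock/unlock sequence; 0x01 then sets the lock bit
 * while 0xd0 clears it.  Chips without instant individual block locking
 * need about one timer tick for the operation to complete.
 */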
2039 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2040                                        unsigned long adr, int len, void *thunk)
2041 {
2042         struct cfi_private *cfi = map->fldrv_priv;
2043         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2044         int udelay;
2045         int ret;
2046
2047         adr += chip->start;
2048
2049         mutex_lock(&chip->mutex);
2050         ret = get_chip(map, chip, adr, FL_LOCKING);
2051         if (ret) {
2052                 mutex_unlock(&chip->mutex);
2053                 return ret;
2054         }
2055
2056         ENABLE_VPP(map);
2057         xip_disable(map, chip, adr);
2058
2059         map_write(map, CMD(0x60), adr);
2060         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2061                 map_write(map, CMD(0x01), adr);
2062                 chip->state = FL_LOCKING;
2063         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2064                 map_write(map, CMD(0xD0), adr);
2065                 chip->state = FL_UNLOCKING;
2066         } else
2067                 BUG();
2068
2069         /*
2070          * If Instant Individual Block Locking supported then no need
2071          * to delay.
2072          */
2073         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
2074
2075         ret = WAIT_TIMEOUT(map, chip, adr, udelay, udelay * 100);
2076         if (ret) {
2077                 map_write(map, CMD(0x70), adr);
2078                 chip->state = FL_STATUS;
2079                 xip_enable(map, chip, adr);
2080                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2081                 goto out;
2082         }
2083
2084         xip_enable(map, chip, adr);
2085 out:    put_chip(map, chip, adr);
2086         mutex_unlock(&chip->mutex);
2087         return ret;
2088 }
2089
2090 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2091 {
2092         int ret;
2093
2094 #ifdef DEBUG_LOCK_BITS
2095         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2096                __func__, (unsigned long long)ofs, (unsigned long long)len);
2097         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2098                 ofs, len, NULL);
2099 #endif
2100
2101         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2102                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2103
2104 #ifdef DEBUG_LOCK_BITS
2105         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2106                __func__, ret);
2107         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2108                 ofs, len, NULL);
2109 #endif
2110
2111         return ret;
2112 }
2113
2114 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2115 {
2116         int ret;
2117
2118 #ifdef DEBUG_LOCK_BITS
2119         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2120                __func__, (unsigned long long)ofs, (unsigned long long)len);
2121         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2122                 ofs, len, NULL);
2123 #endif
2124
2125         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2126                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2127
2128 #ifdef DEBUG_LOCK_BITS
2129         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2130                __func__, ret);
2131         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2132                 ofs, len, NULL);
2133 #endif
2134
2135         return ret;
2136 }
2137
2138 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2139                                   uint64_t len)
2140 {
2141         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2142                                 ofs, len, NULL) ? 1 : 0;
2143 }
2144
2145 #ifdef CONFIG_MTD_OTP
2146
2147 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2148                         u_long data_offset, u_char *buf, u_int size,
2149                         u_long prot_offset, u_int groupno, u_int groupsize);
2150
2151 static int __xipram
2152 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2153             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2154 {
2155         struct cfi_private *cfi = map->fldrv_priv;
2156         int ret;
2157
2158         mutex_lock(&chip->mutex);
2159         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2160         if (ret) {
2161                 mutex_unlock(&chip->mutex);
2162                 return ret;
2163         }
2164
2165         /* let's ensure we're not reading back cached data from array mode */
2166         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2167
2168         xip_disable(map, chip, chip->start);
2169         if (chip->state != FL_JEDEC_QUERY) {
2170                 map_write(map, CMD(0x90), chip->start);
2171                 chip->state = FL_JEDEC_QUERY;
2172         }
2173         map_copy_from(map, buf, chip->start + offset, size);
2174         xip_enable(map, chip, chip->start);
2175
2176         /* then ensure we don't keep OTP data in the cache */
2177         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2178
2179         put_chip(map, chip, chip->start);
2180         mutex_unlock(&chip->mutex);
2181         return 0;
2182 }
2183
2184 static int
2185 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2186              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2187 {
2188         int ret;
2189
2190         while (size) {
2191                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2192                 int gap = offset - bus_ofs;
2193                 int n = min_t(int, size, map_bankwidth(map)-gap);
2194                 map_word datum = map_word_ff(map);
2195
2196                 datum = map_word_load_partial(map, datum, buf, gap, n);
2197                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2198                 if (ret)
2199                         return ret;
2200
2201                 offset += n;
2202                 buf += n;
2203                 size -= n;
2204         }
2205
2206         return 0;
2207 }
2208
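/*
 * Lock an OTP group by programming its bit in the protection lock
 * register to 0.  Programming can only clear bits, so this is one-way.
 */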
2209 static int
2210 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2211             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2212 {
2213         struct cfi_private *cfi = map->fldrv_priv;
2214         map_word datum;
2215
2216         /* make sure area matches group boundaries */
2217         if (size != grpsz)
2218                 return -EXDEV;
2219
2220         datum = map_word_ff(map);
2221         datum = map_word_clr(map, datum, CMD(1 << grpno));
2222         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2223 }
2224
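/*
 * Walk the OTP protection regions, applying 'action' (read, write or
 * lock) to each group that intersects the request.  A NULL action is the
 * special case that fills 'buf' with otp_info records instead.
 */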
2225 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2226                                  size_t *retlen, u_char *buf,
2227                                  otp_op_t action, int user_regs)
2228 {
2229         struct map_info *map = mtd->priv;
2230         struct cfi_private *cfi = map->fldrv_priv;
2231         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2232         struct flchip *chip;
2233         struct cfi_intelext_otpinfo *otp;
2234         u_long devsize, reg_prot_offset, data_offset;
2235         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2236         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2237         int ret;
2238
2239         *retlen = 0;
2240
2241         /* Check that we actually have some OTP registers */
2242         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2243                 return -ENODATA;
2244
2245         /* we need real chips here not virtual ones */
2246         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2247         chip_step = devsize >> cfi->chipshift;
2248         chip_num = 0;
2249
2250         /* Some chips have OTP located in the _top_ partition only.
2251            For example: Intel 28F256L18T (T means top-parameter device) */
2252         if (cfi->mfr == CFI_MFR_INTEL) {
2253                 switch (cfi->id) {
2254                 case 0x880b:
2255                 case 0x880c:
2256                 case 0x880d:
2257                         chip_num = chip_step - 1;
2258                 }
2259         }
2260
2261         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2262                 chip = &cfi->chips[chip_num];
2263                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2264
2265                 /* first OTP region */
2266                 field = 0;
2267                 reg_prot_offset = extp->ProtRegAddr;
2268                 reg_fact_groups = 1;
2269                 reg_fact_size = 1 << extp->FactProtRegSize;
2270                 reg_user_groups = 1;
2271                 reg_user_size = 1 << extp->UserProtRegSize;
2272
2273                 while (len > 0) {
2274                         /* flash geometry fixup */
2275                         data_offset = reg_prot_offset + 1;
2276                         data_offset *= cfi->interleave * cfi->device_type;
2277                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2278                         reg_fact_size *= cfi->interleave;
2279                         reg_user_size *= cfi->interleave;
2280
2281                         if (user_regs) {
2282                                 groups = reg_user_groups;
2283                                 groupsize = reg_user_size;
2284                                 /* skip over factory reg area */
2285                                 groupno = reg_fact_groups;
2286                                 data_offset += reg_fact_groups * reg_fact_size;
2287                         } else {
2288                                 groups = reg_fact_groups;
2289                                 groupsize = reg_fact_size;
2290                                 groupno = 0;
2291                         }
2292
2293                         while (len > 0 && groups > 0) {
2294                                 if (!action) {
2295                                         /*
2296                                          * Special case: if action is NULL
2297                                          * we fill buf with otp_info records.
2298                                          */
2299                                         struct otp_info *otpinfo;
2300                                         map_word lockword;
2301                                         if (len <= sizeof(struct otp_info))
2302                                                 return -ENOSPC;
2303                                         len -= sizeof(struct otp_info);
2304                                         ret = do_otp_read(map, chip,
2305                                                           reg_prot_offset,
2306                                                           (u_char *)&lockword,
2307                                                           map_bankwidth(map),
2308                                                           0, 0,  0);
2309                                         if (ret)
2310                                                 return ret;
2311                                         otpinfo = (struct otp_info *)buf;
2312                                         otpinfo->start = from;
2313                                         otpinfo->length = groupsize;
2314                                         otpinfo->locked =
2315                                            !map_word_bitsset(map, lockword,
2316                                                              CMD(1 << groupno));
2317                                         from += groupsize;
2318                                         buf += sizeof(*otpinfo);
2319                                         *retlen += sizeof(*otpinfo);
2320                                 } else if (from >= groupsize) {
2321                                         from -= groupsize;
2322                                         data_offset += groupsize;
2323                                 } else {
2324                                         int size = groupsize;
2325                                         data_offset += from;
2326                                         size -= from;
2327                                         from = 0;
2328                                         if (size > len)
2329                                                 size = len;
2330                                         ret = action(map, chip, data_offset,
2331                                                      buf, size, reg_prot_offset,
2332                                                      groupno, groupsize);
2333                                         if (ret < 0)
2334                                                 return ret;
2335                                         buf += size;
2336                                         len -= size;
2337                                         *retlen += size;
2338                                         data_offset += size;
2339                                 }
2340                                 groupno++;
2341                                 groups--;
2342                         }
2343
2344                         /* next OTP region */
2345                         if (++field == extp->NumProtectionFields)
2346                                 break;
2347                         reg_prot_offset = otp->ProtRegAddr;
2348                         reg_fact_groups = otp->FactGroups;
2349                         reg_fact_size = 1 << otp->FactProtRegSize;
2350                         reg_user_groups = otp->UserGroups;
2351                         reg_user_size = 1 << otp->UserProtRegSize;
2352                         otp++;
2353                 }
2354         }
2355
2356         return 0;
2357 }
2358
2359 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2360                                            size_t len, size_t *retlen,
2361                                             u_char *buf)
2362 {
2363         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2364                                      buf, do_otp_read, 0);
2365 }
2366
2367 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2368                                            size_t len, size_t *retlen,
2369                                             u_char *buf)
2370 {
2371         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2372                                      buf, do_otp_read, 1);
2373 }
2374
2375 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2376                                             size_t len, size_t *retlen,
2377                                              u_char *buf)
2378 {
2379         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2380                                      buf, do_otp_write, 1);
2381 }
2382
2383 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2384                                            loff_t from, size_t len)
2385 {
2386         size_t retlen;
2387         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2388                                      NULL, do_otp_lock, 1);
2389 }
2390
2391 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2392                                            struct otp_info *buf, size_t len)
2393 {
2394         size_t retlen;
2395         int ret;
2396
2397         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2398         return ret ? : retlen;
2399 }
2400
2401 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2402                                            struct otp_info *buf, size_t len)
2403 {
2404         size_t retlen;
2405         int ret;
2406
2407         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2408         return ret ? : retlen;
2409 }
2410
2411 #endif
2412
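/*
 * Snapshot every block's lock bit into the per-region lockmaps so that
 * the locks can be re-applied on resume for chips that power up with
 * all blocks locked (MTD_POWERUP_LOCK).
 */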
2413 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2414 {
2415         struct mtd_erase_region_info *region;
2416         int block, status, i;
2417         unsigned long adr;
2418         size_t len;
2419
2420         for (i = 0; i < mtd->numeraseregions; i++) {
2421                 region = &mtd->eraseregions[i];
2422                 if (!region->lockmap)
2423                         continue;
2424
2425                 for (block = 0; block < region->numblocks; block++){
2426                         len = region->erasesize;
2427                         adr = region->offset + block * len;
2428
2429                         status = cfi_varsize_frob(mtd,
2430                                         do_getlockstatus_oneblock, adr, len, NULL);
2431                         if (status)
2432                                 set_bit(block, region->lockmap);
2433                         else
2434                                 clear_bit(block, region->lockmap);
2435                 }
2436         }
2437 }
2438
2439 static int cfi_intelext_suspend(struct mtd_info *mtd)
2440 {
2441         struct map_info *map = mtd->priv;
2442         struct cfi_private *cfi = map->fldrv_priv;
2443         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2444         int i;
2445         struct flchip *chip;
2446         int ret = 0;
2447
2448         if ((mtd->flags & MTD_POWERUP_LOCK)
2449             && extp && (extp->FeatureSupport & (1 << 5)))
2450                 cfi_intelext_save_locks(mtd);
2451
2452         for (i=0; !ret && i<cfi->numchips; i++) {
2453                 chip = &cfi->chips[i];
2454
2455                 mutex_lock(&chip->mutex);
2456
2457                 switch (chip->state) {
2458                 case FL_READY:
2459                 case FL_STATUS:
2460                 case FL_CFI_QUERY:
2461                 case FL_JEDEC_QUERY:
2462                         if (chip->oldstate == FL_READY) {
2463                                 /* place the chip in a known state before suspend */
2464                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2465                                 chip->oldstate = chip->state;
2466                                 chip->state = FL_PM_SUSPENDED;
2467                                 /* No need to wake_up() on this state change -
2468                                  * as the whole point is that nobody can do anything
2469                                  * with the chip now anyway.
2470                                  */
2471                         } else {
2472                                 /* There seems to be an operation pending. We must wait for it. */
2473                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2474                                 ret = -EAGAIN;
2475                         }
2476                         break;
2477                 default:
2478                         /* Should we actually wait? Once upon a time these routines weren't
2479                            allowed to. Or should we return -EAGAIN, because the upper layers
2480                            ought to have already shut down anything which was using the device
2481                            anyway? The latter for now. */
2482                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2483                         ret = -EAGAIN;
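                        /* fall through */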
2484                 case FL_PM_SUSPENDED:
2485                         break;
2486                 }
2487                 mutex_unlock(&chip->mutex);
2488         }
2489
2490         /* Unlock the chips again */
2491
2492         if (ret) {
2493                 for (i--; i >=0; i--) {
2494                         chip = &cfi->chips[i];
2495
2496                         mutex_lock(&chip->mutex);
2497
2498                         if (chip->state == FL_PM_SUSPENDED) {
2499                                 /* No need to force it into a known state here,
2500                                    because we're returning failure, and it didn't
2501                                    get power cycled */
2502                                 chip->state = chip->oldstate;
2503                                 chip->oldstate = FL_READY;
2504                                 wake_up(&chip->wq);
2505                         }
2506                         mutex_unlock(&chip->mutex);
2507                 }
2508         }
2509
2510         return ret;
2511 }
2512
2513 static void cfi_intelext_restore_locks(struct mtd_info *mtd)
2514 {
2515         struct mtd_erase_region_info *region;
2516         int block, i;
2517         unsigned long adr;
2518         size_t len;
2519
2520         for (i = 0; i < mtd->numeraseregions; i++) {
2521                 region = &mtd->eraseregions[i];
2522                 if (!region->lockmap)
2523                         continue;
2524
2525                 for (block = 0; block < region->numblocks; block++) {
2526                         len = region->erasesize;
2527                         adr = region->offset + block * len;
2528
2529                         if (!test_bit(block, region->lockmap))
2530                                 cfi_intelext_unlock(mtd, adr, len);
2531                 }
2532         }
2533 }
2534
2535 static void cfi_intelext_resume(struct mtd_info *mtd)
2536 {
2537         struct map_info *map = mtd->priv;
2538         struct cfi_private *cfi = map->fldrv_priv;
2539         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2540         int i;
2541         struct flchip *chip;
2542
2543         for (i=0; i<cfi->numchips; i++) {
2544
2545                 chip = &cfi->chips[i];
2546
2547                 mutex_lock(&chip->mutex);
2548
2549                 /* Go to known state. Chip may have been power cycled */
2550                 if (chip->state == FL_PM_SUSPENDED) {
2551                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2552                         chip->oldstate = chip->state = FL_READY;
2553                         wake_up(&chip->wq);
2554                 }
2555
2556                 mutex_unlock(&chip->mutex);
2557         }
2558
2559         if ((mtd->flags & MTD_POWERUP_LOCK)
2560             && extp && (extp->FeatureSupport & (1 << 5)))
2561                 cfi_intelext_restore_locks(mtd);
2562 }
2563
2564 static int cfi_intelext_reset(struct mtd_info *mtd)
2565 {
2566         struct map_info *map = mtd->priv;
2567         struct cfi_private *cfi = map->fldrv_priv;
2568         int i, ret;
2569
2570         for (i=0; i < cfi->numchips; i++) {
2571                 struct flchip *chip = &cfi->chips[i];
2572
2573                 /* force the completion of any ongoing operation
2574                    and switch to array mode so any bootloader in
2575                    flash is accessible for soft reboot. */
2576                 mutex_lock(&chip->mutex);
2577                 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
2578                 if (!ret) {
2579                         map_write(map, CMD(0xff), chip->start);
2580                         chip->state = FL_SHUTDOWN;
2581                         put_chip(map, chip, chip->start);
2582                 }
2583                 mutex_unlock(&chip->mutex);
2584         }
2585
2586         return 0;
2587 }
2588
2589 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2590                                void *v)
2591 {
2592         struct mtd_info *mtd;
2593
2594         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2595         cfi_intelext_reset(mtd);
2596         return NOTIFY_DONE;
2597 }
2598
2599 static void cfi_intelext_destroy(struct mtd_info *mtd)
2600 {
2601         struct map_info *map = mtd->priv;
2602         struct cfi_private *cfi = map->fldrv_priv;
2603         struct mtd_erase_region_info *region;
2604         int i;
2605         cfi_intelext_reset(mtd);
2606         unregister_reboot_notifier(&mtd->reboot_notifier);
2607         kfree(cfi->cmdset_priv);
2608         kfree(cfi->cfiq);
2609         kfree(cfi->chips[0].priv);
2610         kfree(cfi);
2611         for (i = 0; i < mtd->numeraseregions; i++) {
2612                 region = &mtd->eraseregions[i];
2613                 kfree(region->lockmap);
2615         }
2616         kfree(mtd->eraseregions);
2617 }
2618
2619 MODULE_LICENSE("GPL");
2620 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2621 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2622 MODULE_ALIAS("cfi_cmdset_0003");
2623 MODULE_ALIAS("cfi_cmdset_0200");