1 /*
2  * Common Flash Interface support:
3  *   ST Advanced Architecture Command Set (ID 0x0020)
4  *
5  * (C) 2000 Red Hat. GPL'd
6  *
7  * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
8  *      - completely revamped method functions so they are aware and
9  *        independent of the flash geometry (buswidth, interleave, etc.)
10  *      - scalability vs code size is completely set at compile-time
11  *        (see include/linux/mtd/cfi.h for selection)
12  *      - optimized write buffer method
13  * 06/21/2002   Joern Engel <joern@wh.fh-wedel.de> and others
14  *      - modified Intel Command Set 0x0001 to support ST Advanced Architecture
15  *        (command set 0x0020)
16  *      - added a writev function
17  * 07/13/2005   Joern Engel <joern@wh.fh-wedel.de>
18  *      - Plugged memory leak in cfi_staa_writev().
19  */
20
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
26 #include <asm/io.h>
27 #include <asm/byteorder.h>
28
29 #include <linux/errno.h>
30 #include <linux/slab.h>
31 #include <linux/delay.h>
32 #include <linux/interrupt.h>
33 #include <linux/mtd/map.h>
34 #include <linux/mtd/cfi.h>
35 #include <linux/mtd/mtd.h>
36
37
38 static int cfi_staa_read(struct mtd_info *, loff_t, size_t, size_t *, u_char *);
39 static int cfi_staa_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
40 static int cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
41                 unsigned long count, loff_t to, size_t *retlen);
42 static int cfi_staa_erase_varsize(struct mtd_info *, struct erase_info *);
43 static void cfi_staa_sync (struct mtd_info *);
44 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
45 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
46 static int cfi_staa_suspend (struct mtd_info *);
47 static void cfi_staa_resume (struct mtd_info *);
48
49 static void cfi_staa_destroy(struct mtd_info *);
50
51 struct mtd_info *cfi_cmdset_0020(struct map_info *, int);
52
53 static struct mtd_info *cfi_staa_setup (struct map_info *);
54
55 static struct mtd_chip_driver cfi_staa_chipdrv = {
56         .probe          = NULL, /* Not usable directly */
57         .destroy        = cfi_staa_destroy,
58         .name           = "cfi_cmdset_0020",
59         .module         = THIS_MODULE
60 };
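/*
 * Note: .probe is NULL because this driver is not probed directly; the
 * generic CFI probe identifies the primary command set ID (0x0020) and
 * then calls cfi_cmdset_0020() below to take over the chip.
 */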
61
62 /* #define DEBUG_LOCK_BITS */
63 //#define DEBUG_CFI_FEATURES
64
65 #ifdef DEBUG_CFI_FEATURES
66 static void cfi_tell_features(struct cfi_pri_intelext *extp)
67 {
68         int i;
69         printk("  Feature/Command Support: %4.4X\n", extp->FeatureSupport);
70         printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
71         printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
72         printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
73         printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
74         printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
75         printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
76         printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
77         printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
78         printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
79         for (i=9; i<32; i++) {
80                 if (extp->FeatureSupport & (1<<i))
81                         printk("     - Unknown Bit %X:      supported\n", i);
82         }
83
84         printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
85         printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
86         for (i=1; i<8; i++) {
87                 if (extp->SuspendCmdSupport & (1<<i))
88                         printk("     - Unknown Bit %X:               supported\n", i);
89         }
90
91         printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
92         printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
93         printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
94         for (i=2; i<16; i++) {
95                 if (extp->BlkStatusRegMask & (1<<i))
96                         printk("     - Unknown Bit %X Active: yes\n",i);
97         }
98
99         printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
100                extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
101         if (extp->VppOptimal)
102                 printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
103                        extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
104 }
105 #endif
106
107 /* This routine is made available to other mtd code via
108  * EXPORT_SYMBOL_GPL (historically it was exported through the old
109  * inter_module_register mechanism).  The addresses passed back in
110  * cfi are valid as long as the use count of this module is non-zero;
111  * cfi_staa_setup() takes a reference on the module itself.
112  * Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
113  */
114 struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
115 {
116         struct cfi_private *cfi = map->fldrv_priv;
117         int i;
118
119         if (cfi->cfi_mode) {
120                 /*
121                  * It's a real CFI chip, not one for which the probe
122                  * routine faked a CFI structure. So we read the feature
123                  * table from it.
124                  */
125                 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
126                 struct cfi_pri_intelext *extp;
127
128                 extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics");
129                 if (!extp)
130                         return NULL;
131
132                 if (extp->MajorVersion != '1' ||
133                     (extp->MinorVersion < '0' || extp->MinorVersion > '3')) {
134                         printk(KERN_ERR "  Unknown ST Microelectronics"
135                                " Extended Query version %c.%c.\n",
136                                extp->MajorVersion, extp->MinorVersion);
137                         kfree(extp);
138                         return NULL;
139                 }
140
141                 /* Do some byteswapping if necessary */
142                 extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
143                 extp->BlkStatusRegMask = cfi32_to_cpu(map,
144                                                 extp->BlkStatusRegMask);
145
146 #ifdef DEBUG_CFI_FEATURES
147                 /* Tell the user about it in lots of lovely detail */
148                 cfi_tell_features(extp);
149 #endif
150
151                 /* Install our own private info structure */
152                 cfi->cmdset_priv = extp;
153         }
154
155         for (i=0; i< cfi->numchips; i++) {
156                 cfi->chips[i].word_write_time = 128;
157                 cfi->chips[i].buffer_write_time = 128;
158                 cfi->chips[i].erase_time = 1024;
159                 cfi->chips[i].ref_point_counter = 0;
160                 init_waitqueue_head(&(cfi->chips[i].wq));
161         }
162
163         return cfi_staa_setup(map);
164 }
165 EXPORT_SYMBOL_GPL(cfi_cmdset_0020);
166
167 static struct mtd_info *cfi_staa_setup(struct map_info *map)
168 {
169         struct cfi_private *cfi = map->fldrv_priv;
170         struct mtd_info *mtd;
171         unsigned long offset = 0;
172         int i,j;
173         unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
174
175         mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
176         //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
177
178         if (!mtd) {
179                 printk(KERN_ERR "Failed to allocate memory for MTD device\n");
180                 kfree(cfi->cmdset_priv);
181                 return NULL;
182         }
183
184         mtd->priv = map;
185         mtd->type = MTD_NORFLASH;
186         mtd->size = devsize * cfi->numchips;
187
188         mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
189         mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
190                         * mtd->numeraseregions, GFP_KERNEL);
191         if (!mtd->eraseregions) {
192                 printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
193                 kfree(cfi->cmdset_priv);
194                 kfree(mtd);
195                 return NULL;
196         }
197
198         for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
199                 unsigned long ernum, ersize;
200                 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
201                 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
202
203                 if (mtd->erasesize < ersize) {
204                         mtd->erasesize = ersize;
205                 }
206                 for (j=0; j<cfi->numchips; j++) {
207                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
208                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
209                         mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
210                 }
211                 offset += (ersize * ernum);
212         }
213
214         if (offset != devsize) {
215                 /* Argh */
216                 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
217                 kfree(mtd->eraseregions);
218                 kfree(cfi->cmdset_priv);
219                 kfree(mtd);
220                 return NULL;
221         }
222
223         for (i=0; i<mtd->numeraseregions; i++) {
224                 printk(KERN_DEBUG "%d: offset=0x%llx,size=0x%x,blocks=%d\n",
225                        i, (unsigned long long)mtd->eraseregions[i].offset,
226                        mtd->eraseregions[i].erasesize,
227                        mtd->eraseregions[i].numblocks);
228         }
229
230         /* Also select the correct geometry setup too */
231         mtd->erase = cfi_staa_erase_varsize;
232         mtd->read = cfi_staa_read;
233         mtd->write = cfi_staa_write_buffers;
234         mtd->writev = cfi_staa_writev;
235         mtd->sync = cfi_staa_sync;
236         mtd->lock = cfi_staa_lock;
237         mtd->unlock = cfi_staa_unlock;
238         mtd->suspend = cfi_staa_suspend;
239         mtd->resume = cfi_staa_resume;
240         mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE;
241         mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */
242         mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
243         map->fldrv = &cfi_staa_chipdrv;
244         __module_get(THIS_MODULE);
245         mtd->name = map->name;
246         return mtd;
247 }
248
249
250 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
251 {
252         map_word status, status_OK;
253         unsigned long timeo;
254         DECLARE_WAITQUEUE(wait, current);
255         int suspended = 0;
256         unsigned long cmd_addr;
257         struct cfi_private *cfi = map->fldrv_priv;
258
259         adr += chip->start;
260
261         /* Ensure cmd read/writes are aligned. */
262         cmd_addr = adr & ~(map_bankwidth(map)-1);
263
264         /* Let's determine this according to the interleave only once */
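        /* CMD() replicates the value across every interleaved chip on the
         * bus, and 0x80 is the Status Register "WSM ready" bit (SR.7), so
         * the andequal tests below only succeed once all chips are ready. */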
265         status_OK = CMD(0x80);
266
267         timeo = jiffies + HZ;
268  retry:
269         mutex_lock(&chip->mutex);
270
271         /* Check that the chip's ready to talk to us.
272          * If it's in FL_ERASING state, suspend it and make it talk now.
273          */
274         switch (chip->state) {
275         case FL_ERASING:
276                 if (!(((struct cfi_pri_intelext *)cfi->cmdset_priv)->FeatureSupport & 2))
277                         goto sleep; /* We don't support erase suspend */
278
279                 map_write (map, CMD(0xb0), cmd_addr);
280                 /* If the flash has finished erasing, then 'erase suspend'
281                  * appears to make some (28F320) flash devices switch to
282                  * 'read' mode.  Make sure that we switch to 'read status'
283                  * mode so we get the right data. --rmk
284                  */
285                 map_write(map, CMD(0x70), cmd_addr);
286                 chip->oldstate = FL_ERASING;
287                 chip->state = FL_ERASE_SUSPENDING;
288                 //              printk("Erase suspending at 0x%lx\n", cmd_addr);
289                 for (;;) {
290                         status = map_read(map, cmd_addr);
291                         if (map_word_andequal(map, status, status_OK, status_OK))
292                                 break;
293
294                         if (time_after(jiffies, timeo)) {
295                                 /* Urgh */
296                                 map_write(map, CMD(0xd0), cmd_addr);
297                                 /* make sure we're in 'read status' mode */
298                                 map_write(map, CMD(0x70), cmd_addr);
299                                 chip->state = FL_ERASING;
300                                 wake_up(&chip->wq);
301                                 mutex_unlock(&chip->mutex);
302                                 printk(KERN_ERR "Chip not ready after erase "
303                                        "suspended: status = 0x%lx\n", status.x[0]);
304                                 return -EIO;
305                         }
306
307                         mutex_unlock(&chip->mutex);
308                         cfi_udelay(1);
309                         mutex_lock(&chip->mutex);
310                 }
311
312                 suspended = 1;
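                /* 0xFF (Read Array) puts the suspended chip back into read
                 * mode so that the map_copy_from() below returns real data
                 * rather than status bits. */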
313                 map_write(map, CMD(0xff), cmd_addr);
314                 chip->state = FL_READY;
315                 break;
316
317 #if 0
318         case FL_WRITING:
319                 /* Not quite yet */
320 #endif
321
322         case FL_READY:
323                 break;
324
325         case FL_CFI_QUERY:
326         case FL_JEDEC_QUERY:
327                 map_write(map, CMD(0x70), cmd_addr);
328                 chip->state = FL_STATUS;
329
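                /* Fall through to FL_STATUS: we have just issued Read Status */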
330         case FL_STATUS:
331                 status = map_read(map, cmd_addr);
332                 if (map_word_andequal(map, status, status_OK, status_OK)) {
333                         map_write(map, CMD(0xff), cmd_addr);
334                         chip->state = FL_READY;
335                         break;
336                 }
337
338                 /* Urgh. Chip not yet ready to talk to us. */
339                 if (time_after(jiffies, timeo)) {
340                         mutex_unlock(&chip->mutex);
341                         printk(KERN_ERR "waiting for chip to be ready timed out in read. WSM status = %lx\n", status.x[0]);
342                         return -EIO;
343                 }
344
345                 /* Latency issues. Drop the lock, wait a while and retry */
346                 mutex_unlock(&chip->mutex);
347                 cfi_udelay(1);
348                 goto retry;
349
350         default:
351         sleep:
352                 /* Stick ourselves on a wait queue to be woken when
353                    someone changes the status */
354                 set_current_state(TASK_UNINTERRUPTIBLE);
355                 add_wait_queue(&chip->wq, &wait);
356                 mutex_unlock(&chip->mutex);
357                 schedule();
358                 remove_wait_queue(&chip->wq, &wait);
359                 timeo = jiffies + HZ;
360                 goto retry;
361         }
362
363         map_copy_from(map, buf, adr, len);
364
365         if (suspended) {
366                 chip->state = chip->oldstate;
367                 /* What if one interleaved chip has finished and the
368                    other hasn't? The old code would leave the finished
369                    one in READY mode. That's bad, and caused -EROFS
370                    errors to be returned from do_erase_oneblock because
371                    that's the only bit it checked for at the time.
372                    As the state machine appears to explicitly allow
373                    sending the 0x70 (Read Status) command to an erasing
374                    chip and expecting it to be ignored, that's what we
375                    do. */
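                /* 0xD0 resumes the suspended erase; 0x70 (Read Status)
                 * keeps the interface in status mode as described above. */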
376                 map_write(map, CMD(0xd0), cmd_addr);
377                 map_write(map, CMD(0x70), cmd_addr);
378         }
379
380         wake_up(&chip->wq);
381         mutex_unlock(&chip->mutex);
382         return 0;
383 }
384
385 static int cfi_staa_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
386 {
387         struct map_info *map = mtd->priv;
388         struct cfi_private *cfi = map->fldrv_priv;
389         unsigned long ofs;
390         int chipnum;
391         int ret = 0;
392
393         /* ofs: offset within the first chip that the first read should start */
394         chipnum = (from >> cfi->chipshift);
395         ofs = from - (chipnum <<  cfi->chipshift);
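        /* e.g. with two 8MiB chips (chipshift == 23), a read at 0x900000
         * starts on chip 1 at offset 0x100000 within that chip. */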
396
397         *retlen = 0;
398
399         while (len) {
400                 unsigned long thislen;
401
402                 if (chipnum >= cfi->numchips)
403                         break;
404
405                 if ((len + ofs -1) >> cfi->chipshift)
406                         thislen = (1<<cfi->chipshift) - ofs;
407                 else
408                         thislen = len;
409
410                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
411                 if (ret)
412                         break;
413
414                 *retlen += thislen;
415                 len -= thislen;
416                 buf += thislen;
417
418                 ofs = 0;
419                 chipnum++;
420         }
421         return ret;
422 }
423
424 static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
425                                   unsigned long adr, const u_char *buf, int len)
426 {
427         struct cfi_private *cfi = map->fldrv_priv;
428         map_word status, status_OK;
429         unsigned long cmd_adr, timeo;
430         DECLARE_WAITQUEUE(wait, current);
431         int wbufsize, z;
432
433         /* M58LW064A requires bus alignment for buffer writes -- saw */
434         if (adr & (map_bankwidth(map)-1))
435             return -EINVAL;
436
437         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
438         adr += chip->start;
439         cmd_adr = adr & ~(wbufsize-1);
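        /* The write buffer is interleave << MaxBufWriteSize bytes (e.g. 32
         * bytes for a single chip advertising 2^5), and all buffer commands
         * are issued at the buffer-aligned address cmd_adr. */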
440
441         /* Let's determine this according to the interleave only once */
442         status_OK = CMD(0x80);
443
444         timeo = jiffies + HZ;
445  retry:
446
447 #ifdef DEBUG_CFI_FEATURES
448        printk("%s: chip->state[%d]\n", __func__, chip->state);
449 #endif
450         mutex_lock(&chip->mutex);
451
452         /* Check that the chip's ready to talk to us.
453          * Later, we can actually think about interrupting it
454          * if it's in FL_ERASING state.
455          * Not just yet, though.
456          */
457         switch (chip->state) {
458         case FL_READY:
459                 break;
460
461         case FL_CFI_QUERY:
462         case FL_JEDEC_QUERY:
463                 map_write(map, CMD(0x70), cmd_adr);
464                 chip->state = FL_STATUS;
465 #ifdef DEBUG_CFI_FEATURES
466         printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
467 #endif
468
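                /* Fall through to FL_STATUS */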
469         case FL_STATUS:
470                 status = map_read(map, cmd_adr);
471                 if (map_word_andequal(map, status, status_OK, status_OK))
472                         break;
473                 /* Urgh. Chip not yet ready to talk to us. */
474                 if (time_after(jiffies, timeo)) {
475                         mutex_unlock(&chip->mutex);
476                         printk(KERN_ERR "waiting for chip to be ready timed out in buffer write Xstatus = %lx, status = %lx\n",
477                                status.x[0], map_read(map, cmd_adr).x[0]);
478                         return -EIO;
479                 }
480
481                 /* Latency issues. Drop the lock, wait a while and retry */
482                 mutex_unlock(&chip->mutex);
483                 cfi_udelay(1);
484                 goto retry;
485
486         default:
487                 /* Stick ourselves on a wait queue to be woken when
488                    someone changes the status */
489                 set_current_state(TASK_UNINTERRUPTIBLE);
490                 add_wait_queue(&chip->wq, &wait);
491                 mutex_unlock(&chip->mutex);
492                 schedule();
493                 remove_wait_queue(&chip->wq, &wait);
494                 timeo = jiffies + HZ;
495                 goto retry;
496         }
497
498         ENABLE_VPP(map);
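        /* 0xE8 (Write to Buffer) makes the chip return extended status;
         * the loop below polls bit 7 until the write buffer is available. */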
499         map_write(map, CMD(0xe8), cmd_adr);
500         chip->state = FL_WRITING_TO_BUFFER;
501
502         z = 0;
503         for (;;) {
504                 status = map_read(map, cmd_adr);
505                 if (map_word_andequal(map, status, status_OK, status_OK))
506                         break;
507
508                 mutex_unlock(&chip->mutex);
509                 cfi_udelay(1);
510                 mutex_lock(&chip->mutex);
511
512                 if (++z > 100) {
513                         /* Argh. Not ready for write to buffer */
514                         DISABLE_VPP(map);
515                         map_write(map, CMD(0x70), cmd_adr);
516                         chip->state = FL_STATUS;
517                         mutex_unlock(&chip->mutex);
518                         printk(KERN_ERR "Chip not ready for buffer write. Xstatus = %lx\n", status.x[0]);
519                         return -EIO;
520                 }
521         }
522
523         /* Write length of data to come */
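        /* The count is encoded as (number of bus words - 1), replicated
         * per interleaved chip by CMD(). */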
524         map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr );
525
526         /* Write data */
527         for (z = 0; z < len;
528              z += map_bankwidth(map), buf += map_bankwidth(map)) {
529                 map_word d;
530                 d = map_word_load(map, buf);
531                 map_write(map, d, adr+z);
532         }
533         /* GO GO GO */
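        /* 0xD0 confirms the buffered data and starts programming. */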
534         map_write(map, CMD(0xd0), cmd_adr);
535         chip->state = FL_WRITING;
536
537         mutex_unlock(&chip->mutex);
538         cfi_udelay(chip->buffer_write_time);
539         mutex_lock(&chip->mutex);
540
541         timeo = jiffies + (HZ/2);
542         z = 0;
543         for (;;) {
544                 if (chip->state != FL_WRITING) {
545                         /* Someone's suspended the write. Sleep */
546                         set_current_state(TASK_UNINTERRUPTIBLE);
547                         add_wait_queue(&chip->wq, &wait);
548                         mutex_unlock(&chip->mutex);
549                         schedule();
550                         remove_wait_queue(&chip->wq, &wait);
551                         timeo = jiffies + (HZ / 2); /* FIXME */
552                         mutex_lock(&chip->mutex);
553                         continue;
554                 }
555
556                 status = map_read(map, cmd_adr);
557                 if (map_word_andequal(map, status, status_OK, status_OK))
558                         break;
559
560                 /* OK Still waiting */
561                 if (time_after(jiffies, timeo)) {
562                         /* clear status */
563                         map_write(map, CMD(0x50), cmd_adr);
564                         /* put back into read status register mode */
565                         map_write(map, CMD(0x70), adr);
566                         chip->state = FL_STATUS;
567                         DISABLE_VPP(map);
568                         mutex_unlock(&chip->mutex);
569                         printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
570                         return -EIO;
571                 }
572
573                 /* Latency issues. Drop the lock, wait a while and retry */
574                 mutex_unlock(&chip->mutex);
575                 cfi_udelay(1);
576                 z++;
577                 mutex_lock(&chip->mutex);
578         }
579         if (!z) {
580                 chip->buffer_write_time--;
581                 if (!chip->buffer_write_time)
582                         chip->buffer_write_time++;
583         }
584         if (z > 1)
585                 chip->buffer_write_time++;
586
587         /* Done and happy. */
588         DISABLE_VPP(map);
589         chip->state = FL_STATUS;
590
591         /* check for errors: 'lock bit', 'VPP', 'dead cell'/'unerased cell' or 'incorrect cmd' -- saw */
592         if (map_word_bitsset(map, status, CMD(0x3a))) {
593 #ifdef DEBUG_CFI_FEATURES
594                 printk("%s: 2 status[%lx]\n", __func__, status.x[0]);
595 #endif
596                 /* clear status */
597                 map_write(map, CMD(0x50), cmd_adr);
598                 /* put back into read status register mode */
599                 map_write(map, CMD(0x70), adr);
600                 wake_up(&chip->wq);
601                 mutex_unlock(&chip->mutex);
602                 return map_word_bitsset(map, status, CMD(0x02)) ? -EROFS : -EIO;
603         }
604         wake_up(&chip->wq);
605         mutex_unlock(&chip->mutex);
606
607         return 0;
608 }
609
610 static int cfi_staa_write_buffers (struct mtd_info *mtd, loff_t to,
611                                        size_t len, size_t *retlen, const u_char *buf)
612 {
613         struct map_info *map = mtd->priv;
614         struct cfi_private *cfi = map->fldrv_priv;
615         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
616         int ret = 0;
617         int chipnum;
618         unsigned long ofs;
619
620         *retlen = 0;
621         if (!len)
622                 return 0;
623
624         chipnum = to >> cfi->chipshift;
625         ofs = to  - (chipnum << cfi->chipshift);
626
627 #ifdef DEBUG_CFI_FEATURES
628         printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map));
629         printk("%s: chipnum[%x] wbufsize[%x]\n", __func__, chipnum, wbufsize);
630         printk("%s: ofs[%x] len[%x]\n", __func__, ofs, len);
631 #endif
632
633         /* Write buffer is worth it only if more than one word to write... */
634         while (len > 0) {
635                 /* We must not cross write block boundaries */
636                 int size = wbufsize - (ofs & (wbufsize-1));
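                /* e.g. with a 32-byte write buffer, a write starting at
                 * ofs 0x1c is limited to 4 bytes here so that it does not
                 * cross into the next buffer-sized block. */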
637
638                 if (size > len)
639                     size = len;
640
641                 ret = do_write_buffer(map, &cfi->chips[chipnum],
642                                       ofs, buf, size);
643                 if (ret)
644                         return ret;
645
646                 ofs += size;
647                 buf += size;
648                 (*retlen) += size;
649                 len -= size;
650
651                 if (ofs >> cfi->chipshift) {
652                         chipnum ++;
653                         ofs = 0;
654                         if (chipnum == cfi->numchips)
655                                 return 0;
656                 }
657         }
658
659         return 0;
660 }
661
662 /*
663  * Writev for ECC-Flashes is a little more complicated. We need to maintain
664  * a small buffer for this.
665  * XXX: If the buffer size is not a multiple of 2, this will break
666  */
667 #define ECCBUF_SIZE (mtd->writesize)
668 #define ECCBUF_DIV(x) ((x) & ~(ECCBUF_SIZE - 1))
669 #define ECCBUF_MOD(x) ((x) &  (ECCBUF_SIZE - 1))
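/* ECCBUF_DIV() rounds down to a whole number of ECC pages and ECCBUF_MOD()
 * gives the remainder; both rely on ECCBUF_SIZE being a power of two, which
 * is what the XXX above warns about. */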
670 static int
671 cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
672                 unsigned long count, loff_t to, size_t *retlen)
673 {
674         unsigned long i;
675         size_t   totlen = 0, thislen;
676         int      ret = 0;
677         size_t   buflen = 0;
678         static char *buffer;
679
680         if (!ECCBUF_SIZE) {
681                 /* We should fall back to a general writev implementation.
682                  * Until that is written, just break.
683                  */
684                 return -EIO;
685         }
686         buffer = kmalloc(ECCBUF_SIZE, GFP_KERNEL);
687         if (!buffer)
688                 return -ENOMEM;
689
690         for (i=0; i<count; i++) {
691                 size_t elem_len = vecs[i].iov_len;
692                 void *elem_base = vecs[i].iov_base;
693                 if (!elem_len) /* FIXME: Might be unnecessary. Check that */
694                         continue;
695                 if (buflen) { /* cut off head */
696                         if (buflen + elem_len < ECCBUF_SIZE) { /* just accumulate */
697                                 memcpy(buffer+buflen, elem_base, elem_len);
698                                 buflen += elem_len;
699                                 continue;
700                         }
701                         memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
702                         ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
703                                         buffer);
704                         totlen += thislen;
705                         if (ret || thislen != ECCBUF_SIZE)
706                                 goto write_error;
707                         elem_len -= thislen-buflen;
708                         elem_base += thislen-buflen;
709                         to += ECCBUF_SIZE;
710                 }
711                 if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
712                         ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
713                                         &thislen, elem_base);
714                         totlen += thislen;
715                         if (ret || thislen != ECCBUF_DIV(elem_len))
716                                 goto write_error;
717                         to += thislen;
718                 }
719                 buflen = ECCBUF_MOD(elem_len); /* cut off tail */
720                 if (buflen) {
721                         memset(buffer, 0xff, ECCBUF_SIZE);
722                         memcpy(buffer, elem_base + thislen, buflen);
723                 }
724         }
725         if (buflen) { /* flush last page, even if not full */
726                 /* This is sometimes intended behaviour, really */
727                 ret = mtd_write(mtd, to, buflen, &thislen, buffer);
728                 totlen += thislen;
729                 if (ret || thislen != buflen)
730                         goto write_error;
731         }
732 write_error:
733         if (retlen)
734                 *retlen = totlen;
735         kfree(buffer);
736         return ret;
737 }
738
739
740 static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
741 {
742         struct cfi_private *cfi = map->fldrv_priv;
743         map_word status, status_OK;
744         unsigned long timeo;
745         int retries = 3;
746         DECLARE_WAITQUEUE(wait, current);
747         int ret = 0;
748
749         adr += chip->start;
750
751         /* Let's determine this according to the interleave only once */
752         status_OK = CMD(0x80);
753
754         timeo = jiffies + HZ;
755 retry:
756         mutex_lock(&chip->mutex);
757
758         /* Check that the chip's ready to talk to us. */
759         switch (chip->state) {
760         case FL_CFI_QUERY:
761         case FL_JEDEC_QUERY:
762         case FL_READY:
763                 map_write(map, CMD(0x70), adr);
764                 chip->state = FL_STATUS;
765
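                /* Fall through to FL_STATUS */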
766         case FL_STATUS:
767                 status = map_read(map, adr);
768                 if (map_word_andequal(map, status, status_OK, status_OK))
769                         break;
770
771                 /* Urgh. Chip not yet ready to talk to us. */
772                 if (time_after(jiffies, timeo)) {
773                         mutex_unlock(&chip->mutex);
774                         printk(KERN_ERR "waiting for chip to be ready timed out in erase\n");
775                         return -EIO;
776                 }
777
778                 /* Latency issues. Drop the lock, wait a while and retry */
779                 mutex_unlock(&chip->mutex);
780                 cfi_udelay(1);
781                 goto retry;
782
783         default:
784                 /* Stick ourselves on a wait queue to be woken when
785                    someone changes the status */
786                 set_current_state(TASK_UNINTERRUPTIBLE);
787                 add_wait_queue(&chip->wq, &wait);
788                 mutex_unlock(&chip->mutex);
789                 schedule();
790                 remove_wait_queue(&chip->wq, &wait);
791                 timeo = jiffies + HZ;
792                 goto retry;
793         }
794
795         ENABLE_VPP(map);
796         /* Clear the status register first */
797         map_write(map, CMD(0x50), adr);
798
799         /* Now erase */
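        /* Two-cycle block erase: 0x20 (Block Erase setup) at the block
         * address, followed by 0xD0 (confirm). */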
800         map_write(map, CMD(0x20), adr);
801         map_write(map, CMD(0xD0), adr);
802         chip->state = FL_ERASING;
803
804         mutex_unlock(&chip->mutex);
805         msleep(1000);
806         mutex_lock(&chip->mutex);
807
808         /* FIXME. Use a timer to check this, and return immediately. */
809         /* Once the state machine's known to be working I'll do that */
810
811         timeo = jiffies + (HZ*20);
812         for (;;) {
813                 if (chip->state != FL_ERASING) {
814                         /* Someone's suspended the erase. Sleep */
815                         set_current_state(TASK_UNINTERRUPTIBLE);
816                         add_wait_queue(&chip->wq, &wait);
817                         mutex_unlock(&chip->mutex);
818                         schedule();
819                         remove_wait_queue(&chip->wq, &wait);
820                         timeo = jiffies + (HZ*20); /* FIXME */
821                         mutex_lock(&chip->mutex);
822                         continue;
823                 }
824
825                 status = map_read(map, adr);
826                 if (map_word_andequal(map, status, status_OK, status_OK))
827                         break;
828
829                 /* OK Still waiting */
830                 if (time_after(jiffies, timeo)) {
831                         map_write(map, CMD(0x70), adr);
832                         chip->state = FL_STATUS;
833                         printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
834                         DISABLE_VPP(map);
835                         mutex_unlock(&chip->mutex);
836                         return -EIO;
837                 }
838
839                 /* Latency issues. Drop the lock, wait a while and retry */
840                 mutex_unlock(&chip->mutex);
841                 cfi_udelay(1);
842                 mutex_lock(&chip->mutex);
843         }
844
845         DISABLE_VPP(map);
846         ret = 0;
847
848         /* We've broken this before. It doesn't hurt to be safe */
849         map_write(map, CMD(0x70), adr);
850         chip->state = FL_STATUS;
851         status = map_read(map, adr);
852
853         /* check for lock bit */
854         if (map_word_bitsset(map, status, CMD(0x3a))) {
855                 unsigned char chipstatus = status.x[0];
856                 if (!map_word_equal(map, status, CMD(chipstatus))) {
857                         int i, w;
858                         for (w=0; w<map_words(map); w++) {
859                                 for (i = 0; i<cfi_interleave(cfi); i++) {
860                                         chipstatus |= status.x[w] >> (cfi->device_type * 8);
861                                 }
862                         }
863                         printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
864                                status.x[0], chipstatus);
865                 }
866                 /* Reset the error bits */
867                 map_write(map, CMD(0x50), adr);
868                 map_write(map, CMD(0x70), adr);
869
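                /* Decode the usual ST/Intel status register error bits:
                 * SR.4+SR.5 set together means a bad command sequence,
                 * SR.1 a locked block, SR.3 low VPP, SR.5 an erase failure. */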
870                 if ((chipstatus & 0x30) == 0x30) {
871                         printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
872                         ret = -EIO;
873                 } else if (chipstatus & 0x02) {
874                         /* Protection bit set */
875                         ret = -EROFS;
876                 } else if (chipstatus & 0x8) {
877                         /* Voltage */
878                         printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
879                         ret = -EIO;
880                 } else if (chipstatus & 0x20) {
881                         if (retries--) {
882                                 printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
883                                 timeo = jiffies + HZ;
884                                 chip->state = FL_STATUS;
885                                 mutex_unlock(&chip->mutex);
886                                 goto retry;
887                         }
888                         printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
889                         ret = -EIO;
890                 }
891         }
892
893         wake_up(&chip->wq);
894         mutex_unlock(&chip->mutex);
895         return ret;
896 }
897
898 static int cfi_staa_erase_varsize(struct mtd_info *mtd,
899                                   struct erase_info *instr)
900 {
        struct map_info *map = mtd->priv;
901         struct cfi_private *cfi = map->fldrv_priv;
902         unsigned long adr, len;
903         int chipnum, ret = 0;
904         int i, first;
905         struct mtd_erase_region_info *regions = mtd->eraseregions;
906
907         if (instr->addr > mtd->size)
908                 return -EINVAL;
909
910         if ((instr->len + instr->addr) > mtd->size)
911                 return -EINVAL;
912
913         /* Check that both start and end of the requested erase are
914          * aligned with the erasesize at the appropriate addresses.
915          */
916
917         i = 0;
918
919         /* Skip all erase regions which are ended before the start of
920            the requested erase. Actually, to save on the calculations,
921            we skip to the first erase region which starts after the
922            start of the requested erase, and then go back one.
923         */
924
925         while (i < mtd->numeraseregions && instr->addr >= regions[i].offset)
926                i++;
927         i--;
928
929         /* OK, now i is pointing at the erase region in which this
930            erase request starts. Check the start of the requested
931            erase range is aligned with the erase size which is in
932            effect here.
933         */
934
935         if (instr->addr & (regions[i].erasesize-1))
936                 return -EINVAL;
937
938         /* Remember the erase region we start on */
939         first = i;
940
941         /* Next, check that the end of the requested erase is aligned
942          * with the erase region at that address.
943          */
944
945         while (i<mtd->numeraseregions && (instr->addr + instr->len) >= regions[i].offset)
946                 i++;
947
948         /* As before, drop back one to point at the region in which
949            the address actually falls
950         */
951         i--;
952
953         if ((instr->addr + instr->len) & (regions[i].erasesize-1))
954                 return -EINVAL;
955
956         chipnum = instr->addr >> cfi->chipshift;
957         adr = instr->addr - (chipnum << cfi->chipshift);
958         len = instr->len;
959
960         i=first;
961
962         while(len) {
963                 ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr);
964
965                 if (ret)
966                         return ret;
967
968                 adr += regions[i].erasesize;
969                 len -= regions[i].erasesize;
970
971                 if (adr % (1<< cfi->chipshift) == (((unsigned long)regions[i].offset + (regions[i].erasesize * regions[i].numblocks)) %( 1<< cfi->chipshift)))
972                         i++;
973
974                 if (adr >> cfi->chipshift) {
975                         adr = 0;
976                         chipnum++;
977
978                         if (chipnum >= cfi->numchips)
979                         break;
980                 }
981         }
982
983         instr->state = MTD_ERASE_DONE;
984         mtd_erase_callback(instr);
985
986         return 0;
987 }
988
989 static void cfi_staa_sync (struct mtd_info *mtd)
990 {
991         struct map_info *map = mtd->priv;
992         struct cfi_private *cfi = map->fldrv_priv;
993         int i;
994         struct flchip *chip;
995         int ret = 0;
996         DECLARE_WAITQUEUE(wait, current);
997
998         for (i=0; !ret && i<cfi->numchips; i++) {
999                 chip = &cfi->chips[i];
1000
1001         retry:
1002                 mutex_lock(&chip->mutex);
1003
1004                 switch(chip->state) {
1005                 case FL_READY:
1006                 case FL_STATUS:
1007                 case FL_CFI_QUERY:
1008                 case FL_JEDEC_QUERY:
1009                         chip->oldstate = chip->state;
1010                         chip->state = FL_SYNCING;
1011                         /* No need to wake_up() on this state change -
1012                          * as the whole point is that nobody can do anything
1013                          * with the chip now anyway.
1014                          */
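                        /* Fall through: treat the chip as already syncing */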
1015                 case FL_SYNCING:
1016                         mutex_unlock(&chip->mutex);
1017                         break;
1018
1019                 default:
1020                         /* Not an idle state */
1021                         set_current_state(TASK_UNINTERRUPTIBLE);
1022                         add_wait_queue(&chip->wq, &wait);
1023
1024                         mutex_unlock(&chip->mutex);
1025                         schedule();
1026                         remove_wait_queue(&chip->wq, &wait);
1027
1028                         goto retry;
1029                 }
1030         }
1031
1032         /* Unlock the chips again */
1033
1034         for (i--; i >=0; i--) {
1035                 chip = &cfi->chips[i];
1036
1037                 mutex_lock(&chip->mutex);
1038
1039                 if (chip->state == FL_SYNCING) {
1040                         chip->state = chip->oldstate;
1041                         wake_up(&chip->wq);
1042                 }
1043                 mutex_unlock(&chip->mutex);
1044         }
1045 }
1046
1047 static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1048 {
1049         struct cfi_private *cfi = map->fldrv_priv;
1050         map_word status, status_OK;
1051         unsigned long timeo = jiffies + HZ;
1052         DECLARE_WAITQUEUE(wait, current);
1053
1054         adr += chip->start;
1055
1056         /* Let's determine this according to the interleave only once */
1057         status_OK = CMD(0x80);
1058
1059         timeo = jiffies + HZ;
1060 retry:
1061         mutex_lock(&chip->mutex);
1062
1063         /* Check that the chip's ready to talk to us. */
1064         switch (chip->state) {
1065         case FL_CFI_QUERY:
1066         case FL_JEDEC_QUERY:
1067         case FL_READY:
1068                 map_write(map, CMD(0x70), adr);
1069                 chip->state = FL_STATUS;
1070
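                /* Fall through to FL_STATUS */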
1071         case FL_STATUS:
1072                 status = map_read(map, adr);
1073                 if (map_word_andequal(map, status, status_OK, status_OK))
1074                         break;
1075
1076                 /* Urgh. Chip not yet ready to talk to us. */
1077                 if (time_after(jiffies, timeo)) {
1078                         mutex_unlock(&chip->mutex);
1079                         printk(KERN_ERR "waiting for chip to be ready timed out in lock\n");
1080                         return -EIO;
1081                 }
1082
1083                 /* Latency issues. Drop the lock, wait a while and retry */
1084                 mutex_unlock(&chip->mutex);
1085                 cfi_udelay(1);
1086                 goto retry;
1087
1088         default:
1089                 /* Stick ourselves on a wait queue to be woken when
1090                    someone changes the status */
1091                 set_current_state(TASK_UNINTERRUPTIBLE);
1092                 add_wait_queue(&chip->wq, &wait);
1093                 mutex_unlock(&chip->mutex);
1094                 schedule();
1095                 remove_wait_queue(&chip->wq, &wait);
1096                 timeo = jiffies + HZ;
1097                 goto retry;
1098         }
1099
1100         ENABLE_VPP(map);
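        /* 0x60 (lock setup) followed by 0x01 sets the block lock bit for
         * the block containing adr. */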
1101         map_write(map, CMD(0x60), adr);
1102         map_write(map, CMD(0x01), adr);
1103         chip->state = FL_LOCKING;
1104
1105         mutex_unlock(&chip->mutex);
1106         msleep(1000);
1107         mutex_lock(&chip->mutex);
1108
1109         /* FIXME. Use a timer to check this, and return immediately. */
1110         /* Once the state machine's known to be working I'll do that */
1111
1112         timeo = jiffies + (HZ*2);
1113         for (;;) {
1114
1115                 status = map_read(map, adr);
1116                 if (map_word_andequal(map, status, status_OK, status_OK))
1117                         break;
1118
1119                 /* OK Still waiting */
1120                 if (time_after(jiffies, timeo)) {
1121                         map_write(map, CMD(0x70), adr);
1122                         chip->state = FL_STATUS;
1123                         printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1124                         DISABLE_VPP(map);
1125                         mutex_unlock(&chip->mutex);
1126                         return -EIO;
1127                 }
1128
1129                 /* Latency issues. Drop the lock, wait a while and retry */
1130                 mutex_unlock(&chip->mutex);
1131                 cfi_udelay(1);
1132                 mutex_lock(&chip->mutex);
1133         }
1134
1135         /* Done and happy. */
1136         chip->state = FL_STATUS;
1137         DISABLE_VPP(map);
1138         wake_up(&chip->wq);
1139         mutex_unlock(&chip->mutex);
1140         return 0;
1141 }
1142 static int cfi_staa_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1143 {
1144         struct map_info *map = mtd->priv;
1145         struct cfi_private *cfi = map->fldrv_priv;
1146         unsigned long adr;
1147         int chipnum, ret = 0;
1148 #ifdef DEBUG_LOCK_BITS
1149         int ofs_factor = cfi->interleave * cfi->device_type;
1150 #endif
1151
1152         if (ofs & (mtd->erasesize - 1))
1153                 return -EINVAL;
1154
1155         if (len & (mtd->erasesize -1))
1156                 return -EINVAL;
1157
1158         if ((len + ofs) > mtd->size)
1159                 return -EINVAL;
1160
1161         chipnum = ofs >> cfi->chipshift;
1162         adr = ofs - (chipnum << cfi->chipshift);
1163
1164         while(len) {
1165
1166 #ifdef DEBUG_LOCK_BITS
1167                 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1168                 printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1169                 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1170 #endif
1171
1172                 ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr);
1173
1174 #ifdef DEBUG_LOCK_BITS
1175                 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1176                 printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1177                 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1178 #endif
1179
1180                 if (ret)
1181                         return ret;
1182
1183                 adr += mtd->erasesize;
1184                 len -= mtd->erasesize;
1185
1186                 if (adr >> cfi->chipshift) {
1187                         adr = 0;
1188                         chipnum++;
1189
1190                         if (chipnum >= cfi->numchips)
1191                         break;
1192                 }
1193         }
1194         return 0;
1195 }
1196 static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
1197 {
1198         struct cfi_private *cfi = map->fldrv_priv;
1199         map_word status, status_OK;
1200         unsigned long timeo = jiffies + HZ;
1201         DECLARE_WAITQUEUE(wait, current);
1202
1203         adr += chip->start;
1204
1205         /* Let's determine this according to the interleave only once */
1206         status_OK = CMD(0x80);
1207
1208         timeo = jiffies + HZ;
1209 retry:
1210         mutex_lock(&chip->mutex);
1211
1212         /* Check that the chip's ready to talk to us. */
1213         switch (chip->state) {
1214         case FL_CFI_QUERY:
1215         case FL_JEDEC_QUERY:
1216         case FL_READY:
1217                 map_write(map, CMD(0x70), adr);
1218                 chip->state = FL_STATUS;
1219
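                /* Fall through to FL_STATUS */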
1220         case FL_STATUS:
1221                 status = map_read(map, adr);
1222                 if (map_word_andequal(map, status, status_OK, status_OK))
1223                         break;
1224
1225                 /* Urgh. Chip not yet ready to talk to us. */
1226                 if (time_after(jiffies, timeo)) {
1227                         mutex_unlock(&chip->mutex);
1228                         printk(KERN_ERR "waiting for chip to be ready timed out in unlock\n");
1229                         return -EIO;
1230                 }
1231
1232                 /* Latency issues. Drop the lock, wait a while and retry */
1233                 mutex_unlock(&chip->mutex);
1234                 cfi_udelay(1);
1235                 goto retry;
1236
1237         default:
1238                 /* Stick ourselves on a wait queue to be woken when
1239                    someone changes the status */
1240                 set_current_state(TASK_UNINTERRUPTIBLE);
1241                 add_wait_queue(&chip->wq, &wait);
1242                 mutex_unlock(&chip->mutex);
1243                 schedule();
1244                 remove_wait_queue(&chip->wq, &wait);
1245                 timeo = jiffies + HZ;
1246                 goto retry;
1247         }
1248
1249         ENABLE_VPP(map);
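        /* 0x60 (lock setup) followed by 0xD0 clears block lock bits. */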
1250         map_write(map, CMD(0x60), adr);
1251         map_write(map, CMD(0xD0), adr);
1252         chip->state = FL_UNLOCKING;
1253
1254         mutex_unlock(&chip->mutex);
1255         msleep(1000);
1256         mutex_lock(&chip->mutex);
1257
1258         /* FIXME. Use a timer to check this, and return immediately. */
1259         /* Once the state machine's known to be working I'll do that */
1260
1261         timeo = jiffies + (HZ*2);
1262         for (;;) {
1263
1264                 status = map_read(map, adr);
1265                 if (map_word_andequal(map, status, status_OK, status_OK))
1266                         break;
1267
1268                 /* OK Still waiting */
1269                 if (time_after(jiffies, timeo)) {
1270                         map_write(map, CMD(0x70), adr);
1271                         chip->state = FL_STATUS;
1272                         printk(KERN_ERR "waiting for unlock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]);
1273                         DISABLE_VPP(map);
1274                         mutex_unlock(&chip->mutex);
1275                         return -EIO;
1276                 }
1277
1278                 /* Latency issues. Drop the lock, wait a while and retry */
1279                 mutex_unlock(&chip->mutex);
1280                 cfi_udelay(1);
1281                 mutex_lock(&chip->mutex);
1282         }
1283
1284         /* Done and happy. */
1285         chip->state = FL_STATUS;
1286         DISABLE_VPP(map);
1287         wake_up(&chip->wq);
1288         mutex_unlock(&chip->mutex);
1289         return 0;
1290 }
1291 static int cfi_staa_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
1292 {
1293         struct map_info *map = mtd->priv;
1294         struct cfi_private *cfi = map->fldrv_priv;
1295         unsigned long adr;
1296         int chipnum, ret = 0;
1297 #ifdef DEBUG_LOCK_BITS
1298         int ofs_factor = cfi->interleave * cfi->device_type;
1299 #endif
1300
1301         chipnum = ofs >> cfi->chipshift;
1302         adr = ofs - (chipnum << cfi->chipshift);
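        /* Unlike cfi_staa_lock(), there is no loop over the range here:
         * on these chips the clear-lock-bits command appears to clear the
         * lock bits of all blocks at once, so a single call suffices. */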
1303
1304 #ifdef DEBUG_LOCK_BITS
1305         {
1306                 unsigned long temp_adr = adr;
1307                 unsigned long temp_len = len;
1308
1309                 cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1310                 while (temp_len) {
1311                         printk("before unlock %lx: block status register is %x\n", temp_adr, cfi_read_query(map, temp_adr+(2*ofs_factor)));
1312                         temp_adr += mtd->erasesize;
1313                         temp_len -= mtd->erasesize;
1314                 }
1315                 cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1316         }
1317 #endif
1318
1319         ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr);
1320
1321 #ifdef DEBUG_LOCK_BITS
1322         cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
1323         printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor)));
1324         cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL);
1325 #endif
1326
1327         return ret;
1328 }
1329
1330 static int cfi_staa_suspend(struct mtd_info *mtd)
1331 {
1332         struct map_info *map = mtd->priv;
1333         struct cfi_private *cfi = map->fldrv_priv;
1334         int i;
1335         struct flchip *chip;
1336         int ret = 0;
1337
1338         for (i=0; !ret && i<cfi->numchips; i++) {
1339                 chip = &cfi->chips[i];
1340
1341                 mutex_lock(&chip->mutex);
1342
1343                 switch(chip->state) {
1344                 case FL_READY:
1345                 case FL_STATUS:
1346                 case FL_CFI_QUERY:
1347                 case FL_JEDEC_QUERY:
1348                         chip->oldstate = chip->state;
1349                         chip->state = FL_PM_SUSPENDED;
1350                         /* No need to wake_up() on this state change -
1351                          * as the whole point is that nobody can do anything
1352                          * with the chip now anyway.
1353                          */
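                        /* Fall through: already suspended is fine */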
1354                 case FL_PM_SUSPENDED:
1355                         break;
1356
1357                 default:
1358                         ret = -EAGAIN;
1359                         break;
1360                 }
1361                 mutex_unlock(&chip->mutex);
1362         }
1363
1364         /* Unlock the chips again */
1365
1366         if (ret) {
1367                 for (i--; i >=0; i--) {
1368                         chip = &cfi->chips[i];
1369
1370                         mutex_lock(&chip->mutex);
1371
1372                         if (chip->state == FL_PM_SUSPENDED) {
1373                                 /* No need to force it into a known state here,
1374                                    because we're returning failure, and it didn't
1375                                    get power cycled */
1376                                 chip->state = chip->oldstate;
1377                                 wake_up(&chip->wq);
1378                         }
1379                         mutex_unlock(&chip->mutex);
1380                 }
1381         }
1382
1383         return ret;
1384 }
1385
1386 static void cfi_staa_resume(struct mtd_info *mtd)
1387 {
1388         struct map_info *map = mtd->priv;
1389         struct cfi_private *cfi = map->fldrv_priv;
1390         int i;
1391         struct flchip *chip;
1392
1393         for (i=0; i<cfi->numchips; i++) {
1394
1395                 chip = &cfi->chips[i];
1396
1397                 mutex_lock(&chip->mutex);
1398
1399                 /* Go to known state. Chip may have been power cycled */
1400                 if (chip->state == FL_PM_SUSPENDED) {
1401                         map_write(map, CMD(0xFF), 0);
1402                         chip->state = FL_READY;
1403                         wake_up(&chip->wq);
1404                 }
1405
1406                 mutex_unlock(&chip->mutex);
1407         }
1408 }
1409
1410 static void cfi_staa_destroy(struct mtd_info *mtd)
1411 {
1412         struct map_info *map = mtd->priv;
1413         struct cfi_private *cfi = map->fldrv_priv;
1414         kfree(cfi->cmdset_priv);
1415         kfree(cfi);
1416 }
1417
1418 MODULE_LICENSE("GPL");