/*
 * Source: drivers/mtd/nand/pxa3xx_nand.c (karo-tx-linux tree),
 * at commit "mtd: nand: pxa3xx-nand: fix random command timeouts".
 */
1 /*
2  * drivers/mtd/nand/pxa3xx_nand.c
3  *
4  * Copyright © 2005 Intel Corporation
5  * Copyright © 2006 Marvell International Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12  */
13
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>
31
32 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
33 #define ARCH_HAS_DMA
34 #endif
35
36 #ifdef ARCH_HAS_DMA
37 #include <mach/dma.h>
38 #endif
39
40 #include <linux/platform_data/mtd-nand-pxa3xx.h>
41
#define CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
#define NAND_STOP_DELAY		msecs_to_jiffies(40)
/* Largest data chunk the controller transfers per command cycle */
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

/* NDCR (control register) bits */
#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
/* Low 12 NDCR bits mask the interrupt sources (1 = masked, see enable_int) */
#define NDCR_INT_MASK		(0xFFF)

/* NDSR (status register) bits */
#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

/* NDCB0 (command buffer 0) bits */
#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* Extended command types for NDCB0_EXT_CMD_TYPE() (multi-chunk, NFCv2) */
#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
131
/* macros for registers read/write */
/* MMIO helpers; the _relaxed accessors avoid a barrier on every access */
#define nand_writel(info, off, val)	\
	writel_relaxed((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl_relaxed((info)->mmio_base + (off))
/* error code and state */
/* Driver-internal result codes stored in info->retcode */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error reported by DCSR */
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* correctable ECC error(s) occurred */
};

/* Command state machine, tracked in info->state by the IRQ handlers */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,	/* NDCBx command words being written */
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};
161
/*
 * Controller flavours handled by this driver:
 * NFCv1 (PXA SoCs) and NFCv2 (Armada 370/XP).
 */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

/* Per-chip-select state: one instance per attached NAND chip */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	void			*info_data;	/* back-pointer to pxa3xx_nand_info */

	/* page size of attached chip */
	int			use_ecc;
	int			cs;		/* chip-select index of this host */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
	size_t			read_id_bytes;

};
182
/* Controller state shared by all chip selects */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	struct completion	cmd_complete, dev_ready;

	/* current position/extent within data_buff and oob_buff */
	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* STATE_* of current command */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;	/* ERR_* result of last command */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
245
/* Module parameter: allow disabling DMA (data then moves by PIO) */
static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");

/*
 * Built-in timing sets in ns. Value order matches the accessors in
 * pxa3xx_nand_set_timing(): tCH, tCS, tWH, tWP, tRH, tRP, tR, tWHR, tAR.
 */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

/*
 * Legacy built-in flash table for chips not identified via ONFI/JEDEC.
 * NOTE(review): columns assumed to follow struct pxa3xx_nand_flash
 * (name, chip_id, pages/block, page size, flash width, dfc width,
 * number of blocks, timing) - confirm against
 * <linux/platform_data/mtd-nand-pxa3xx.h>.
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};

/* On-flash bad-block-table signatures ("MVBbt0" and its mirror) */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* BBT stored in OOB: pattern at byte 8, version byte at offset 14 */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
291
/* OOB layout for 2 KiB pages with BCH-4: ECC occupies OOB bytes 32-63 */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

/* OOB layout for 4 KiB pages with BCH-4: two 32-byte ECC regions */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/*
 * OOB layout for 4 KiB pages with BCH-8.
 * NOTE(review): eccbytes is 128 but only 32 positions are listed and
 * oobfree is empty - presumably the remaining ECC bytes lie beyond the
 * visible OOB; verify against the NFCv2 BCH-8 spare-area layout.
 */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};
326
/* Define a default flash type setting serve as flash detecting only */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])

/* NDTR0CS0 field packing: each value is clamped to its field width */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

/* NDTR1CS0 field packing */
#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
/*
 * NOTE(review): the chained integer divisions truncate, so a timing can
 * be rounded *down* below the chip's minimum - consider DIV_ROUND_UP if
 * timing margins become an issue.
 */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
343
/* DT match table; .data carries the controller variant (NFCv1/NFCv2) */
static const struct of_device_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,pxa3xx-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
	},
	{
		.compatible = "marvell,armada370-nand",
		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
356
357 static enum pxa3xx_nand_variant
358 pxa3xx_nand_get_variant(struct platform_device *pdev)
359 {
360         const struct of_device_id *of_id =
361                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
362         if (!of_id)
363                 return PXA3XX_NAND_VARIANT_PXA;
364         return (enum pxa3xx_nand_variant)of_id->data;
365 }
366
367 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
368                                    const struct pxa3xx_nand_timing *t)
369 {
370         struct pxa3xx_nand_info *info = host->info_data;
371         unsigned long nand_clk = clk_get_rate(info->clk);
372         uint32_t ndtr0, ndtr1;
373
374         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
375                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
376                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
377                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
378                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
379                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
380
381         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
382                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
383                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
384
385         info->ndtr0cs0 = ndtr0;
386         info->ndtr1cs0 = ndtr1;
387         nand_writel(info, NDTR0CS0, ndtr0);
388         nand_writel(info, NDTR1CS0, ndtr1);
389 }
390
391 /*
392  * Set the data and OOB size, depending on the selected
393  * spare and ECC configuration.
394  * Only applicable to READ0, READOOB and PAGEPROG commands.
395  */
396 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
397                                 struct mtd_info *mtd)
398 {
399         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
400
401         info->data_size = mtd->writesize;
402         if (!oob_enable)
403                 return;
404
405         info->oob_size = info->spare_size;
406         if (!info->use_ecc)
407                 info->oob_size += info->ecc_size;
408 }
409
/**
 * NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	/* select the HW ECC engine (and BCH mode via NDECCCTRL) */
	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	/* NOTE(review): the intermediate 0 write presumably guarantees a
	 * 0->1 transition on ND_RUN - confirm against the controller spec. */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}
449
450 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
451 {
452         uint32_t ndcr;
453         int timeout = NAND_STOP_DELAY;
454
455         /* wait RUN bit in NDCR become 0 */
456         ndcr = nand_readl(info, NDCR);
457         while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
458                 ndcr = nand_readl(info, NDCR);
459                 udelay(1);
460         }
461
462         if (timeout <= 0) {
463                 ndcr &= ~NDCR_ND_RUN;
464                 nand_writel(info, NDCR, ndcr);
465         }
466         /* clear status bits */
467         nand_writel(info, NDSR, NDSR_MASK);
468 }
469
/*
 * Unmask the interrupt sources given in @int_mask. The low NDCR bits
 * are *mask* bits, so enabling means clearing them (cf. disable_int).
 */
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr & ~int_mask);
}
478
/* Mask (disable) the interrupt sources given in @int_mask in NDCR. */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
486
/*
 * Read @len 32-bit words from the controller data FIFO into @data.
 * With BCH enabled the FIFO may only be drained 8 words (32 bytes) at a
 * time, waiting for NDSR.RDDREQ between bursts.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			/* 8 words consumed: advance 32 bytes, drop 8 from len */
			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}
519
/*
 * Move one chunk of data (plus its OOB, if any) between the driver
 * buffers and the controller FIFO by PIO, then advance the buffer
 * positions for a possible next chunk.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* never transfer more than one controller chunk per data request */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		/* FIFO is accessed as 32-bit words, hence the round-up */
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
556
557 #ifdef ARCH_HAS_DMA
/*
 * Program and start a single legacy PXA DMA descriptor moving
 * data + OOB between data_buff and the controller FIFO (NDDB).
 * Completion is reported through pxa3xx_nand_data_dma_irq().
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	/* length covers data + OOB, padded to the 32-byte burst size */
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;	/* single descriptor: stop after it */
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		/* memory -> NDDB FIFO */
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		/* NDDB FIFO -> memory */
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* map the request line, load the descriptor and start the channel */
	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}
587
/*
 * DMA channel completion callback: acknowledge the channel status,
 * record a bus error if one occurred, then hand control back to the
 * controller interrupt path.
 */
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	/* read the channel status and write it back to acknowledge it */
	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	/* re-enable controller interrupts (masked before DMA started) */
	enable_int(info, NDCR_INT_MASK);
	/* clear the data-request bits that triggered this transfer */
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
604 #else
/* No DMA support on this platform: data moves by PIO in handle_data_pio() */
static void start_data_dma(struct pxa3xx_nand_info *info)
{}
607 #endif
608
/*
 * Threaded half of the NAND IRQ: performs the PIO transfer requested by
 * RDDREQ/WRDREQ, then acknowledges those data-request status bits.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
620
/*
 * Main controller interrupt handler. Latches ECC results, dispatches
 * data requests to DMA or to the threaded PIO handler, loads the next
 * command words on WRCMDREQ and completes the waiters on command-done
 * and device-ready. The order of status handling below is deliberate.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* each chip select has its own ready / command-done status bits */
	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	/* latch the ECC outcome of the current command */
	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* only NFCv2 with BCH reports an actual bitflip count */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			/* PIO transfer happens in the threaded handler */
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* wake waiters only after all status handling is finished */
	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
716
/*
 * Return 1 if the first @len bytes of @buf are all 0xff (erased flash
 * content), 0 otherwise. An empty buffer counts as blank.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
724
725 static void set_command_address(struct pxa3xx_nand_info *info,
726                 unsigned int page_size, uint16_t column, int page_addr)
727 {
728         /* small page addr setting */
729         if (page_size < PAGE_CHUNK_SIZE) {
730                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
731                                 | (column & 0xFF);
732
733                 info->ndcb2 = 0;
734         } else {
735                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
736                                 | (column & 0xFFFF);
737
738                 if (page_addr & 0xFF0000)
739                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
740                 else
741                         info->ndcb2 = 0;
742         }
743 }
744
/*
 * Reset the per-command state in @info and pre-configure ECC/spare
 * usage and transfer sizes for the command about to be issued.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through - data sizes are set up as for READOOB */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		/* no spare-area transfer for PARAM reads */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
792
/*
 * Translate a NAND command into the controller's command-buffer words.
 *
 * Fills info->ndcb0..ndcb3 and the transfer bookkeeping (buf_start,
 * buf_count, data_size) for @command.  @ext_cmd_type is only meaningful
 * for chunked operations on pages larger than PAGE_CHUNK_SIZE.
 *
 * Returns 1 when the command must actually be started on the
 * controller, 0 when there is nothing to execute (SEQIN on small
 * pages, a blank-page PAGEPROG, ERASE2, or an unsupported command).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	/* A non-zero chip select needs the CSEL bit set in NDCB0 */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* On small pages SEQIN only latches the address for PAGEPROG */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB data follows the page data in the driver buffer */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* Explicit length override: one chunk plus its OOB */
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Nothing was staged in the buffer: skip the program */
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small pages: SEQIN + PAGEPROG as one double-byte
			 * command (NDCB0_DBC) */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* Double-byte command: ERASE1 followed by ERASE2 */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Already issued as the second byte of ERASE1; no-op */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}
967
/*
 * nand_chip->cmdfunc for pages that fit the controller FIFO
 * (writesize <= PAGE_CHUNK_SIZE): the whole command is issued as one
 * controller transaction.  Larger pages use nand_cmdfunc_extended()
 * instead (see pxa3xx_nand_scan()).
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		/*
		 * Arm both completions before starting the controller so
		 * the IRQ handler cannot signal them while uninitialized.
		 */
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
1014
/*
 * nand_chip->cmdfunc for large pages (writesize > PAGE_CHUNK_SIZE):
 * one MTD command is split into a sequence of chunked controller
 * transactions, steered by the 'extended command type' field that is
 * updated between iterations (mono/naked/last read, dispatch write).
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to run: release a waitfunc() waiter now */
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1124
1125 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1126                 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1127 {
1128         chip->write_buf(mtd, buf, mtd->writesize);
1129         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1130
1131         return 0;
1132 }
1133
1134 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1135                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1136                 int page)
1137 {
1138         struct pxa3xx_nand_host *host = mtd->priv;
1139         struct pxa3xx_nand_info *info = host->info_data;
1140
1141         chip->read_buf(mtd, buf, mtd->writesize);
1142         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1143
1144         if (info->retcode == ERR_CORERR && info->use_ecc) {
1145                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1146
1147         } else if (info->retcode == ERR_UNCORERR) {
1148                 /*
1149                  * for blank page (all 0xff), HW will calculate its ECC as
1150                  * 0, which is different from the ECC information within
1151                  * OOB, ignore such uncorrectable errors
1152                  */
1153                 if (is_buf_blank(buf, mtd->writesize))
1154                         info->retcode = ERR_NONE;
1155                 else
1156                         mtd->ecc_stats.failed++;
1157         }
1158
1159         return info->max_bitflips;
1160 }
1161
1162 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1163 {
1164         struct pxa3xx_nand_host *host = mtd->priv;
1165         struct pxa3xx_nand_info *info = host->info_data;
1166         char retval = 0xFF;
1167
1168         if (info->buf_start < info->buf_count)
1169                 /* Has just send a new command? */
1170                 retval = info->data_buff[info->buf_start++];
1171
1172         return retval;
1173 }
1174
1175 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1176 {
1177         struct pxa3xx_nand_host *host = mtd->priv;
1178         struct pxa3xx_nand_info *info = host->info_data;
1179         u16 retval = 0xFFFF;
1180
1181         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1182                 retval = *((u16 *)(info->data_buff+info->buf_start));
1183                 info->buf_start += 2;
1184         }
1185         return retval;
1186 }
1187
1188 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1189 {
1190         struct pxa3xx_nand_host *host = mtd->priv;
1191         struct pxa3xx_nand_info *info = host->info_data;
1192         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1193
1194         memcpy(buf, info->data_buff + info->buf_start, real_len);
1195         info->buf_start += real_len;
1196 }
1197
1198 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1199                 const uint8_t *buf, int len)
1200 {
1201         struct pxa3xx_nand_host *host = mtd->priv;
1202         struct pxa3xx_nand_info *info = host->info_data;
1203         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1204
1205         memcpy(info->data_buff + info->buf_start, buf, real_len);
1206         info->buf_start += real_len;
1207 }
1208
/*
 * nand_chip->select_chip: nothing to do here — the chip select is
 * encoded per command via NDCB0_CSEL in prepare_set_command().
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1213
/*
 * nand_chip->waitfunc: wait for the device-ready completion armed by
 * the preceding cmdfunc call (if one is still pending) and report the
 * outcome of the command.
 *
 * Returns NAND_STATUS_FAIL on timeout or on a failed write/erase,
 * 0 for a successful write/erase, NAND_STATUS_READY otherwise.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;

	/* Consume the pending wait exactly once */
	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1238
1239 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1240                                     const struct pxa3xx_nand_flash *f)
1241 {
1242         struct platform_device *pdev = info->pdev;
1243         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1244         struct pxa3xx_nand_host *host = info->host[info->cs];
1245         uint32_t ndcr = 0x0; /* enable all interrupts */
1246
1247         if (f->page_size != 2048 && f->page_size != 512) {
1248                 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1249                 return -EINVAL;
1250         }
1251
1252         if (f->flash_width != 16 && f->flash_width != 8) {
1253                 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1254                 return -EINVAL;
1255         }
1256
1257         /* calculate flash information */
1258         host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
1259
1260         /* calculate addressing information */
1261         host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1262
1263         if (f->num_blocks * f->page_per_block > 65536)
1264                 host->row_addr_cycles = 3;
1265         else
1266                 host->row_addr_cycles = 2;
1267
1268         ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1269         ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1270         ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1271         ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1272         ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1273         ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1274
1275         ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1276         ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1277
1278         info->reg_ndcr = ndcr;
1279
1280         pxa3xx_nand_set_timing(host, f->timing);
1281         return 0;
1282 }
1283
/*
 * Read back the configuration left in the controller (keep_config
 * mode) instead of programming NDCR/NDTRx from a flash description.
 * Always returns 0.
 */
static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	/*
	 * We set 0 by hard coding here, for we don't support keep_config
	 * when there is more than one chip attached to the controller
	 */
	struct pxa3xx_nand_host *host = info->host[0];
	uint32_t ndcr = nand_readl(info, NDCR);

	if (ndcr & NDCR_PAGE_SZ) {
		/* Controller's FIFO size */
		info->chunk_size = 2048;
		host->read_id_bytes = 4;
	} else {
		info->chunk_size = 512;
		host->read_id_bytes = 2;
	}

	/* Cache NDCR (with interrupt bits cleared) and the current timings */
	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
	return 0;
}
1308
1309 #ifdef ARCH_HAS_DMA
/*
 * Allocate the data buffer; when DMA is enabled, also allocate the
 * coherent buffer and request the PXA DMA channel.
 *
 * Returns 0 on success or a negative errno.  info->use_dma is only
 * set after every DMA resource has been acquired, so callers can rely
 * on it for teardown (see pxa3xx_nand_free_buff()).
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	/* The DMA descriptor is carved out of the tail of the buffer */
	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);

	if (use_dma == 0) {
		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
		if (info->data_buff == NULL)
			return -ENOMEM;
		return 0;
	}

	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
				&info->data_buff_phys, GFP_KERNEL);
	if (info->data_buff == NULL) {
		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
		return -ENOMEM;
	}

	info->data_desc = (void *)info->data_buff + data_desc_offset;
	info->data_desc_addr = info->data_buff_phys + data_desc_offset;

	info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
				pxa3xx_nand_data_dma_irq, info);
	if (info->data_dma_ch < 0) {
		dev_err(&pdev->dev, "failed to request data dma\n");
		/* Roll back the coherent allocation on failure */
		dma_free_coherent(&pdev->dev, info->buf_size,
				info->data_buff, info->data_buff_phys);
		return info->data_dma_ch;
	}

	/*
	 * Now that DMA buffers are allocated we turn on
	 * DMA proper for I/O operations.
	 */
	info->use_dma = 1;
	return 0;
}
1348
/*
 * Release whatever pxa3xx_nand_init_buff() allocated: the DMA channel
 * and coherent buffer when DMA was enabled, the kmalloc'd buffer
 * otherwise.
 */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	if (info->use_dma) {
		pxa_free_dma(info->data_dma_ch);
		dma_free_coherent(&pdev->dev, info->buf_size,
				  info->data_buff, info->data_buff_phys);
	} else {
		kfree(info->data_buff);
	}
}
1360 #else
1361 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1362 {
1363         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1364         if (info->data_buff == NULL)
1365                 return -ENOMEM;
1366         return 0;
1367 }
1368
/* Non-DMA counterpart of pxa3xx_nand_init_buff(): free the PIO buffer */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	kfree(info->data_buff);
}
1373 #endif
1374
1375 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1376 {
1377         struct mtd_info *mtd;
1378         struct nand_chip *chip;
1379         int ret;
1380
1381         mtd = info->host[info->cs]->mtd;
1382         chip = mtd->priv;
1383
1384         /* use the common timing to make a try */
1385         ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1386         if (ret)
1387                 return ret;
1388
1389         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1390         ret = chip->waitfunc(mtd, chip);
1391         if (ret & NAND_STATUS_FAIL)
1392                 return -ENODEV;
1393
1394         return 0;
1395 }
1396
1397 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1398                         struct nand_ecc_ctrl *ecc,
1399                         int strength, int ecc_stepsize, int page_size)
1400 {
1401         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1402                 info->chunk_size = 2048;
1403                 info->spare_size = 40;
1404                 info->ecc_size = 24;
1405                 ecc->mode = NAND_ECC_HW;
1406                 ecc->size = 512;
1407                 ecc->strength = 1;
1408
1409         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1410                 info->chunk_size = 512;
1411                 info->spare_size = 8;
1412                 info->ecc_size = 8;
1413                 ecc->mode = NAND_ECC_HW;
1414                 ecc->size = 512;
1415                 ecc->strength = 1;
1416
1417         /*
1418          * Required ECC: 4-bit correction per 512 bytes
1419          * Select: 16-bit correction per 2048 bytes
1420          */
1421         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1422                 info->ecc_bch = 1;
1423                 info->chunk_size = 2048;
1424                 info->spare_size = 32;
1425                 info->ecc_size = 32;
1426                 ecc->mode = NAND_ECC_HW;
1427                 ecc->size = info->chunk_size;
1428                 ecc->layout = &ecc_layout_2KB_bch4bit;
1429                 ecc->strength = 16;
1430
1431         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1432                 info->ecc_bch = 1;
1433                 info->chunk_size = 2048;
1434                 info->spare_size = 32;
1435                 info->ecc_size = 32;
1436                 ecc->mode = NAND_ECC_HW;
1437                 ecc->size = info->chunk_size;
1438                 ecc->layout = &ecc_layout_4KB_bch4bit;
1439                 ecc->strength = 16;
1440
1441         /*
1442          * Required ECC: 8-bit correction per 512 bytes
1443          * Select: 16-bit correction per 1024 bytes
1444          */
1445         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1446                 info->ecc_bch = 1;
1447                 info->chunk_size = 1024;
1448                 info->spare_size = 0;
1449                 info->ecc_size = 32;
1450                 ecc->mode = NAND_ECC_HW;
1451                 ecc->size = info->chunk_size;
1452                 ecc->layout = &ecc_layout_4KB_bch8bit;
1453                 ecc->strength = 16;
1454         } else {
1455                 dev_err(&info->pdev->dev,
1456                         "ECC strength %d at page size %d is not supported\n",
1457                         strength, page_size);
1458                 return -ENODEV;
1459         }
1460
1461         dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1462                  ecc->strength, ecc->size);
1463         return 0;
1464 }
1465
/*
 * Identify the attached chip, configure the controller for it and
 * finish the MTD scan.  Either reuses the bootloader's configuration
 * (pdata->keep_config) or probes the chip id against the platform and
 * builtin flash tables.  Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct pxa3xx_nand_host *host = mtd->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
	const struct pxa3xx_nand_flash *f = NULL;
	struct nand_chip *chip = mtd->priv;
	uint32_t id = -1;
	uint64_t chipsize;
	int i, ret, num;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	/* Check whether any chip answers on this chip select */
	ret = pxa3xx_nand_sensing(info);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
	id = *((uint16_t *)(info->data_buff));
	if (id != 0)
		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
	else {
		dev_warn(&info->pdev->dev,
			 "Read out ID 0, potential timing set wrong!!\n");

		return -EINVAL;
	}

	/* Search the platform table first, then the builtin one */
	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
	for (i = 0; i < num; i++) {
		if (i < pdata->num_flash)
			f = pdata->flash + i;
		else
			f = &builtin_flash_types[i - pdata->num_flash + 1];

		/* find the chip in default list */
		if (f->chip_id == id)
			break;
	}

	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");

		return -EINVAL;
	}

	ret = pxa3xx_nand_config_flash(info, f);
	if (ret) {
		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
		return ret;
	}

	/* Build a one-entry id table for nand_scan_ident() below */
	memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));

	pxa3xx_flash_ids[0].name = f->name;
	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
	pxa3xx_flash_ids[0].pagesize = f->page_size;
	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
	if (f->flash_width == 16)
		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
	pxa3xx_flash_ids[1].name = NULL;
	def = pxa3xx_flash_ids;
KEEP_CONFIG:
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, def))
		return -ENODEV;

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform-provided ECC requirements take precedence */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* More than 64K pages needs a third row address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}
1619
1620 static int alloc_nand_resource(struct platform_device *pdev)
1621 {
1622         struct pxa3xx_nand_platform_data *pdata;
1623         struct pxa3xx_nand_info *info;
1624         struct pxa3xx_nand_host *host;
1625         struct nand_chip *chip = NULL;
1626         struct mtd_info *mtd;
1627         struct resource *r;
1628         int ret, irq, cs;
1629
1630         pdata = dev_get_platdata(&pdev->dev);
1631         if (pdata->num_cs <= 0)
1632                 return -ENODEV;
1633         info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1634                             sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1635         if (!info)
1636                 return -ENOMEM;
1637
1638         info->pdev = pdev;
1639         info->variant = pxa3xx_nand_get_variant(pdev);
1640         for (cs = 0; cs < pdata->num_cs; cs++) {
1641                 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1642                 chip = (struct nand_chip *)(&mtd[1]);
1643                 host = (struct pxa3xx_nand_host *)chip;
1644                 info->host[cs] = host;
1645                 host->mtd = mtd;
1646                 host->cs = cs;
1647                 host->info_data = info;
1648                 mtd->priv = host;
1649                 mtd->owner = THIS_MODULE;
1650
1651                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1652                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1653                 chip->controller        = &info->controller;
1654                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1655                 chip->select_chip       = pxa3xx_nand_select_chip;
1656                 chip->read_word         = pxa3xx_nand_read_word;
1657                 chip->read_byte         = pxa3xx_nand_read_byte;
1658                 chip->read_buf          = pxa3xx_nand_read_buf;
1659                 chip->write_buf         = pxa3xx_nand_write_buf;
1660                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1661                 chip->cmdfunc           = nand_cmdfunc;
1662         }
1663
1664         spin_lock_init(&chip->controller->lock);
1665         init_waitqueue_head(&chip->controller->wq);
1666         info->clk = devm_clk_get(&pdev->dev, NULL);
1667         if (IS_ERR(info->clk)) {
1668                 dev_err(&pdev->dev, "failed to get nand clock\n");
1669                 return PTR_ERR(info->clk);
1670         }
1671         ret = clk_prepare_enable(info->clk);
1672         if (ret < 0)
1673                 return ret;
1674
1675         if (use_dma) {
1676                 /*
1677                  * This is a dirty hack to make this driver work from
1678                  * devicetree bindings. It can be removed once we have
1679                  * a prober DMA controller framework for DT.
1680                  */
1681                 if (pdev->dev.of_node &&
1682                     of_machine_is_compatible("marvell,pxa3xx")) {
1683                         info->drcmr_dat = 97;
1684                         info->drcmr_cmd = 99;
1685                 } else {
1686                         r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1687                         if (r == NULL) {
1688                                 dev_err(&pdev->dev,
1689                                         "no resource defined for data DMA\n");
1690                                 ret = -ENXIO;
1691                                 goto fail_disable_clk;
1692                         }
1693                         info->drcmr_dat = r->start;
1694
1695                         r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1696                         if (r == NULL) {
1697                                 dev_err(&pdev->dev,
1698                                         "no resource defined for cmd DMA\n");
1699                                 ret = -ENXIO;
1700                                 goto fail_disable_clk;
1701                         }
1702                         info->drcmr_cmd = r->start;
1703                 }
1704         }
1705
1706         irq = platform_get_irq(pdev, 0);
1707         if (irq < 0) {
1708                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1709                 ret = -ENXIO;
1710                 goto fail_disable_clk;
1711         }
1712
1713         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1714         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1715         if (IS_ERR(info->mmio_base)) {
1716                 ret = PTR_ERR(info->mmio_base);
1717                 goto fail_disable_clk;
1718         }
1719         info->mmio_phys = r->start;
1720
1721         /* Allocate a buffer to allow flash detection */
1722         info->buf_size = INIT_BUFFER_SIZE;
1723         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1724         if (info->data_buff == NULL) {
1725                 ret = -ENOMEM;
1726                 goto fail_disable_clk;
1727         }
1728
1729         /* initialize all interrupts to be disabled */
1730         disable_int(info, NDSR_MASK);
1731
1732         ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1733                                    pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1734                                    pdev->name, info);
1735         if (ret < 0) {
1736                 dev_err(&pdev->dev, "failed to request IRQ\n");
1737                 goto fail_free_buf;
1738         }
1739
1740         platform_set_drvdata(pdev, info);
1741
1742         return 0;
1743
1744 fail_free_buf:
1745         free_irq(irq, info);
1746         kfree(info->data_buff);
1747 fail_disable_clk:
1748         clk_disable_unprepare(info->clk);
1749         return ret;
1750 }
1751
/*
 * pxa3xx_nand_remove - undo alloc_nand_resource()/probe on device removal.
 *
 * Releases the IRQ, the data/oob buffers and the clock, then unregisters
 * each chip select's mtd device.
 *
 * NOTE(review): nand_release() runs after the clock has already been
 * disabled and the IRQ freed -- confirm the mtd teardown path never
 * touches the controller hardware at this point.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	/* Nothing to tear down if probe never attached drvdata */
	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	/* Free the IRQ only if one was actually mapped for this device */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	clk_disable_unprepare(info->clk);

	/* Unregister every chip select's mtd device */
	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(info->host[cs]->mtd);
	return 0;
}
1774
1775 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1776 {
1777         struct pxa3xx_nand_platform_data *pdata;
1778         struct device_node *np = pdev->dev.of_node;
1779         const struct of_device_id *of_id =
1780                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1781
1782         if (!of_id)
1783                 return 0;
1784
1785         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1786         if (!pdata)
1787                 return -ENOMEM;
1788
1789         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1790                 pdata->enable_arbiter = 1;
1791         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1792                 pdata->keep_config = 1;
1793         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1794         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1795
1796         pdata->ecc_strength = of_get_nand_ecc_strength(np);
1797         if (pdata->ecc_strength < 0)
1798                 pdata->ecc_strength = 0;
1799
1800         pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1801         if (pdata->ecc_step_size < 0)
1802                 pdata->ecc_step_size = 0;
1803
1804         pdev->dev.platform_data = pdata;
1805
1806         return 0;
1807 }
1808
1809 static int pxa3xx_nand_probe(struct platform_device *pdev)
1810 {
1811         struct pxa3xx_nand_platform_data *pdata;
1812         struct mtd_part_parser_data ppdata = {};
1813         struct pxa3xx_nand_info *info;
1814         int ret, cs, probe_success;
1815
1816 #ifndef ARCH_HAS_DMA
1817         if (use_dma) {
1818                 use_dma = 0;
1819                 dev_warn(&pdev->dev,
1820                          "This platform can't do DMA on this device\n");
1821         }
1822 #endif
1823         ret = pxa3xx_nand_probe_dt(pdev);
1824         if (ret)
1825                 return ret;
1826
1827         pdata = dev_get_platdata(&pdev->dev);
1828         if (!pdata) {
1829                 dev_err(&pdev->dev, "no platform data defined\n");
1830                 return -ENODEV;
1831         }
1832
1833         ret = alloc_nand_resource(pdev);
1834         if (ret) {
1835                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1836                 return ret;
1837         }
1838
1839         info = platform_get_drvdata(pdev);
1840         probe_success = 0;
1841         for (cs = 0; cs < pdata->num_cs; cs++) {
1842                 struct mtd_info *mtd = info->host[cs]->mtd;
1843
1844                 /*
1845                  * The mtd name matches the one used in 'mtdparts' kernel
1846                  * parameter. This name cannot be changed or otherwise
1847                  * user's mtd partitions configuration would get broken.
1848                  */
1849                 mtd->name = "pxa3xx_nand-0";
1850                 info->cs = cs;
1851                 ret = pxa3xx_nand_scan(mtd);
1852                 if (ret) {
1853                         dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1854                                 cs);
1855                         continue;
1856                 }
1857
1858                 ppdata.of_node = pdev->dev.of_node;
1859                 ret = mtd_device_parse_register(mtd, NULL,
1860                                                 &ppdata, pdata->parts[cs],
1861                                                 pdata->nr_parts[cs]);
1862                 if (!ret)
1863                         probe_success = 1;
1864         }
1865
1866         if (!probe_success) {
1867                 pxa3xx_nand_remove(pdev);
1868                 return -ENODEV;
1869         }
1870
1871         return 0;
1872 }
1873
1874 #ifdef CONFIG_PM
1875 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1876 {
1877         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1878         struct pxa3xx_nand_platform_data *pdata;
1879         struct mtd_info *mtd;
1880         int cs;
1881
1882         pdata = dev_get_platdata(&pdev->dev);
1883         if (info->state) {
1884                 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1885                 return -EAGAIN;
1886         }
1887
1888         for (cs = 0; cs < pdata->num_cs; cs++) {
1889                 mtd = info->host[cs]->mtd;
1890                 mtd_suspend(mtd);
1891         }
1892
1893         return 0;
1894 }
1895
/*
 * pxa3xx_nand_resume - legacy platform-bus resume hook.
 *
 * Re-arms the controller in a known state (interrupts masked, chip select
 * invalidated, status register cleared) before resuming each mtd device.
 * The ordering below is deliberate; do not reorder.
 */
static int pxa3xx_nand_resume(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	struct mtd_info *mtd;
	int cs;

	pdata = dev_get_platdata(&pdev->dev);
	/* We don't want to handle interrupt without calling mtd routine */
	disable_int(info, NDCR_INT_MASK);

	/*
	 * Directly set the chip select to an invalid value so that the
	 * driver reprograms the timing for the current chip select at the
	 * beginning of the next cmdfunc invocation.
	 */
	info->cs = 0xff;

	/*
	 * As the spec says, the NDSR would be updated to 0x1800 when
	 * doing the nand_clk disable/enable.
	 * To prevent it damaging state machine of the driver, clear
	 * all status before resume
	 */
	nand_writel(info, NDSR, NDSR_MASK);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		mtd = info->host[cs]->mtd;
		mtd_resume(mtd);
	}

	return 0;
}
1928 #else
1929 #define pxa3xx_nand_suspend     NULL
1930 #define pxa3xx_nand_resume      NULL
1931 #endif
1932
1933 static struct platform_driver pxa3xx_nand_driver = {
1934         .driver = {
1935                 .name   = "pxa3xx-nand",
1936                 .of_match_table = pxa3xx_nand_dt_ids,
1937         },
1938         .probe          = pxa3xx_nand_probe,
1939         .remove         = pxa3xx_nand_remove,
1940         .suspend        = pxa3xx_nand_suspend,
1941         .resume         = pxa3xx_nand_resume,
1942 };
1943
1944 module_platform_driver(pxa3xx_nand_driver);
1945
1946 MODULE_LICENSE("GPL");
1947 MODULE_DESCRIPTION("PXA3xx NAND controller driver");