/*
 * Source: drivers/mtd/nand/pxa3xx_nand.c
 * (karo-tx-linux.git, merge of tag v4.3-rc1 into MTD -next development)
 */
1 /*
2  * drivers/mtd/nand/pxa3xx_nand.c
3  *
4  * Copyright © 2005 Intel Corporation
5  * Copyright © 2006 Marvell International Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
24 #include <linux/io.h>
25 #include <linux/iopoll.h>
26 #include <linux/irq.h>
27 #include <linux/slab.h>
28 #include <linux/of.h>
29 #include <linux/of_device.h>
30 #include <linux/of_mtd.h>
31
32 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
33 #define ARCH_HAS_DMA
34 #endif
35
36 #ifdef ARCH_HAS_DMA
37 #include <mach/dma.h>
38 #endif
39
40 #include <linux/platform_data/mtd-nand-pxa3xx.h>
41
42 #define CHIP_DELAY_TIMEOUT      msecs_to_jiffies(200)
43 #define NAND_STOP_DELAY         msecs_to_jiffies(40)
44 #define PAGE_CHUNK_SIZE         (2048)
45
46 /*
47  * Define a buffer size for the initial command that detects the flash device:
48  * STATUS, READID and PARAM.
49  * ONFI param page is 256 bytes, and there are three redundant copies
50  * to be read. JEDEC param page is 512 bytes, and there are also three
51  * redundant copies to be read.
52  * Hence this buffer should be at least 512 x 3. Let's pick 2048.
53  */
54 #define INIT_BUFFER_SIZE        2048
55
56 /* registers and bit definitions */
57 #define NDCR            (0x00) /* Control register */
58 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
59 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
60 #define NDSR            (0x14) /* Status Register */
61 #define NDPCR           (0x18) /* Page Count Register */
62 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
63 #define NDBDR1          (0x20) /* Bad Block Register 1 */
64 #define NDECCCTRL       (0x28) /* ECC control */
65 #define NDDB            (0x40) /* Data Buffer */
66 #define NDCB0           (0x48) /* Command Buffer0 */
67 #define NDCB1           (0x4C) /* Command Buffer1 */
68 #define NDCB2           (0x50) /* Command Buffer2 */
69
70 #define NDCR_SPARE_EN           (0x1 << 31)
71 #define NDCR_ECC_EN             (0x1 << 30)
72 #define NDCR_DMA_EN             (0x1 << 29)
73 #define NDCR_ND_RUN             (0x1 << 28)
74 #define NDCR_DWIDTH_C           (0x1 << 27)
75 #define NDCR_DWIDTH_M           (0x1 << 26)
76 #define NDCR_PAGE_SZ            (0x1 << 24)
77 #define NDCR_NCSX               (0x1 << 23)
78 #define NDCR_ND_MODE            (0x3 << 21)
79 #define NDCR_NAND_MODE          (0x0)
80 #define NDCR_CLR_PG_CNT         (0x1 << 20)
81 #define NDCR_STOP_ON_UNCOR      (0x1 << 19)
82 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
83 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
84
85 #define NDCR_RA_START           (0x1 << 15)
86 #define NDCR_PG_PER_BLK         (0x1 << 14)
87 #define NDCR_ND_ARB_EN          (0x1 << 12)
88 #define NDCR_INT_MASK           (0xFFF)
89
90 #define NDSR_MASK               (0xfff)
91 #define NDSR_ERR_CNT_OFF        (16)
92 #define NDSR_ERR_CNT_MASK       (0x1f)
93 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
94 #define NDSR_RDY                (0x1 << 12)
95 #define NDSR_FLASH_RDY          (0x1 << 11)
96 #define NDSR_CS0_PAGED          (0x1 << 10)
97 #define NDSR_CS1_PAGED          (0x1 << 9)
98 #define NDSR_CS0_CMDD           (0x1 << 8)
99 #define NDSR_CS1_CMDD           (0x1 << 7)
100 #define NDSR_CS0_BBD            (0x1 << 6)
101 #define NDSR_CS1_BBD            (0x1 << 5)
102 #define NDSR_UNCORERR           (0x1 << 4)
103 #define NDSR_CORERR             (0x1 << 3)
104 #define NDSR_WRDREQ             (0x1 << 2)
105 #define NDSR_RDDREQ             (0x1 << 1)
106 #define NDSR_WRCMDREQ           (0x1)
107
108 #define NDCB0_LEN_OVRD          (0x1 << 28)
109 #define NDCB0_ST_ROW_EN         (0x1 << 26)
110 #define NDCB0_AUTO_RS           (0x1 << 25)
111 #define NDCB0_CSEL              (0x1 << 24)
112 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
113 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
114 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
115 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
116 #define NDCB0_NC                (0x1 << 20)
117 #define NDCB0_DBC               (0x1 << 19)
118 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
119 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
120 #define NDCB0_CMD2_MASK         (0xff << 8)
121 #define NDCB0_CMD1_MASK         (0xff)
122 #define NDCB0_ADDR_CYC_SHIFT    (16)
123
124 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
125 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
126 #define EXT_CMD_TYPE_READ       4 /* Read */
127 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
128 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
129 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
130 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
131
132 /*
133  * This should be large enough to read 'ONFI' and 'JEDEC'.
134  * Let's use 7 bytes, which is the maximum ID count supported
135  * by the controller (see NDCR_RD_ID_CNT_MASK).
136  */
137 #define READ_ID_BYTES           7
138
139 /* macros for registers read/write */
140 #define nand_writel(info, off, val)     \
141         writel_relaxed((val), (info)->mmio_base + (off))
142
143 #define nand_readl(info, off)           \
144         readl_relaxed((info)->mmio_base + (off))
145
146 /* error code and state */
/* Driver-internal result codes stored in info->retcode (ERR_NONE on success). */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error reported by the DMA engine */
	ERR_SENDCMD	= -2,	/* failed to issue a command */
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error */
	ERR_BBERR	= -4,	/* bad-block detected */
	ERR_CORERR	= -5,	/* correctable ECC error (see ecc_err_cnt) */
};

/* Command state machine, advanced by the IRQ handler and DMA callback. */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,		/* command buffers computed, not yet issued */
	STATE_CMD_HANDLE,	/* loading NDCBx command registers */
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

/* Controller generation: NFCv1 (PXA SoCs) vs NFCv2 (Armada 370/XP). */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
173
/* Per-chip (per chip-select) state attached to one struct pxa3xx_nand_info. */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	void			*info_data;	/* back-pointer to pxa3xx_nand_info */

	/* page size of attached chip */
	int			use_ecc;
	int			cs;		/* chip select this host sits on */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};
187
/*
 * Per-controller driver state. A single instance serves up to
 * NUM_CHIP_SELECT attached chips (see host[]).
 */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct platform_device	 *pdev;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	/* signalled from the IRQ handler on CMDD / ready interrupts */
	struct completion	cmd_complete, dev_ready;

	unsigned int		buf_start;	/* caller's offset into data_buff */
	unsigned int		buf_count;	/* total bytes for current command */
	unsigned int		buf_size;	/* allocated size of data_buff */
	unsigned int		data_buff_pos;	/* controller-side data position */
	unsigned int		oob_buff_pos;	/* controller-side OOB position */

	/* DMA information */
	int			drcmr_dat;
	int			drcmr_cmd;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;	/* points into data_buff's OOB area */
	dma_addr_t		data_buff_phys;
	int			data_dma_ch;
	struct pxa_dma_desc	*data_desc;
	dma_addr_t		data_desc_addr;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* STATE_* machine, see enum above */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_dma;	/* use DMA ? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;	/* ERR_* result of last command */

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
250
251 static bool use_dma = 1;
252 module_param(use_dma, bool, 0444);
253 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
254
/* NAND timing parameters, all in nanoseconds (converted to controller
 * clock cycles by ns2cycle() in pxa3xx_nand_set_timing()). */
struct pxa3xx_nand_timing {
	unsigned int	tCH;  /* Enable signal hold time */
	unsigned int	tCS;  /* Enable signal setup time */
	unsigned int	tWH;  /* ND_nWE high duration */
	unsigned int	tWP;  /* ND_nWE pulse time */
	unsigned int	tRH;  /* ND_nRE high duration */
	unsigned int	tRP;  /* ND_nRE pulse width */
	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
};

/* Static description of a supported flash chip, keyed by chip_id. */
struct pxa3xx_nand_flash {
	char		*name;
	uint32_t	chip_id;
	unsigned int	page_per_block;	/* Pages per block (PG_PER_BLK) */
	unsigned int	page_size;	/* Page size in bytes (PAGE_SZ) */
	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
	unsigned int	dfc_width;	/* Width of flash controller(DWIDTH_C) */
	unsigned int	num_blocks;	/* Number of physical blocks in Flash */

	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
};
278
/* Timing sets referenced by builtin_flash_types below.
 * Columns (ns): tCH, tCS, tWH, tWP, tRH, tRP, tR, tWHR, tAR. */
static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

/* Columns: name, chip_id, pages/block, page size, flash width,
 * controller width, number of blocks, timing set.
 * Entry 0 is the catch-all used during initial detection. */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};
297
/* On-flash bad-block-table signatures; the mirror pattern is the
 * main pattern reversed. */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Primary BBT descriptor: table stored in the last blocks of the chip. */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror (backup) BBT descriptor, same placement policy. */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

/* BCH-4bit OOB layout for 2KB pages: ECC in the upper half of the
 * 64-byte OOB, bytes 2-31 free for users. */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

/* BCH-4bit OOB layout for 4KB pages (two 2KB chunks). */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/* BCH-8bit OOB layout for 4KB pages. NOTE(review): eccbytes is 128 but
 * only 32 positions are listed and oobfree is empty — presumably the
 * remaining ECC bytes lie outside the MTD-visible OOB; confirm against
 * the controller's spare-area mapping. */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	.oobfree = { }
};
355
356 /* Define a default flash type setting serve as flash detecting only */
357 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
358
359 #define NDTR0_tCH(c)    (min((c), 7) << 19)
360 #define NDTR0_tCS(c)    (min((c), 7) << 16)
361 #define NDTR0_tWH(c)    (min((c), 7) << 11)
362 #define NDTR0_tWP(c)    (min((c), 7) << 8)
363 #define NDTR0_tRH(c)    (min((c), 7) << 3)
364 #define NDTR0_tRP(c)    (min((c), 7) << 0)
365
366 #define NDTR1_tR(c)     (min((c), 65535) << 16)
367 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
368 #define NDTR1_tAR(c)    (min((c), 15) << 0)
369
370 /* convert nano-seconds to nand flash controller clock cycles */
371 #define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
372
373 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
374         {
375                 .compatible = "marvell,pxa3xx-nand",
376                 .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
377         },
378         {
379                 .compatible = "marvell,armada370-nand",
380                 .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
381         },
382         {}
383 };
384 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
385
386 static enum pxa3xx_nand_variant
387 pxa3xx_nand_get_variant(struct platform_device *pdev)
388 {
389         const struct of_device_id *of_id =
390                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
391         if (!of_id)
392                 return PXA3XX_NAND_VARIANT_PXA;
393         return (enum pxa3xx_nand_variant)of_id->data;
394 }
395
/*
 * Program the CS0 timing registers from a nanosecond-based timing set.
 * Each value is converted to controller clock cycles (ns2cycle) and
 * clamped to its register field width by the NDTRx_* macros. The
 * computed values are also cached in info for later restore.
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	/* cache so the values can be re-applied after suspend/reset */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
419
420 /*
421  * Set the data and OOB size, depending on the selected
422  * spare and ECC configuration.
423  * Only applicable to READ0, READOOB and PAGEPROG commands.
424  */
425 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
426                                 struct mtd_info *mtd)
427 {
428         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
429
430         info->data_size = mtd->writesize;
431         if (!oob_enable)
432                 return;
433
434         info->oob_size = info->spare_size;
435         if (!info->use_ecc)
436                 info->oob_size += info->ecc_size;
437 }
438
/*
 * Kick off a command sequence.
 *
 * NOTE: it is a must to set ND_RUN firstly, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	/* apply the per-command ECC configuration */
	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	if (info->use_dma)
		ndcr |= NDCR_DMA_EN;
	else
		ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}
478
/*
 * Wait for the controller to finish the current command and clear
 * pending status bits. If the RUN bit does not drop within the
 * timeout, it is forcibly cleared.
 *
 * NOTE(review): NAND_STOP_DELAY is msecs_to_jiffies(40) but it is used
 * here as a count of udelay(1) polls, i.e. microseconds — the units
 * look mismatched; confirm the intended wait time.
 */
static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;
	int timeout = NAND_STOP_DELAY;

	/* wait RUN bit in NDCR become 0 */
	ndcr = nand_readl(info, NDCR);
	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
		ndcr = nand_readl(info, NDCR);
		udelay(1);
	}

	if (timeout <= 0) {
		/* controller did not stop on its own: force RUN off */
		ndcr &= ~NDCR_ND_RUN;
		nand_writel(info, NDCR, ndcr);
	}
	/* clear status bits */
	nand_writel(info, NDSR, NDSR_MASK);
}
498
499 static void __maybe_unused
500 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
501 {
502         uint32_t ndcr;
503
504         ndcr = nand_readl(info, NDCR);
505         nand_writel(info, NDCR, ndcr & ~int_mask);
506 }
507
508 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
509 {
510         uint32_t ndcr;
511
512         ndcr = nand_readl(info, NDCR);
513         nand_writel(info, NDCR, ndcr | int_mask);
514 }
515
/*
 * Read @len 32-bit words from the controller data FIFO (NDDB) into
 * @data. @len is a word count, not a byte count (readsl semantics).
 *
 * With BCH ECC enabled the FIFO must be drained in bursts of 8 words
 * (32 bytes), re-checking NDSR.RDDREQ between bursts; hence the
 * "data += 32 bytes / len -= 8 words" bookkeeping below.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				/* give up: remaining data is lost */
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			data += 32;	/* 8 words = 32 bytes */
			len -= 8;
		}
	}

	/* final (or only) burst, no RDDREQ polling needed afterwards */
	readsl(info->mmio_base + NDDB, data, len);
}
548
/*
 * Move one chunk of data (and its OOB bytes) between the driver
 * buffers and the controller FIFO in PIO mode. Runs in the threaded
 * IRQ handler; direction is taken from info->state. Byte counts are
 * rounded up to whole 32-bit FIFO words.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* a multi-chunk page transfers at most chunk_size per request */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		/* any other state here is a driver logic error */
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
585
586 #ifdef ARCH_HAS_DMA
/*
 * Set up and start a PXA DMA transfer between data_buff and the
 * controller FIFO. Direction comes from info->state; the transfer
 * covers data + OOB, rounded up to a 32-byte burst boundary.
 * Completion is reported through pxa3xx_nand_data_dma_irq().
 */
static void start_data_dma(struct pxa3xx_nand_info *info)
{
	struct pxa_dma_desc *desc = info->data_desc;
	/* burst size is 32 bytes, so align the total length to it */
	int dma_len = ALIGN(info->data_size + info->oob_size, 32);

	desc->ddadr = DDADR_STOP;
	desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;

	switch (info->state) {
	case STATE_DMA_WRITING:
		/* memory -> FIFO: increment source, target is the flow control */
		desc->dsadr = info->data_buff_phys;
		desc->dtadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
		break;
	case STATE_DMA_READING:
		/* FIFO -> memory: increment target, source is the flow control */
		desc->dtadr = info->data_buff_phys;
		desc->dsadr = info->mmio_phys + NDDB;
		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
		break;
	default:
		/* any other state here is a driver logic error */
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* map the request line to our channel and start it */
	DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
	DDADR(info->data_dma_ch) = info->data_desc_addr;
	DCSR(info->data_dma_ch) |= DCSR_RUN;
}
616
/*
 * DMA completion callback: acknowledge the channel status, record a
 * bus error if one occurred, then re-enable controller interrupts and
 * clear the data-request bits so the command can finish.
 */
static void pxa3xx_nand_data_dma_irq(int channel, void *data)
{
	struct pxa3xx_nand_info *info = data;
	uint32_t dcsr;

	/* read and write back DCSR to acknowledge the channel events */
	dcsr = DCSR(channel);
	DCSR(channel) = dcsr;

	if (dcsr & DCSR_BUSERR) {
		info->retcode = ERR_DMABUSERR;
	}

	info->state = STATE_DMA_DONE;
	enable_int(info, NDCR_INT_MASK);
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
633 #else
634 static void start_data_dma(struct pxa3xx_nand_info *info)
635 {}
636 #endif
637
/*
 * Threaded half of the NAND interrupt: performs the actual PIO data
 * transfer (which may sleep/poll) outside hard-IRQ context, then
 * acknowledges the data-request status bits.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
649
/*
 * Top-half interrupt handler; drives the command state machine.
 *
 * Handles, in order: ECC error accounting, data requests (hand off to
 * DMA or wake the threaded handler for PIO), command-done and
 * device-ready events, and the write-command-request phase where the
 * NDCB0..3 command buffers are loaded. Returns IRQ_WAKE_THREAD when
 * pxa3xx_nand_irq_thread() must move PIO data.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* CS0 and CS1 report ready/command-done through different bits */
	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* only NFCv2 with BCH reports an actual error count */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			/* mask IRQs while DMA runs; re-enabled in the DMA callback */
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* wake up waiters in pxa3xx_nand_cmdfunc paths */
	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
745
/* Return 1 if the first @len bytes of @buf are all 0xff (erased), 0 otherwise. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}
	return 1;
}
753
754 static void set_command_address(struct pxa3xx_nand_info *info,
755                 unsigned int page_size, uint16_t column, int page_addr)
756 {
757         /* small page addr setting */
758         if (page_size < PAGE_CHUNK_SIZE) {
759                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
760                                 | (column & 0xFF);
761
762                 info->ndcb2 = 0;
763         } else {
764                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
765                                 | (column & 0xFFFF);
766
767                 if (page_addr & 0xFF0000)
768                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
769                 else
770                         info->ndcb2 = 0;
771         }
772 }
773
/*
 * Reset the per-command bookkeeping in @info and derive the data/OOB
 * sizes for @command. Called before the command buffers are built in
 * prepare_set_command().
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through - READOOB shares the data-size setup */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		/* param page is read without the spare area */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
821
/*
 * Encode @command into the controller command buffers (NDCB0..NDCB3)
 * held in @info, using @ext_cmd_type for the chunked (extended) command
 * variants on large-page devices.
 *
 * Returns 1 when the caller must actually start the controller, 0 when
 * no transaction is needed (SEQIN on small pages, a blank-page program,
 * ERASE2, or an unsupported command).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
                int ext_cmd_type, uint16_t column, int page_addr)
{
        int addr_cycle, exec_cmd;
        struct pxa3xx_nand_host *host;
        struct mtd_info *mtd;

        host = info->host[info->cs];
        mtd = host->mtd;
        addr_cycle = 0;
        exec_cmd = 1;

        /* chip selection is encoded per command in NDCB0 */
        if (info->cs != 0)
                info->ndcb0 = NDCB0_CSEL;
        else
                info->ndcb0 = 0;

        /*
         * SEQIN only latches the write address; the actual transfer is
         * started later by PAGEPROG (except for the large-page case
         * handled below, which re-enables execution).
         */
        if (command == NAND_CMD_SEQIN)
                exec_cmd = 0;

        addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
                                    + host->col_addr_cycles);

        switch (command) {
        case NAND_CMD_READOOB:
        case NAND_CMD_READ0:
                info->buf_start = column;
                info->ndcb0 |= NDCB0_CMD_TYPE(0)
                                | addr_cycle
                                | NAND_CMD_READ0;

                /* OOB data lives right after the page data in the buffer */
                if (command == NAND_CMD_READOOB)
                        info->buf_start += mtd->writesize;

                /*
                 * Multiple page read needs an 'extended command type' field,
                 * which is either naked-read or last-read according to the
                 * state.
                 */
                if (mtd->writesize == PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
                } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
                                        | NDCB0_LEN_OVRD
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
                        info->ndcb3 = info->chunk_size +
                                      info->oob_size;
                }

                set_command_address(info, mtd->writesize, column, page_addr);
                break;

        case NAND_CMD_SEQIN:

                info->buf_start = column;
                set_command_address(info, mtd->writesize, 0, page_addr);

                /*
                 * Multiple page programming needs to execute the initial
                 * SEQIN command that sets the page address.
                 */
                if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
                                | addr_cycle
                                | command;
                        /* No data transfer in this case */
                        info->data_size = 0;
                        exec_cmd = 1;
                }
                break;

        case NAND_CMD_PAGEPROG:
                /* writing an all-0xFF page is a no-op on NAND flash */
                if (is_buf_blank(info->data_buff,
                                        (mtd->writesize + mtd->oobsize))) {
                        exec_cmd = 0;
                        break;
                }

                /* Second command setting for large pages */
                if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        /*
                         * Multiple page write uses the 'extended command'
                         * field. This can be used to issue a command dispatch
                         * or a naked-write depending on the current stage.
                         */
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_LEN_OVRD
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
                        info->ndcb3 = info->chunk_size +
                                      info->oob_size;

                        /*
                         * This is the command dispatch that completes a chunked
                         * page program operation.
                         */
                        if (info->data_size == 0) {
                                info->ndcb0 = NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
                                        | command;
                                info->ndcb1 = 0;
                                info->ndcb2 = 0;
                                info->ndcb3 = 0;
                        }
                } else {
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_AUTO_RS
                                        | NDCB0_ST_ROW_EN
                                        | NDCB0_DBC
                                        | (NAND_CMD_PAGEPROG << 8)
                                        | NAND_CMD_SEQIN
                                        | addr_cycle;
                }
                break;

        case NAND_CMD_PARAM:
                info->buf_count = INIT_BUFFER_SIZE;
                info->ndcb0 |= NDCB0_CMD_TYPE(0)
                                | NDCB0_ADDR_CYC(1)
                                | NDCB0_LEN_OVRD
                                | command;
                info->ndcb1 = (column & 0xFF);
                info->ndcb3 = INIT_BUFFER_SIZE;
                info->data_size = INIT_BUFFER_SIZE;
                break;

        case NAND_CMD_READID:
                info->buf_count = READ_ID_BYTES;
                info->ndcb0 |= NDCB0_CMD_TYPE(3)
                                | NDCB0_ADDR_CYC(1)
                                | command;
                info->ndcb1 = (column & 0xFF);

                info->data_size = 8;
                break;
        case NAND_CMD_STATUS:
                info->buf_count = 1;
                info->ndcb0 |= NDCB0_CMD_TYPE(4)
                                | NDCB0_ADDR_CYC(1)
                                | command;

                info->data_size = 8;
                break;

        case NAND_CMD_ERASE1:
                /* single command buffer carries both ERASE1 and ERASE2 */
                info->ndcb0 |= NDCB0_CMD_TYPE(2)
                                | NDCB0_AUTO_RS
                                | NDCB0_ADDR_CYC(3)
                                | NDCB0_DBC
                                | (NAND_CMD_ERASE2 << 8)
                                | NAND_CMD_ERASE1;
                info->ndcb1 = page_addr;
                info->ndcb2 = 0;

                break;
        case NAND_CMD_RESET:
                info->ndcb0 |= NDCB0_CMD_TYPE(5)
                                | command;

                break;

        case NAND_CMD_ERASE2:
                /* already issued together with ERASE1 above */
                exec_cmd = 0;
                break;

        default:
                exec_cmd = 0;
                dev_err(&info->pdev->dev, "non-supported command %x\n",
                                command);
                break;
        }

        return exec_cmd;
}
996
/*
 * Legacy ->cmdfunc implementation used when the whole page fits in one
 * controller transaction: prepare the command buffers, start the
 * controller and wait for the command-complete interrupt.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
                         int column, int page_addr)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        int exec_cmd;

        /*
         * if this is a x16 device, then convert the input
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device
         */
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;

        /*
         * There may be different NAND chip hooked to
         * different chip select, so check whether
         * chip select has been changed, if yes, reset the timing
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
                nand_writel(info, NDTR0CS0, info->ndtr0cs0);
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }

        prepare_start_command(info, command);

        info->state = STATE_PREPARED;
        exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

        if (exec_cmd) {
                init_completion(&info->cmd_complete);
                init_completion(&info->dev_ready);
                info->need_wait = 1;
                pxa3xx_nand_start(info);

                if (!wait_for_completion_timeout(&info->cmd_complete,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Wait time out!!!\n");
                        /* Stop State Machine for next command cycle */
                        pxa3xx_nand_stop(info);
                }
        }
        info->state = STATE_IDLE;
}
1043
/*
 * ->cmdfunc implementation for pages larger than the controller FIFO
 * chunk (installed only on the Armada 370/XP variant by
 * pxa3xx_nand_scan()): the operation is split into a sequence of
 * naked/last reads or writes selected via the extended command type,
 * looping until all chunks of the page have been transferred.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
                                  const unsigned command,
                                  int column, int page_addr)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        int exec_cmd, ext_cmd_type;

        /*
         * if this is a x16 device then convert the input
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device
         */
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;

        /*
         * There may be different NAND chip hooked to
         * different chip select, so check whether
         * chip select has been changed, if yes, reset the timing
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
                nand_writel(info, NDTR0CS0, info->ndtr0cs0);
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }

        /* Select the extended command for the first command */
        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_READOOB:
                ext_cmd_type = EXT_CMD_TYPE_MONO;
                break;
        case NAND_CMD_SEQIN:
                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                break;
        case NAND_CMD_PAGEPROG:
                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
                break;
        default:
                ext_cmd_type = 0;
                break;
        }

        prepare_start_command(info, command);

        /*
         * Prepare the "is ready" completion before starting a command
         * transaction sequence. If the command is not executed the
         * completion will be completed, see below.
         *
         * We can do that inside the loop because the command variable
         * is invariant and thus so is the exec_cmd.
         */
        info->need_wait = 1;
        init_completion(&info->dev_ready);
        do {
                info->state = STATE_PREPARED;
                exec_cmd = prepare_set_command(info, command, ext_cmd_type,
                                               column, page_addr);
                if (!exec_cmd) {
                        info->need_wait = 0;
                        complete(&info->dev_ready);
                        break;
                }

                init_completion(&info->cmd_complete);
                pxa3xx_nand_start(info);

                if (!wait_for_completion_timeout(&info->cmd_complete,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Wait time out!!!\n");
                        /* Stop State Machine for next command cycle */
                        pxa3xx_nand_stop(info);
                        break;
                }

                /* Check if the sequence is complete */
                if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
                        break;

                /*
                 * After a splitted program command sequence has issued
                 * the command dispatch, the command sequence is complete.
                 */
                if (info->data_size == 0 &&
                    command == NAND_CMD_PAGEPROG &&
                    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
                        break;

                if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
                        /* Last read: issue a 'last naked read' */
                        if (info->data_size == info->chunk_size)
                                ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
                        else
                                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

                /*
                 * If a splitted program command has no more data to transfer,
                 * the command dispatch must be issued to complete.
                 */
                } else if (command == NAND_CMD_PAGEPROG &&
                           info->data_size == 0) {
                                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                }
        } while (1);

        info->state = STATE_IDLE;
}
1153
1154 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1155                 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1156 {
1157         chip->write_buf(mtd, buf, mtd->writesize);
1158         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1159
1160         return 0;
1161 }
1162
1163 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1164                 struct nand_chip *chip, uint8_t *buf, int oob_required,
1165                 int page)
1166 {
1167         struct pxa3xx_nand_host *host = mtd->priv;
1168         struct pxa3xx_nand_info *info = host->info_data;
1169
1170         chip->read_buf(mtd, buf, mtd->writesize);
1171         chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1172
1173         if (info->retcode == ERR_CORERR && info->use_ecc) {
1174                 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1175
1176         } else if (info->retcode == ERR_UNCORERR) {
1177                 /*
1178                  * for blank page (all 0xff), HW will calculate its ECC as
1179                  * 0, which is different from the ECC information within
1180                  * OOB, ignore such uncorrectable errors
1181                  */
1182                 if (is_buf_blank(buf, mtd->writesize))
1183                         info->retcode = ERR_NONE;
1184                 else
1185                         mtd->ecc_stats.failed++;
1186         }
1187
1188         return info->max_bitflips;
1189 }
1190
1191 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1192 {
1193         struct pxa3xx_nand_host *host = mtd->priv;
1194         struct pxa3xx_nand_info *info = host->info_data;
1195         char retval = 0xFF;
1196
1197         if (info->buf_start < info->buf_count)
1198                 /* Has just send a new command? */
1199                 retval = info->data_buff[info->buf_start++];
1200
1201         return retval;
1202 }
1203
1204 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1205 {
1206         struct pxa3xx_nand_host *host = mtd->priv;
1207         struct pxa3xx_nand_info *info = host->info_data;
1208         u16 retval = 0xFFFF;
1209
1210         if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1211                 retval = *((u16 *)(info->data_buff+info->buf_start));
1212                 info->buf_start += 2;
1213         }
1214         return retval;
1215 }
1216
1217 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1218 {
1219         struct pxa3xx_nand_host *host = mtd->priv;
1220         struct pxa3xx_nand_info *info = host->info_data;
1221         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1222
1223         memcpy(buf, info->data_buff + info->buf_start, real_len);
1224         info->buf_start += real_len;
1225 }
1226
1227 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1228                 const uint8_t *buf, int len)
1229 {
1230         struct pxa3xx_nand_host *host = mtd->priv;
1231         struct pxa3xx_nand_info *info = host->info_data;
1232         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1233
1234         memcpy(info->data_buff + info->buf_start, buf, real_len);
1235         info->buf_start += real_len;
1236 }
1237
/*
 * Intentionally empty: the chip select is encoded into NDCB0 when each
 * command is prepared, so there is nothing to do here.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1242
1243 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1244 {
1245         struct pxa3xx_nand_host *host = mtd->priv;
1246         struct pxa3xx_nand_info *info = host->info_data;
1247
1248         if (info->need_wait) {
1249                 info->need_wait = 0;
1250                 if (!wait_for_completion_timeout(&info->dev_ready,
1251                     CHIP_DELAY_TIMEOUT)) {
1252                         dev_err(&info->pdev->dev, "Ready time out!!!\n");
1253                         return NAND_STATUS_FAIL;
1254                 }
1255         }
1256
1257         /* pxa3xx_nand_send_command has waited for command complete */
1258         if (this->state == FL_WRITING || this->state == FL_ERASING) {
1259                 if (info->retcode == ERR_NONE)
1260                         return 0;
1261                 else
1262                         return NAND_STATUS_FAIL;
1263         }
1264
1265         return NAND_STATUS_READY;
1266 }
1267
1268 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1269                                     const struct pxa3xx_nand_flash *f)
1270 {
1271         struct platform_device *pdev = info->pdev;
1272         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1273         struct pxa3xx_nand_host *host = info->host[info->cs];
1274         uint32_t ndcr = 0x0; /* enable all interrupts */
1275
1276         if (f->page_size != 2048 && f->page_size != 512) {
1277                 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1278                 return -EINVAL;
1279         }
1280
1281         if (f->flash_width != 16 && f->flash_width != 8) {
1282                 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1283                 return -EINVAL;
1284         }
1285
1286         /* calculate addressing information */
1287         host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1288
1289         if (f->num_blocks * f->page_per_block > 65536)
1290                 host->row_addr_cycles = 3;
1291         else
1292                 host->row_addr_cycles = 2;
1293
1294         ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1295         ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1296         ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1297         ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1298         ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1299         ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1300
1301         ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1302         ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1303
1304         info->reg_ndcr = ndcr;
1305
1306         pxa3xx_nand_set_timing(host, f->timing);
1307         return 0;
1308 }
1309
1310 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1311 {
1312         uint32_t ndcr = nand_readl(info, NDCR);
1313
1314         /* Set an initial chunk size */
1315         info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1316         info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1317         info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1318         info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1319         return 0;
1320 }
1321
1322 #ifdef ARCH_HAS_DMA
/*
 * Allocate the driver data buffer. When DMA is requested ('use_dma' —
 * presumably a module-level flag; not visible in this file section),
 * allocate a coherent DMA buffer whose tail holds the DMA descriptor,
 * and claim a PXA DMA channel; otherwise fall back to plain kmalloc.
 *
 * Returns 0 on success or a negative errno, undoing the coherent
 * allocation if the DMA channel request fails.
 */
static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
        struct platform_device *pdev = info->pdev;
        /* the descriptor is placed at the very end of the buffer */
        int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);

        if (use_dma == 0) {
                info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
                if (info->data_buff == NULL)
                        return -ENOMEM;
                return 0;
        }

        info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
                                &info->data_buff_phys, GFP_KERNEL);
        if (info->data_buff == NULL) {
                dev_err(&pdev->dev, "failed to allocate dma buffer\n");
                return -ENOMEM;
        }

        info->data_desc = (void *)info->data_buff + data_desc_offset;
        info->data_desc_addr = info->data_buff_phys + data_desc_offset;

        info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
                                pxa3xx_nand_data_dma_irq, info);
        if (info->data_dma_ch < 0) {
                dev_err(&pdev->dev, "failed to request data dma\n");
                dma_free_coherent(&pdev->dev, info->buf_size,
                                info->data_buff, info->data_buff_phys);
                return info->data_dma_ch;
        }

        /*
         * Now that DMA buffers are allocated we turn on
         * DMA proper for I/O operations.
         */
        info->use_dma = 1;
        return 0;
}
1361
1362 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1363 {
1364         struct platform_device *pdev = info->pdev;
1365         if (info->use_dma) {
1366                 pxa_free_dma(info->data_dma_ch);
1367                 dma_free_coherent(&pdev->dev, info->buf_size,
1368                                   info->data_buff, info->data_buff_phys);
1369         } else {
1370                 kfree(info->data_buff);
1371         }
1372 }
1373 #else
1374 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1375 {
1376         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1377         if (info->data_buff == NULL)
1378                 return -ENOMEM;
1379         return 0;
1380 }
1381
/* Non-DMA build: release the buffer from pxa3xx_nand_init_buff(). */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
        kfree(info->data_buff);
}
1386 #endif
1387
1388 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1389 {
1390         struct mtd_info *mtd;
1391         struct nand_chip *chip;
1392         int ret;
1393
1394         mtd = info->host[info->cs]->mtd;
1395         chip = mtd->priv;
1396
1397         /* use the common timing to make a try */
1398         ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1399         if (ret)
1400                 return ret;
1401
1402         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1403         ret = chip->waitfunc(mtd, chip);
1404         if (ret & NAND_STATUS_FAIL)
1405                 return -ENODEV;
1406
1407         return 0;
1408 }
1409
/*
 * Map the requested (strength, ECC step size, page size) triple onto
 * one of the controller's supported ECC configurations, filling in the
 * driver's chunk/spare/ECC sizes and the generic nand_ecc_ctrl.
 *
 * Note that for the BCH modes the controller provides a stronger
 * correction than requested (16-bit over a larger step), hence the
 * reported ecc->strength differing from @strength.
 *
 * Returns 0 on success, -ENODEV when no matching configuration exists.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
                        struct nand_ecc_ctrl *ecc,
                        int strength, int ecc_stepsize, int page_size)
{
        if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
                info->chunk_size = 2048;
                info->spare_size = 40;
                info->ecc_size = 24;
                ecc->mode = NAND_ECC_HW;
                ecc->size = 512;
                ecc->strength = 1;

        } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
                info->chunk_size = 512;
                info->spare_size = 8;
                info->ecc_size = 8;
                ecc->mode = NAND_ECC_HW;
                ecc->size = 512;
                ecc->strength = 1;

        /*
         * Required ECC: 4-bit correction per 512 bytes
         * Select: 16-bit correction per 2048 bytes
         */
        } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
                info->ecc_bch = 1;
                info->chunk_size = 2048;
                info->spare_size = 32;
                info->ecc_size = 32;
                ecc->mode = NAND_ECC_HW;
                ecc->size = info->chunk_size;
                ecc->layout = &ecc_layout_2KB_bch4bit;
                ecc->strength = 16;

        } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
                info->ecc_bch = 1;
                info->chunk_size = 2048;
                info->spare_size = 32;
                info->ecc_size = 32;
                ecc->mode = NAND_ECC_HW;
                ecc->size = info->chunk_size;
                ecc->layout = &ecc_layout_4KB_bch4bit;
                ecc->strength = 16;

        /*
         * Required ECC: 8-bit correction per 512 bytes
         * Select: 16-bit correction per 1024 bytes
         */
        } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
                info->ecc_bch = 1;
                info->chunk_size = 1024;
                info->spare_size = 0;
                info->ecc_size = 32;
                ecc->mode = NAND_ECC_HW;
                ecc->size = info->chunk_size;
                ecc->layout = &ecc_layout_4KB_bch8bit;
                ecc->strength = 16;
        } else {
                dev_err(&info->pdev->dev,
                        "ECC strength %d at page size %d is not supported\n",
                        strength, page_size);
                return -ENODEV;
        }

        dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
                 ecc->strength, ecc->size);
        return 0;
}
1478
/*
 * Probe and fully configure the NAND chip behind the current chip
 * select: sense the device, match it against the built-in flash table
 * (unless the bootloader configuration is kept), run the generic NAND
 * identification, set up ECC, and allocate the final data+OOB buffer.
 *
 * Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        struct platform_device *pdev = info->pdev;
        struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
        const struct pxa3xx_nand_flash *f = NULL;
        struct nand_chip *chip = mtd->priv;
        uint32_t id = -1;
        uint64_t chipsize;
        int i, ret, num;
        uint16_t ecc_strength, ecc_step;

        /* trust the bootloader's controller setup when asked to */
        if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
                goto KEEP_CONFIG;

        /* Set a default chunk size */
        info->chunk_size = 512;

        ret = pxa3xx_nand_sensing(info);
        if (ret) {
                dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
                         info->cs);

                return ret;
        }

        chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
        id = *((uint16_t *)(info->data_buff));
        if (id != 0)
                dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
        else {
                dev_warn(&info->pdev->dev,
                         "Read out ID 0, potential timing set wrong!!\n");

                return -EINVAL;
        }

        /* entry 0 holds the generic timings used for sensing; skip it */
        num = ARRAY_SIZE(builtin_flash_types) - 1;
        for (i = 0; i < num; i++) {
                f = &builtin_flash_types[i + 1];

                /* find the chip in default list */
                if (f->chip_id == id)
                        break;
        }

        if (i >= (ARRAY_SIZE(builtin_flash_types) - 1)) {
                dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");

                return -EINVAL;
        }

        ret = pxa3xx_nand_config_flash(info, f);
        if (ret) {
                dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
                return ret;
        }

        /* build a one-entry flash-id table for nand_scan_ident() */
        memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));

        pxa3xx_flash_ids[0].name = f->name;
        pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
        pxa3xx_flash_ids[0].pagesize = f->page_size;
        chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
        pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
        pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
        if (f->flash_width == 16)
                pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
        pxa3xx_flash_ids[1].name = NULL;
        def = pxa3xx_flash_ids;
KEEP_CONFIG:
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                chip->options |= NAND_BUSWIDTH_16;

        /* Device detection must be done with ECC disabled */
        if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
                nand_writel(info, NDECCCTRL, 0x0);

        if (nand_scan_ident(mtd, 1, def))
                return -ENODEV;

        if (pdata->flash_bbt) {
                /*
                 * We'll use a bad block table stored in-flash and don't
                 * allow writing the bad block marker to the flash.
                 */
                chip->bbt_options |= NAND_BBT_USE_FLASH |
                                     NAND_BBT_NO_OOB_BBM;
                chip->bbt_td = &bbt_main_descr;
                chip->bbt_md = &bbt_mirror_descr;
        }

        /*
         * If the page size is bigger than the FIFO size, let's check
         * we are given the right variant and then switch to the extended
         * (aka splitted) command handling,
         */
        if (mtd->writesize > PAGE_CHUNK_SIZE) {
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
                        chip->cmdfunc = nand_cmdfunc_extended;
                } else {
                        dev_err(&info->pdev->dev,
                                "unsupported page size on this variant\n");
                        return -ENODEV;
                }
        }

        /* platform data overrides the chip's own ECC requirements */
        if (pdata->ecc_strength && pdata->ecc_step_size) {
                ecc_strength = pdata->ecc_strength;
                ecc_step = pdata->ecc_step_size;
        } else {
                ecc_strength = chip->ecc_strength_ds;
                ecc_step = chip->ecc_step_ds;
        }

        /* Set default ECC strength requirements on non-ONFI devices */
        if (ecc_strength < 1 && ecc_step < 1) {
                ecc_strength = 1;
                ecc_step = 512;
        }

        ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
                           ecc_step, mtd->writesize);
        if (ret)
                return ret;

        /* calculate addressing information */
        if (mtd->writesize >= 2048)
                host->col_addr_cycles = 2;
        else
                host->col_addr_cycles = 1;

        /* release the initial buffer */
        kfree(info->data_buff);

        /* allocate the real data + oob buffer */
        info->buf_size = mtd->writesize + mtd->oobsize;
        ret = pxa3xx_nand_init_buff(info);
        if (ret)
                return ret;
        info->oob_buff = info->data_buff + mtd->writesize;

        if ((mtd->size >> chip->page_shift) > 65536)
                host->row_addr_cycles = 3;
        else
                host->row_addr_cycles = 2;
        return nand_scan_tail(mtd);
}
1629
1630 static int alloc_nand_resource(struct platform_device *pdev)
1631 {
1632         struct pxa3xx_nand_platform_data *pdata;
1633         struct pxa3xx_nand_info *info;
1634         struct pxa3xx_nand_host *host;
1635         struct nand_chip *chip = NULL;
1636         struct mtd_info *mtd;
1637         struct resource *r;
1638         int ret, irq, cs;
1639
1640         pdata = dev_get_platdata(&pdev->dev);
1641         if (pdata->num_cs <= 0)
1642                 return -ENODEV;
1643         info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1644                             sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1645         if (!info)
1646                 return -ENOMEM;
1647
1648         info->pdev = pdev;
1649         info->variant = pxa3xx_nand_get_variant(pdev);
1650         for (cs = 0; cs < pdata->num_cs; cs++) {
1651                 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1652                 chip = (struct nand_chip *)(&mtd[1]);
1653                 host = (struct pxa3xx_nand_host *)chip;
1654                 info->host[cs] = host;
1655                 host->mtd = mtd;
1656                 host->cs = cs;
1657                 host->info_data = info;
1658                 mtd->priv = host;
1659                 mtd->owner = THIS_MODULE;
1660
1661                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1662                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1663                 chip->controller        = &info->controller;
1664                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1665                 chip->select_chip       = pxa3xx_nand_select_chip;
1666                 chip->read_word         = pxa3xx_nand_read_word;
1667                 chip->read_byte         = pxa3xx_nand_read_byte;
1668                 chip->read_buf          = pxa3xx_nand_read_buf;
1669                 chip->write_buf         = pxa3xx_nand_write_buf;
1670                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1671                 chip->cmdfunc           = nand_cmdfunc;
1672         }
1673
1674         spin_lock_init(&chip->controller->lock);
1675         init_waitqueue_head(&chip->controller->wq);
1676         info->clk = devm_clk_get(&pdev->dev, NULL);
1677         if (IS_ERR(info->clk)) {
1678                 dev_err(&pdev->dev, "failed to get nand clock\n");
1679                 return PTR_ERR(info->clk);
1680         }
1681         ret = clk_prepare_enable(info->clk);
1682         if (ret < 0)
1683                 return ret;
1684
1685         if (use_dma) {
1686                 /*
1687                  * This is a dirty hack to make this driver work from
1688                  * devicetree bindings. It can be removed once we have
1689                  * a prober DMA controller framework for DT.
1690                  */
1691                 if (pdev->dev.of_node &&
1692                     of_machine_is_compatible("marvell,pxa3xx")) {
1693                         info->drcmr_dat = 97;
1694                         info->drcmr_cmd = 99;
1695                 } else {
1696                         r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1697                         if (r == NULL) {
1698                                 dev_err(&pdev->dev,
1699                                         "no resource defined for data DMA\n");
1700                                 ret = -ENXIO;
1701                                 goto fail_disable_clk;
1702                         }
1703                         info->drcmr_dat = r->start;
1704
1705                         r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1706                         if (r == NULL) {
1707                                 dev_err(&pdev->dev,
1708                                         "no resource defined for cmd DMA\n");
1709                                 ret = -ENXIO;
1710                                 goto fail_disable_clk;
1711                         }
1712                         info->drcmr_cmd = r->start;
1713                 }
1714         }
1715
1716         irq = platform_get_irq(pdev, 0);
1717         if (irq < 0) {
1718                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1719                 ret = -ENXIO;
1720                 goto fail_disable_clk;
1721         }
1722
1723         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1724         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1725         if (IS_ERR(info->mmio_base)) {
1726                 ret = PTR_ERR(info->mmio_base);
1727                 goto fail_disable_clk;
1728         }
1729         info->mmio_phys = r->start;
1730
1731         /* Allocate a buffer to allow flash detection */
1732         info->buf_size = INIT_BUFFER_SIZE;
1733         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1734         if (info->data_buff == NULL) {
1735                 ret = -ENOMEM;
1736                 goto fail_disable_clk;
1737         }
1738
1739         /* initialize all interrupts to be disabled */
1740         disable_int(info, NDSR_MASK);
1741
1742         ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1743                                    pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1744                                    pdev->name, info);
1745         if (ret < 0) {
1746                 dev_err(&pdev->dev, "failed to request IRQ\n");
1747                 goto fail_free_buf;
1748         }
1749
1750         platform_set_drvdata(pdev, info);
1751
1752         return 0;
1753
1754 fail_free_buf:
1755         free_irq(irq, info);
1756         kfree(info->data_buff);
1757 fail_disable_clk:
1758         clk_disable_unprepare(info->clk);
1759         return ret;
1760 }
1761
1762 static int pxa3xx_nand_remove(struct platform_device *pdev)
1763 {
1764         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1765         struct pxa3xx_nand_platform_data *pdata;
1766         int irq, cs;
1767
1768         if (!info)
1769                 return 0;
1770
1771         pdata = dev_get_platdata(&pdev->dev);
1772
1773         irq = platform_get_irq(pdev, 0);
1774         if (irq >= 0)
1775                 free_irq(irq, info);
1776         pxa3xx_nand_free_buff(info);
1777
1778         clk_disable_unprepare(info->clk);
1779
1780         for (cs = 0; cs < pdata->num_cs; cs++)
1781                 nand_release(info->host[cs]->mtd);
1782         return 0;
1783 }
1784
1785 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1786 {
1787         struct pxa3xx_nand_platform_data *pdata;
1788         struct device_node *np = pdev->dev.of_node;
1789         const struct of_device_id *of_id =
1790                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1791
1792         if (!of_id)
1793                 return 0;
1794
1795         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1796         if (!pdata)
1797                 return -ENOMEM;
1798
1799         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1800                 pdata->enable_arbiter = 1;
1801         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1802                 pdata->keep_config = 1;
1803         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1804         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1805
1806         pdata->ecc_strength = of_get_nand_ecc_strength(np);
1807         if (pdata->ecc_strength < 0)
1808                 pdata->ecc_strength = 0;
1809
1810         pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1811         if (pdata->ecc_step_size < 0)
1812                 pdata->ecc_step_size = 0;
1813
1814         pdev->dev.platform_data = pdata;
1815
1816         return 0;
1817 }
1818
1819 static int pxa3xx_nand_probe(struct platform_device *pdev)
1820 {
1821         struct pxa3xx_nand_platform_data *pdata;
1822         struct mtd_part_parser_data ppdata = {};
1823         struct pxa3xx_nand_info *info;
1824         int ret, cs, probe_success;
1825
1826 #ifndef ARCH_HAS_DMA
1827         if (use_dma) {
1828                 use_dma = 0;
1829                 dev_warn(&pdev->dev,
1830                          "This platform can't do DMA on this device\n");
1831         }
1832 #endif
1833         ret = pxa3xx_nand_probe_dt(pdev);
1834         if (ret)
1835                 return ret;
1836
1837         pdata = dev_get_platdata(&pdev->dev);
1838         if (!pdata) {
1839                 dev_err(&pdev->dev, "no platform data defined\n");
1840                 return -ENODEV;
1841         }
1842
1843         ret = alloc_nand_resource(pdev);
1844         if (ret) {
1845                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1846                 return ret;
1847         }
1848
1849         info = platform_get_drvdata(pdev);
1850         probe_success = 0;
1851         for (cs = 0; cs < pdata->num_cs; cs++) {
1852                 struct mtd_info *mtd = info->host[cs]->mtd;
1853
1854                 /*
1855                  * The mtd name matches the one used in 'mtdparts' kernel
1856                  * parameter. This name cannot be changed or otherwise
1857                  * user's mtd partitions configuration would get broken.
1858                  */
1859                 mtd->name = "pxa3xx_nand-0";
1860                 info->cs = cs;
1861                 ret = pxa3xx_nand_scan(mtd);
1862                 if (ret) {
1863                         dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1864                                 cs);
1865                         continue;
1866                 }
1867
1868                 ppdata.of_node = pdev->dev.of_node;
1869                 ret = mtd_device_parse_register(mtd, NULL,
1870                                                 &ppdata, pdata->parts[cs],
1871                                                 pdata->nr_parts[cs]);
1872                 if (!ret)
1873                         probe_success = 1;
1874         }
1875
1876         if (!probe_success) {
1877                 pxa3xx_nand_remove(pdev);
1878                 return -ENODEV;
1879         }
1880
1881         return 0;
1882 }
1883
1884 #ifdef CONFIG_PM
1885 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1886 {
1887         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1888         struct pxa3xx_nand_platform_data *pdata;
1889         struct mtd_info *mtd;
1890         int cs;
1891
1892         pdata = dev_get_platdata(&pdev->dev);
1893         if (info->state) {
1894                 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1895                 return -EAGAIN;
1896         }
1897
1898         for (cs = 0; cs < pdata->num_cs; cs++) {
1899                 mtd = info->host[cs]->mtd;
1900                 mtd_suspend(mtd);
1901         }
1902
1903         return 0;
1904 }
1905
1906 static int pxa3xx_nand_resume(struct platform_device *pdev)
1907 {
1908         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1909         struct pxa3xx_nand_platform_data *pdata;
1910         struct mtd_info *mtd;
1911         int cs;
1912
1913         pdata = dev_get_platdata(&pdev->dev);
1914         /* We don't want to handle interrupt without calling mtd routine */
1915         disable_int(info, NDCR_INT_MASK);
1916
1917         /*
1918          * Directly set the chip select to a invalid value,
1919          * then the driver would reset the timing according
1920          * to current chip select at the beginning of cmdfunc
1921          */
1922         info->cs = 0xff;
1923
1924         /*
1925          * As the spec says, the NDSR would be updated to 0x1800 when
1926          * doing the nand_clk disable/enable.
1927          * To prevent it damaging state machine of the driver, clear
1928          * all status before resume
1929          */
1930         nand_writel(info, NDSR, NDSR_MASK);
1931         for (cs = 0; cs < pdata->num_cs; cs++) {
1932                 mtd = info->host[cs]->mtd;
1933                 mtd_resume(mtd);
1934         }
1935
1936         return 0;
1937 }
1938 #else
1939 #define pxa3xx_nand_suspend     NULL
1940 #define pxa3xx_nand_resume      NULL
1941 #endif
1942
/*
 * Platform driver glue. The legacy (non-dev_pm_ops) suspend/resume hooks
 * are used; when CONFIG_PM is not set they are #defined to NULL above.
 */
static struct platform_driver pxa3xx_nand_driver = {
	.driver = {
		.name	= "pxa3xx-nand",
		.of_match_table = pxa3xx_nand_dt_ids,
	},
	.probe		= pxa3xx_nand_probe,
	.remove		= pxa3xx_nand_remove,
	.suspend	= pxa3xx_nand_suspend,
	.resume		= pxa3xx_nand_resume,
};

module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");