/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/pxa-dma.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mtd.h>

#if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
#define ARCH_HAS_DMA
#endif

#include <linux/platform_data/mtd-nand-pxa3xx.h>

#define CHIP_DELAY_TIMEOUT      msecs_to_jiffies(200)
#define NAND_STOP_DELAY         msecs_to_jiffies(40)
#define PAGE_CHUNK_SIZE         (2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE        2048

/* registers and bit definitions */
#define NDCR            (0x00) /* Control register */
#define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR            (0x14) /* Status Register */
#define NDPCR           (0x18) /* Page Count Register */
#define NDBDR0          (0x1C) /* Bad Block Register 0 */
#define NDBDR1          (0x20) /* Bad Block Register 1 */
#define NDECCCTRL       (0x28) /* ECC control */
#define NDDB            (0x40) /* Data Buffer */
#define NDCB0           (0x48) /* Command Buffer0 */
#define NDCB1           (0x4C) /* Command Buffer1 */
#define NDCB2           (0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN           (0x1 << 31)
#define NDCR_ECC_EN             (0x1 << 30)
#define NDCR_DMA_EN             (0x1 << 29)
#define NDCR_ND_RUN             (0x1 << 28)
#define NDCR_DWIDTH_C           (0x1 << 27)
#define NDCR_DWIDTH_M           (0x1 << 26)
#define NDCR_PAGE_SZ            (0x1 << 24)
#define NDCR_NCSX               (0x1 << 23)
#define NDCR_ND_MODE            (0x3 << 21)
#define NDCR_NAND_MODE          (0x0)
#define NDCR_CLR_PG_CNT         (0x1 << 20)
#define NDCR_STOP_ON_UNCOR      (0x1 << 19)
#define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
#define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START           (0x1 << 15)
#define NDCR_PG_PER_BLK         (0x1 << 14)
#define NDCR_ND_ARB_EN          (0x1 << 12)
#define NDCR_INT_MASK           (0xFFF)

#define NDSR_MASK               (0xfff)
#define NDSR_ERR_CNT_OFF        (16)
#define NDSR_ERR_CNT_MASK       (0x1f)
#define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
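/*
 * On the Armada 370/XP variant (NFCv2) with BCH enabled, the ERR_CNT
 * field of NDSR reports the number of bitflips corrected in the last
 * transaction; pxa3xx_nand_irq() latches it into info->ecc_err_cnt.
 */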
#define NDSR_RDY                (0x1 << 12)
#define NDSR_FLASH_RDY          (0x1 << 11)
#define NDSR_CS0_PAGED          (0x1 << 10)
#define NDSR_CS1_PAGED          (0x1 << 9)
#define NDSR_CS0_CMDD           (0x1 << 8)
#define NDSR_CS1_CMDD           (0x1 << 7)
#define NDSR_CS0_BBD            (0x1 << 6)
#define NDSR_CS1_BBD            (0x1 << 5)
#define NDSR_UNCORERR           (0x1 << 4)
#define NDSR_CORERR             (0x1 << 3)
#define NDSR_WRDREQ             (0x1 << 2)
#define NDSR_RDDREQ             (0x1 << 1)
#define NDSR_WRCMDREQ           (0x1)

#define NDCB0_LEN_OVRD          (0x1 << 28)
#define NDCB0_ST_ROW_EN         (0x1 << 26)
#define NDCB0_AUTO_RS           (0x1 << 25)
#define NDCB0_CSEL              (0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
#define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC                (0x1 << 20)
#define NDCB0_DBC               (0x1 << 19)
#define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
#define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK         (0xff << 8)
#define NDCB0_CMD1_MASK         (0xff)
#define NDCB0_ADDR_CYC_SHIFT    (16)

#define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ       4 /* Read */
#define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL      3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
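/*
 * Chunked (multi-chunk) transfers on NFCv2 are sequenced with these
 * types: reads start as a monolithic access, continue as naked reads
 * and finish with a "last naked" read; chunked programs are issued as
 * naked writes and completed by a command dispatch. See
 * nand_cmdfunc_extended() for the sequencing.
 */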

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES           7

/* macros for registers read/write */
#define nand_writel(info, off, val)     \
        writel_relaxed((val), (info)->mmio_base + (off))

#define nand_readl(info, off)           \
        readl_relaxed((info)->mmio_base + (off))

/* error code and state */
enum {
        ERR_NONE        = 0,
        ERR_DMABUSERR   = -1,
        ERR_SENDCMD     = -2,
        ERR_UNCORERR    = -3,
        ERR_BBERR       = -4,
        ERR_CORERR      = -5,
};

enum {
        STATE_IDLE = 0,
        STATE_PREPARED,
        STATE_CMD_HANDLE,
        STATE_DMA_READING,
        STATE_DMA_WRITING,
        STATE_DMA_DONE,
        STATE_PIO_READING,
        STATE_PIO_WRITING,
        STATE_CMD_DONE,
        STATE_READY,
};

enum pxa3xx_nand_variant {
        PXA3XX_NAND_VARIANT_PXA,
        PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
        struct nand_chip        chip;
        struct mtd_info         *mtd;
        void                    *info_data;

        int                     use_ecc;        /* use HW ECC? */
        int                     cs;

        /* calculated from pxa3xx_nand_flash data */
        unsigned int            col_addr_cycles;
        unsigned int            row_addr_cycles;
};

struct pxa3xx_nand_info {
        struct nand_hw_control  controller;
        struct platform_device   *pdev;

        struct clk              *clk;
        void __iomem            *mmio_base;
        unsigned long           mmio_phys;
        struct completion       cmd_complete, dev_ready;

        unsigned int            buf_start;
        unsigned int            buf_count;
        unsigned int            buf_size;
        unsigned int            data_buff_pos;
        unsigned int            oob_buff_pos;

        /* DMA information */
        struct scatterlist      sg;
        enum dma_data_direction dma_dir;
        struct dma_chan         *dma_chan;
        dma_cookie_t            dma_cookie;
        int                     drcmr_dat;
        int                     drcmr_cmd;

        unsigned char           *data_buff;
        unsigned char           *oob_buff;
        dma_addr_t              data_buff_phys;
        int                     data_dma_ch;

        struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
        unsigned int            state;

        /*
         * This driver supports NFCv1 (as found in PXA SoC)
         * and NFCv2 (as found in Armada 370/XP SoC).
         */
        enum pxa3xx_nand_variant variant;

        int                     cs;
        int                     use_ecc;        /* use HW ECC ? */
        int                     ecc_bch;        /* using BCH ECC? */
        int                     use_dma;        /* use DMA ? */
        int                     use_spare;      /* use spare ? */
        int                     need_wait;

        unsigned int            data_size;      /* data to be read from FIFO */
        unsigned int            chunk_size;     /* split commands chunk size */
        unsigned int            oob_size;
        unsigned int            spare_size;
        unsigned int            ecc_size;
        unsigned int            ecc_err_cnt;
        unsigned int            max_bitflips;
        int                     retcode;

        /* cached register value */
        uint32_t                reg_ndcr;
        uint32_t                ndtr0cs0;
        uint32_t                ndtr1cs0;

        /* generated NDCBx register values */
        uint32_t                ndcb0;
        uint32_t                ndcb1;
        uint32_t                ndcb2;
        uint32_t                ndcb3;
};

static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transfers to/from the NAND HW");

struct pxa3xx_nand_timing {
        unsigned int    tCH;  /* Enable signal hold time */
        unsigned int    tCS;  /* Enable signal setup time */
        unsigned int    tWH;  /* ND_nWE high duration */
        unsigned int    tWP;  /* ND_nWE pulse time */
        unsigned int    tRH;  /* ND_nRE high duration */
        unsigned int    tRP;  /* ND_nRE pulse width */
        unsigned int    tR;   /* ND_nWE high to ND_nRE low for read */
        unsigned int    tWHR; /* ND_nWE high to ND_nRE low for status read */
        unsigned int    tAR;  /* ND_ALE low to ND_nRE low delay */
};

struct pxa3xx_nand_flash {
        char            *name;
        uint32_t        chip_id;
        unsigned int    page_per_block; /* Pages per block (PG_PER_BLK) */
        unsigned int    page_size;      /* Page size in bytes (PAGE_SZ) */
        unsigned int    flash_width;    /* Width of Flash memory (DWIDTH_M) */
        unsigned int    dfc_width;      /* Width of flash controller (DWIDTH_C) */
        unsigned int    num_blocks;     /* Number of physical blocks in Flash */

        struct pxa3xx_nand_timing *timing;      /* NAND Flash timing */
};

static struct pxa3xx_nand_timing timing[] = {
        { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
        { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
        { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
        { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
{ "DEFAULT FLASH",      0,   0, 2048,  8,  8,    0, &timing[0] },
{ "64MiB 16-bit",  0x46ec,  32,  512, 16, 16, 4096, &timing[1] },
{ "256MiB 8-bit",  0xdaec,  64, 2048,  8,  8, 2048, &timing[1] },
{ "4GiB 8-bit",    0xd7ec, 128, 4096,  8,  8, 8192, &timing[1] },
{ "128MiB 8-bit",  0xa12c,  64, 2048,  8,  8, 1024, &timing[2] },
{ "128MiB 16-bit", 0xb12c,  64, 2048, 16, 16, 1024, &timing[2] },
{ "512MiB 8-bit",  0xdc2c,  64, 2048,  8,  8, 4096, &timing[2] },
{ "512MiB 16-bit", 0xcc2c,  64, 2048, 16, 16, 4096, &timing[2] },
{ "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
};
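
/*
 * Note: chip_id above is the 16-bit value read back after READID in
 * pxa3xx_nand_scan(); on this (little-endian) platform the
 * manufacturer ID lands in the low byte (e.g. 0xec Samsung,
 * 0x2c Micron, 0x20 ST) and the device ID in the high byte.
 */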

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION,
        .offs = 8,
        .len = 6,
        .veroffs = 14,
        .maxblocks = 8,         /* Last 8 blocks in each chip */
        .pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
        .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
                | NAND_BBT_2BIT | NAND_BBT_VERSION,
        .offs = 8,
        .len = 6,
        .veroffs = 14,
        .maxblocks = 8,         /* Last 8 blocks in each chip */
        .pattern = bbt_mirror_pattern
};

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
        .eccbytes = 32,
        .eccpos = {
                32, 33, 34, 35, 36, 37, 38, 39,
                40, 41, 42, 43, 44, 45, 46, 47,
                48, 49, 50, 51, 52, 53, 54, 55,
                56, 57, 58, 59, 60, 61, 62, 63},
        .oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
        .eccbytes = 64,
        .eccpos = {
                32,  33,  34,  35,  36,  37,  38,  39,
                40,  41,  42,  43,  44,  45,  46,  47,
                48,  49,  50,  51,  52,  53,  54,  55,
                56,  57,  58,  59,  60,  61,  62,  63,
                96,  97,  98,  99,  100, 101, 102, 103,
                104, 105, 106, 107, 108, 109, 110, 111,
                112, 113, 114, 115, 116, 117, 118, 119,
                120, 121, 122, 123, 124, 125, 126, 127},
        /* Bootrom looks in bytes 0 & 5 for bad blocks */
        .oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
        .eccbytes = 128,
        .eccpos = {
                32,  33,  34,  35,  36,  37,  38,  39,
                40,  41,  42,  43,  44,  45,  46,  47,
                48,  49,  50,  51,  52,  53,  54,  55,
                56,  57,  58,  59,  60,  61,  62,  63},
        .oobfree = { }
};

/* Default flash type setting, used only for initial flash detection */
#define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])

#define NDTR0_tCH(c)    (min((c), 7) << 19)
#define NDTR0_tCS(c)    (min((c), 7) << 16)
#define NDTR0_tWH(c)    (min((c), 7) << 11)
#define NDTR0_tWP(c)    (min((c), 7) << 8)
#define NDTR0_tRH(c)    (min((c), 7) << 3)
#define NDTR0_tRP(c)    (min((c), 7) << 0)

#define NDTR1_tR(c)     (min((c), 65535) << 16)
#define NDTR1_tWHR(c)   (min((c), 15) << 4)
#define NDTR1_tAR(c)    (min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
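/*
 * Note that the integer math in ns2cycle() truncates: with a
 * (hypothetical) 156 MHz controller clock, ns2cycle(25, clk) gives
 * 25 * 156 / 1000 = 3 cycles, i.e. 3.9 rounded down. The NDTRx_t*
 * field macros above additionally clamp each value to the largest
 * encodable one.
 */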

static const struct of_device_id pxa3xx_nand_dt_ids[] = {
        {
                .compatible = "marvell,pxa3xx-nand",
                .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
        },
        {
                .compatible = "marvell,armada370-nand",
                .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
        },
        {}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);

static enum pxa3xx_nand_variant
pxa3xx_nand_get_variant(struct platform_device *pdev)
{
        const struct of_device_id *of_id =
                        of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
        if (!of_id)
                return PXA3XX_NAND_VARIANT_PXA;
        return (enum pxa3xx_nand_variant)of_id->data;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
                                   const struct pxa3xx_nand_timing *t)
{
        struct pxa3xx_nand_info *info = host->info_data;
        unsigned long nand_clk = clk_get_rate(info->clk);
        uint32_t ndtr0, ndtr1;

        ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
                NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
                NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
                NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
                NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
                NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

        ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
                NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
                NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

        info->ndtr0cs0 = ndtr0;
        info->ndtr1cs0 = ndtr1;
        nand_writel(info, NDTR0CS0, ndtr0);
        nand_writel(info, NDTR1CS0, ndtr1);
}

/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
                                struct mtd_info *mtd)
{
        int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

        info->data_size = mtd->writesize;
        if (!oob_enable)
                return;

        info->oob_size = info->spare_size;
        if (!info->use_ecc)
                info->oob_size += info->ecc_size;
}

/*
 * NOTE: ND_RUN must be set first, and only then may the command
 * buffer be written; otherwise the controller does not work.
 * We enable all the interrupts at the same time and let
 * pxa3xx_nand_irq() handle the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
        uint32_t ndcr;

        ndcr = info->reg_ndcr;

        if (info->use_ecc) {
                ndcr |= NDCR_ECC_EN;
                if (info->ecc_bch)
                        nand_writel(info, NDECCCTRL, 0x1);
        } else {
                ndcr &= ~NDCR_ECC_EN;
                if (info->ecc_bch)
                        nand_writel(info, NDECCCTRL, 0x0);
        }

        if (info->use_dma)
                ndcr |= NDCR_DMA_EN;
        else
                ndcr &= ~NDCR_DMA_EN;

        if (info->use_spare)
                ndcr |= NDCR_SPARE_EN;
        else
                ndcr &= ~NDCR_SPARE_EN;

        ndcr |= NDCR_ND_RUN;

        /* clear status bits and run */
        nand_writel(info, NDSR, NDSR_MASK);
        nand_writel(info, NDCR, 0);
        nand_writel(info, NDCR, ndcr);
}

static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
{
        uint32_t ndcr;
        int timeout = NAND_STOP_DELAY;

        /* wait for the RUN bit in NDCR to become 0 */
        ndcr = nand_readl(info, NDCR);
        while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
                ndcr = nand_readl(info, NDCR);
                udelay(1);
        }

        if (timeout <= 0) {
                ndcr &= ~NDCR_ND_RUN;
                nand_writel(info, NDCR, ndcr);
        }
        if (info->dma_chan)
                dmaengine_terminate_all(info->dma_chan);

        /* clear status bits */
        nand_writel(info, NDSR, NDSR_MASK);
}

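/*
 * The NDCR interrupt bits are mask bits: a field set to 1 masks
 * (disables) the corresponding interrupt source, which is why
 * enabling interrupts clears bits and disabling sets them below.
 */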
static void __maybe_unused
enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
        uint32_t ndcr;

        ndcr = nand_readl(info, NDCR);
        nand_writel(info, NDCR, ndcr & ~int_mask);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
        uint32_t ndcr;

        ndcr = nand_readl(info, NDCR);
        nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
        if (info->ecc_bch) {
                u32 val;
                int ret;

                /*
                 * According to the datasheet, when reading from NDDB
                 * with BCH enabled, after each group of 32 bytes read,
                 * we have to make sure that the NDSR.RDDREQ bit is set.
                 *
                 * Drain the FIFO eight 32-bit reads at a time, and skip
                 * the polling on the last read.
                 */
                while (len > 8) {
                        readsl(info->mmio_base + NDDB, data, 8);

                        ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
                                                         val & NDSR_RDDREQ, 1000, 5000);
                        if (ret) {
                                dev_err(&info->pdev->dev,
                                        "Timeout on RDDREQ while draining the FIFO\n");
                                return;
                        }

                        data += 32;
                        len -= 8;
                }
        }

        readsl(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
        unsigned int do_bytes = min(info->data_size, info->chunk_size);

        switch (info->state) {
        case STATE_PIO_WRITING:
                writesl(info->mmio_base + NDDB,
                        info->data_buff + info->data_buff_pos,
                        DIV_ROUND_UP(do_bytes, 4));

                if (info->oob_size > 0)
                        writesl(info->mmio_base + NDDB,
                                info->oob_buff + info->oob_buff_pos,
                                DIV_ROUND_UP(info->oob_size, 4));
                break;
        case STATE_PIO_READING:
                drain_fifo(info,
                           info->data_buff + info->data_buff_pos,
                           DIV_ROUND_UP(do_bytes, 4));

                if (info->oob_size > 0)
                        drain_fifo(info,
                                   info->oob_buff + info->oob_buff_pos,
                                   DIV_ROUND_UP(info->oob_size, 4));
                break;
        default:
                dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
                                info->state);
                BUG();
        }

        /* Update buffer pointers for multi-page read/write */
        info->data_buff_pos += do_bytes;
        info->oob_buff_pos += info->oob_size;
        info->data_size -= do_bytes;
}

static void pxa3xx_nand_data_dma_irq(void *data)
{
        struct pxa3xx_nand_info *info = data;
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
        if (likely(status == DMA_COMPLETE)) {
                info->state = STATE_DMA_DONE;
        } else {
                dev_err(&info->pdev->dev, "DMA error on data channel\n");
                info->retcode = ERR_DMABUSERR;
        }
        dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

        nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
        enable_int(info, NDCR_INT_MASK);
}

static void start_data_dma(struct pxa3xx_nand_info *info)
{
        enum dma_transfer_direction direction;
        struct dma_async_tx_descriptor *tx;

        switch (info->state) {
        case STATE_DMA_WRITING:
                info->dma_dir = DMA_TO_DEVICE;
                direction = DMA_MEM_TO_DEV;
                break;
        case STATE_DMA_READING:
                info->dma_dir = DMA_FROM_DEVICE;
                direction = DMA_DEV_TO_MEM;
                break;
        default:
                dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
                                info->state);
                BUG();
        }
        info->sg.length = info->data_size +
                (info->oob_size ? info->spare_size + info->ecc_size : 0);
        dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);

        tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
                                     DMA_PREP_INTERRUPT);
        if (!tx) {
                dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
                return;
        }
        tx->callback = pxa3xx_nand_data_dma_irq;
        tx->callback_param = info;
        info->dma_cookie = dmaengine_submit(tx);
        dma_async_issue_pending(info->dma_chan);
        dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
                __func__, direction, info->dma_cookie, info->sg.length);
}

static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
        struct pxa3xx_nand_info *info = data;

        handle_data_pio(info);

        info->state = STATE_CMD_DONE;
        nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

        return IRQ_HANDLED;
}

static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
        struct pxa3xx_nand_info *info = devid;
        unsigned int status, is_completed = 0, is_ready = 0;
        unsigned int ready, cmd_done;
        irqreturn_t ret = IRQ_HANDLED;

        if (info->cs == 0) {
                ready           = NDSR_FLASH_RDY;
                cmd_done        = NDSR_CS0_CMDD;
        } else {
                ready           = NDSR_RDY;
                cmd_done        = NDSR_CS1_CMDD;
        }

        status = nand_readl(info, NDSR);

        if (status & NDSR_UNCORERR)
                info->retcode = ERR_UNCORERR;
        if (status & NDSR_CORERR) {
                info->retcode = ERR_CORERR;
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
                    info->ecc_bch)
                        info->ecc_err_cnt = NDSR_ERR_CNT(status);
                else
                        info->ecc_err_cnt = 1;

                /*
                 * Each chunk composing a page is corrected independently,
                 * and we need to store maximum number of corrected bitflips
                 * to return it to the MTD layer in ecc.read_page().
                 */
                info->max_bitflips = max_t(unsigned int,
                                           info->max_bitflips,
                                           info->ecc_err_cnt);
        }
        if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
                /* whether to use DMA to transfer the data */
                if (info->use_dma) {
                        disable_int(info, NDCR_INT_MASK);
                        info->state = (status & NDSR_RDDREQ) ?
                                      STATE_DMA_READING : STATE_DMA_WRITING;
                        start_data_dma(info);
                        goto NORMAL_IRQ_EXIT;
                } else {
                        info->state = (status & NDSR_RDDREQ) ?
                                      STATE_PIO_READING : STATE_PIO_WRITING;
                        ret = IRQ_WAKE_THREAD;
                        goto NORMAL_IRQ_EXIT;
                }
        }
        if (status & cmd_done) {
                info->state = STATE_CMD_DONE;
                is_completed = 1;
        }
        if (status & ready) {
                info->state = STATE_READY;
                is_ready = 1;
        }

        /*
         * Clear all status bits before issuing the next command, which
         * can and will alter the status bits and will deserve a new
         * interrupt of its own. This lets the controller exit the IRQ.
         */
        nand_writel(info, NDSR, status);

        if (status & NDSR_WRCMDREQ) {
                status &= ~NDSR_WRCMDREQ;
                info->state = STATE_CMD_HANDLE;

                /*
                 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
                 * must be loaded by writing either 12 or 16 bytes directly
                 * to NDCB0, four bytes at a time.
                 *
                 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored,
                 * but each NDCBx register can be read.
                 */
                nand_writel(info, NDCB0, info->ndcb0);
                nand_writel(info, NDCB0, info->ndcb1);
                nand_writel(info, NDCB0, info->ndcb2);

                /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
                if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
                        nand_writel(info, NDCB0, info->ndcb3);
        }

        if (is_completed)
                complete(&info->cmd_complete);
        if (is_ready)
                complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
        return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
        for (; len > 0; len--)
                if (*buf++ != 0xff)
                        return 0;
        return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
                unsigned int page_size, uint16_t column, int page_addr)
{
        /* small page addr setting */
        if (page_size < PAGE_CHUNK_SIZE) {
                info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
                                | (column & 0xFF);

                info->ndcb2 = 0;
        } else {
                /* large page addr setting */
                info->ndcb1 = ((page_addr & 0xFFFF) << 16)
                                | (column & 0xFFFF);

                if (page_addr & 0xFF0000)
                        info->ndcb2 = (page_addr & 0xFF0000) >> 16;
                else
                        info->ndcb2 = 0;
        }
}

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
        struct pxa3xx_nand_host *host = info->host[info->cs];
        struct mtd_info *mtd = host->mtd;

        /* reset the data and oob column pointers to handle data */
        info->buf_start         = 0;
        info->buf_count         = 0;
        info->oob_size          = 0;
        info->data_buff_pos     = 0;
        info->oob_buff_pos      = 0;
        info->use_ecc           = 0;
        info->use_spare         = 1;
        info->retcode           = ERR_NONE;
        info->ecc_err_cnt       = 0;
        info->ndcb3             = 0;
        info->need_wait         = 0;

        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_PAGEPROG:
                info->use_ecc = 1;
                /* fall through */
        case NAND_CMD_READOOB:
                pxa3xx_set_datasize(info, mtd);
                break;
        case NAND_CMD_PARAM:
                info->use_spare = 0;
                break;
        default:
                info->ndcb1 = 0;
                info->ndcb2 = 0;
                break;
        }

        /*
         * If we are about to issue a read command, or about to set
         * the write address, then clean the data buffer.
         */
        if (command == NAND_CMD_READ0 ||
            command == NAND_CMD_READOOB ||
            command == NAND_CMD_SEQIN) {

                info->buf_count = mtd->writesize + mtd->oobsize;
                memset(info->data_buff, 0xFF, info->buf_count);
        }
}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
                int ext_cmd_type, uint16_t column, int page_addr)
{
        int addr_cycle, exec_cmd;
        struct pxa3xx_nand_host *host;
        struct mtd_info *mtd;

        host = info->host[info->cs];
        mtd = host->mtd;
        addr_cycle = 0;
        exec_cmd = 1;

        if (info->cs != 0)
                info->ndcb0 = NDCB0_CSEL;
        else
                info->ndcb0 = 0;

        if (command == NAND_CMD_SEQIN)
                exec_cmd = 0;

        addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
                                    + host->col_addr_cycles);

        switch (command) {
        case NAND_CMD_READOOB:
        case NAND_CMD_READ0:
                info->buf_start = column;
                info->ndcb0 |= NDCB0_CMD_TYPE(0)
                                | addr_cycle
                                | NAND_CMD_READ0;

                if (command == NAND_CMD_READOOB)
                        info->buf_start += mtd->writesize;

                /*
                 * Multiple page read needs an 'extended command type' field,
                 * which is either naked-read or last-read according to the
                 * state.
                 */
                if (mtd->writesize == PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
                } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
                                        | NDCB0_LEN_OVRD
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
                        info->ndcb3 = info->chunk_size +
                                      info->oob_size;
                }

                set_command_address(info, mtd->writesize, column, page_addr);
                break;

        case NAND_CMD_SEQIN:

                info->buf_start = column;
                set_command_address(info, mtd->writesize, 0, page_addr);

                /*
                 * Multiple page programming needs to execute the initial
                 * SEQIN command that sets the page address.
                 */
                if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
                                | addr_cycle
                                | command;
                        /* No data transfer in this case */
                        info->data_size = 0;
                        exec_cmd = 1;
                }
                break;

        case NAND_CMD_PAGEPROG:
                if (is_buf_blank(info->data_buff,
                                        (mtd->writesize + mtd->oobsize))) {
                        exec_cmd = 0;
                        break;
                }

                /* Second command setting for large pages */
                if (mtd->writesize > PAGE_CHUNK_SIZE) {
                        /*
                         * Multiple page write uses the 'extended command'
                         * field. This can be used to issue a command dispatch
                         * or a naked-write depending on the current stage.
                         */
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_LEN_OVRD
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
                        info->ndcb3 = info->chunk_size +
                                      info->oob_size;

                        /*
                         * This is the command dispatch that completes a chunked
                         * page program operation.
                         */
                        if (info->data_size == 0) {
                                info->ndcb0 = NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
                                        | command;
                                info->ndcb1 = 0;
                                info->ndcb2 = 0;
                                info->ndcb3 = 0;
                        }
                } else {
                        info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
                                        | NDCB0_AUTO_RS
                                        | NDCB0_ST_ROW_EN
                                        | NDCB0_DBC
                                        | (NAND_CMD_PAGEPROG << 8)
                                        | NAND_CMD_SEQIN
                                        | addr_cycle;
                }
                break;

        case NAND_CMD_PARAM:
                info->buf_count = INIT_BUFFER_SIZE;
                info->ndcb0 |= NDCB0_CMD_TYPE(0)
                                | NDCB0_ADDR_CYC(1)
                                | NDCB0_LEN_OVRD
                                | command;
                info->ndcb1 = (column & 0xFF);
                info->ndcb3 = INIT_BUFFER_SIZE;
                info->data_size = INIT_BUFFER_SIZE;
                break;

        case NAND_CMD_READID:
                info->buf_count = READ_ID_BYTES;
                info->ndcb0 |= NDCB0_CMD_TYPE(3)
                                | NDCB0_ADDR_CYC(1)
                                | command;
                info->ndcb1 = (column & 0xFF);

                info->data_size = 8;
                break;
        case NAND_CMD_STATUS:
                info->buf_count = 1;
                info->ndcb0 |= NDCB0_CMD_TYPE(4)
                                | NDCB0_ADDR_CYC(1)
                                | command;

                info->data_size = 8;
                break;

        case NAND_CMD_ERASE1:
                info->ndcb0 |= NDCB0_CMD_TYPE(2)
                                | NDCB0_AUTO_RS
                                | NDCB0_ADDR_CYC(3)
                                | NDCB0_DBC
                                | (NAND_CMD_ERASE2 << 8)
                                | NAND_CMD_ERASE1;
                info->ndcb1 = page_addr;
                info->ndcb2 = 0;

                break;
        case NAND_CMD_RESET:
                info->ndcb0 |= NDCB0_CMD_TYPE(5)
                                | command;

                break;

        case NAND_CMD_ERASE2:
                exec_cmd = 0;
                break;

        default:
                exec_cmd = 0;
                dev_err(&info->pdev->dev, "unsupported command %x\n",
                                command);
                break;
        }

        return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
                         int column, int page_addr)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        int exec_cmd;

        /*
         * if this is a x16 device, then convert the input
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device
         */
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;

        /*
         * There may be different NAND chips hooked to different
         * chip selects, so check whether the chip select has
         * changed; if so, reload the timing registers.
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
                nand_writel(info, NDTR0CS0, info->ndtr0cs0);
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }

        prepare_start_command(info, command);

        info->state = STATE_PREPARED;
        exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

        if (exec_cmd) {
                init_completion(&info->cmd_complete);
                init_completion(&info->dev_ready);
                info->need_wait = 1;
                pxa3xx_nand_start(info);

                if (!wait_for_completion_timeout(&info->cmd_complete,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Wait timed out!\n");
                        /* Stop State Machine for next command cycle */
                        pxa3xx_nand_stop(info);
                }
        }
        info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
                                  const unsigned command,
                                  int column, int page_addr)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        int exec_cmd, ext_cmd_type;

        /*
         * if this is a x16 device, then convert the input
         * "byte" address into a "word" address appropriate
         * for indexing a word-oriented device
         */
        if (info->reg_ndcr & NDCR_DWIDTH_M)
                column /= 2;

        /*
         * There may be different NAND chips hooked to different
         * chip selects, so check whether the chip select has
         * changed; if so, reload the timing registers.
         */
        if (info->cs != host->cs) {
                info->cs = host->cs;
                nand_writel(info, NDTR0CS0, info->ndtr0cs0);
                nand_writel(info, NDTR1CS0, info->ndtr1cs0);
        }

        /* Select the extended command for the first command */
        switch (command) {
        case NAND_CMD_READ0:
        case NAND_CMD_READOOB:
                ext_cmd_type = EXT_CMD_TYPE_MONO;
                break;
        case NAND_CMD_SEQIN:
                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                break;
        case NAND_CMD_PAGEPROG:
                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
                break;
        default:
                ext_cmd_type = 0;
                break;
        }

        prepare_start_command(info, command);

        /*
         * Prepare the "is ready" completion before starting a command
         * transaction sequence. If the command is not executed the
         * completion will be completed, see below.
         *
         * We can do that inside the loop because the command variable
         * is invariant and thus so is the exec_cmd.
         */
        info->need_wait = 1;
        init_completion(&info->dev_ready);
        do {
                info->state = STATE_PREPARED;
                exec_cmd = prepare_set_command(info, command, ext_cmd_type,
                                               column, page_addr);
                if (!exec_cmd) {
                        info->need_wait = 0;
                        complete(&info->dev_ready);
                        break;
                }

                init_completion(&info->cmd_complete);
                pxa3xx_nand_start(info);

                if (!wait_for_completion_timeout(&info->cmd_complete,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Wait timed out!\n");
                        /* Stop State Machine for next command cycle */
                        pxa3xx_nand_stop(info);
                        break;
                }

                /* Check if the sequence is complete */
                if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
                        break;

                /*
                 * After a split program command sequence has issued
                 * the command dispatch, the command sequence is complete.
                 */
                if (info->data_size == 0 &&
                    command == NAND_CMD_PAGEPROG &&
                    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
                        break;

                if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
                        /* Last read: issue a 'last naked read' */
                        if (info->data_size == info->chunk_size)
                                ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
                        else
                                ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

                /*
                 * If a split program command has no more data to transfer,
                 * the command dispatch must be issued to complete it.
                 */
                } else if (command == NAND_CMD_PAGEPROG &&
                           info->data_size == 0) {
                                ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
                }
        } while (1);

        info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
                struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
        chip->write_buf(mtd, buf, mtd->writesize);
        chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

        return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
                struct nand_chip *chip, uint8_t *buf, int oob_required,
                int page)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;

        chip->read_buf(mtd, buf, mtd->writesize);
        chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

        if (info->retcode == ERR_CORERR && info->use_ecc) {
                mtd->ecc_stats.corrected += info->ecc_err_cnt;

        } else if (info->retcode == ERR_UNCORERR) {
                /*
                 * For a blank page (all 0xff), the HW will calculate its
                 * ECC as 0, which is different from the ECC information
                 * within the OOB; ignore such uncorrectable errors.
                 */
                if (is_buf_blank(buf, mtd->writesize))
                        info->retcode = ERR_NONE;
                else
                        mtd->ecc_stats.failed++;
        }

        return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        char retval = 0xFF;

        if (info->buf_start < info->buf_count)
                /* Has a new command just been sent? */
                retval = info->data_buff[info->buf_start++];

        return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        u16 retval = 0xFFFF;

        if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
                retval = *((u16 *)(info->data_buff+info->buf_start));
                info->buf_start += 2;
        }
        return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

        memcpy(buf, info->data_buff + info->buf_start, real_len);
        info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
                const uint8_t *buf, int len)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;
        int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

        memcpy(info->data_buff + info->buf_start, buf, real_len);
        info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
        return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
        struct pxa3xx_nand_host *host = mtd->priv;
        struct pxa3xx_nand_info *info = host->info_data;

        if (info->need_wait) {
                info->need_wait = 0;
                if (!wait_for_completion_timeout(&info->dev_ready,
                    CHIP_DELAY_TIMEOUT)) {
                        dev_err(&info->pdev->dev, "Ready timed out!\n");
                        return NAND_STATUS_FAIL;
                }
        }

        /* nand_cmdfunc() has already waited for the command to complete */
        if (this->state == FL_WRITING || this->state == FL_ERASING) {
                if (info->retcode == ERR_NONE)
                        return 0;
                else
                        return NAND_STATUS_FAIL;
        }

        return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
                                    const struct pxa3xx_nand_flash *f)
{
        struct platform_device *pdev = info->pdev;
        struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct pxa3xx_nand_host *host = info->host[info->cs];
        uint32_t ndcr = 0x0; /* enable all interrupts */

        if (f->page_size != 2048 && f->page_size != 512) {
                dev_err(&pdev->dev, "Only 2048 and 512 byte page sizes are supported\n");
                return -EINVAL;
        }

        if (f->flash_width != 16 && f->flash_width != 8) {
                dev_err(&pdev->dev, "Only 8-bit and 16-bit bus widths are supported\n");
                return -EINVAL;
        }

        /* calculate addressing information */
        host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;

        if (f->num_blocks * f->page_per_block > 65536)
                host->row_addr_cycles = 3;
        else
                host->row_addr_cycles = 2;

        ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
        ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
        ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
        ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
        ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
        ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;

        ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
        ndcr |= NDCR_SPARE_EN; /* enable spare by default */

        info->reg_ndcr = ndcr;

        pxa3xx_nand_set_timing(host, f->timing);
        return 0;
}

static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
        uint32_t ndcr = nand_readl(info, NDCR);

        /* Set an initial chunk size */
        info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
        info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
        info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
        info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
        return 0;
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
        struct platform_device *pdev = info->pdev;
        struct dma_slave_config config;
        dma_cap_mask_t mask;
        struct pxad_param param;
        int ret;

        info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
        if (info->data_buff == NULL)
                return -ENOMEM;
        if (use_dma == 0)
                return 0;

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
                return ret;

        sg_init_one(&info->sg, info->data_buff, info->buf_size);
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        param.prio = PXAD_PRIO_LOWEST;
        param.drcmr = info->drcmr_dat;
        info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
                                                          &param, &pdev->dev,
                                                          "data");
        if (!info->dma_chan) {
                dev_err(&pdev->dev, "unable to request data dma channel\n");
                return -ENODEV;
        }

        memset(&config, 0, sizeof(config));
        config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.src_addr = info->mmio_phys + NDDB;
        config.dst_addr = info->mmio_phys + NDDB;
        config.src_maxburst = 32;
        config.dst_maxburst = 32;
        ret = dmaengine_slave_config(info->dma_chan, &config);
        if (ret < 0) {
                dev_err(&info->pdev->dev,
                        "dma channel configuration failed: %d\n",
                        ret);
                return ret;
        }

        /*
         * Now that DMA buffers are allocated we turn on
         * DMA proper for I/O operations.
         */
        info->use_dma = 1;
        return 0;
}

static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
        if (info->use_dma) {
                dmaengine_terminate_all(info->dma_chan);
                dma_release_channel(info->dma_chan);
        }
        kfree(info->data_buff);
}

static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
{
        struct mtd_info *mtd;
        struct nand_chip *chip;
        int ret;

        mtd = info->host[info->cs]->mtd;
        chip = mtd->priv;

        /* use the common default timing to make a detection attempt */
1402         ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1403         if (ret)
1404                 return ret;
1405
1406         chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1407         ret = chip->waitfunc(mtd, chip);
1408         if (ret & NAND_STATUS_FAIL)
1409                 return -ENODEV;
1410
1411         return 0;
1412 }
1413
1414 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1415                         struct nand_ecc_ctrl *ecc,
1416                         int strength, int ecc_stepsize, int page_size)
1417 {
1418         if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1419                 info->chunk_size = 2048;
1420                 info->spare_size = 40;
1421                 info->ecc_size = 24;
1422                 ecc->mode = NAND_ECC_HW;
1423                 ecc->size = 512;
1424                 ecc->strength = 1;
1425
1426         } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1427                 info->chunk_size = 512;
1428                 info->spare_size = 8;
1429                 info->ecc_size = 8;
1430                 ecc->mode = NAND_ECC_HW;
1431                 ecc->size = 512;
1432                 ecc->strength = 1;
1433
1434         /*
1435          * Required ECC: 4-bit correction per 512 bytes
1436          * Select: 16-bit correction per 2048 bytes
1437          */
1438         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1439                 info->ecc_bch = 1;
1440                 info->chunk_size = 2048;
1441                 info->spare_size = 32;
1442                 info->ecc_size = 32;
1443                 ecc->mode = NAND_ECC_HW;
1444                 ecc->size = info->chunk_size;
1445                 ecc->layout = &ecc_layout_2KB_bch4bit;
1446                 ecc->strength = 16;
1447
1448         } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1449                 info->ecc_bch = 1;
1450                 info->chunk_size = 2048;
1451                 info->spare_size = 32;
1452                 info->ecc_size = 32;
1453                 ecc->mode = NAND_ECC_HW;
1454                 ecc->size = info->chunk_size;
1455                 ecc->layout = &ecc_layout_4KB_bch4bit;
1456                 ecc->strength = 16;
1457
1458         /*
1459          * Required ECC: 8-bit correction per 512 bytes
1460          * Select: 16-bit correction per 1024 bytes
1461          */
1462         } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1463                 info->ecc_bch = 1;
1464                 info->chunk_size = 1024;
1465                 info->spare_size = 0;
1466                 info->ecc_size = 32;
1467                 ecc->mode = NAND_ECC_HW;
1468                 ecc->size = info->chunk_size;
1469                 ecc->layout = &ecc_layout_4KB_bch8bit;
1470                 ecc->strength = 16;
1471         } else {
1472                 dev_err(&info->pdev->dev,
1473                         "ECC strength %d at page size %d is not supported\n",
1474                         strength, page_size);
1475                 return -ENODEV;
1476         }
1477
1478         dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1479                  ecc->strength, ecc->size);
1480         return 0;
1481 }
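     /*
      * The BCH selections above trade granularity for strength at a
      * constant correction density.  A quick sanity check of the
      * arithmetic: 16 bits per 2048-byte chunk equals the required
      * 4 bits per 512 bytes (16/2048 = 4/512), and 16 bits per
      * 1024-byte chunk equals the required 8 bits per 512 bytes
      * (16/1024 = 8/512).
      */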
1482
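     /*
      * Overall scan flow: either keep the configuration left by the
      * bootloader (keep_config) or reset the chip and match its READID
      * answer against the builtin flash table, then run the generic
      * nand_scan_ident()/nand_scan_tail() pair with ECC and the
      * data/oob buffer set up in between.
      */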
1483 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1484 {
1485         struct pxa3xx_nand_host *host = mtd->priv;
1486         struct pxa3xx_nand_info *info = host->info_data;
1487         struct platform_device *pdev = info->pdev;
1488         struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1489         struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1490         const struct pxa3xx_nand_flash *f = NULL;
1491         struct nand_chip *chip = mtd->priv;
1492         uint32_t id = -1;
1493         uint64_t chipsize;
1494         int i, ret, num;
1495         uint16_t ecc_strength, ecc_step;
1496
1497         if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1498                 goto KEEP_CONFIG;
1499
1500         /* Set a default chunk size */
1501         info->chunk_size = 512;
1502
1503         ret = pxa3xx_nand_sensing(info);
1504         if (ret) {
1505                 dev_info(&info->pdev->dev, "no chip detected on cs %d\n",
1506                          info->cs);
1507
1508                 return ret;
1509         }
1510
1511         chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1512         id = *((uint16_t *)(info->data_buff));
1513         if (id != 0) {
1514                 dev_info(&info->pdev->dev, "detected flash id %x\n", id);
1515         } else {
1516                 dev_warn(&info->pdev->dev,
1517                          "read ID returned 0, the timings are probably wrong\n");
1518
1519                 return -EINVAL;
1520         }
1521
1522         num = ARRAY_SIZE(builtin_flash_types) - 1;
1523         for (i = 0; i < num; i++) {
1524                 f = &builtin_flash_types[i + 1];
1525
1526                 /* find the chip in default list */
1527                 if (f->chip_id == id)
1528                         break;
1529         }
1530
1531         if (i >= (ARRAY_SIZE(builtin_flash_types) - 1)) {
1532                 dev_err(&info->pdev->dev, "flash id %x not found in the builtin table\n", id);
1533
1534                 return -EINVAL;
1535         }
1536
1537         ret = pxa3xx_nand_config_flash(info, f);
1538         if (ret) {
1539                 dev_err(&info->pdev->dev, "failed to configure the flash\n");
1540                 return ret;
1541         }
1542
1543         memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1544
1545         pxa3xx_flash_ids[0].name = f->name;
1546         pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1547         pxa3xx_flash_ids[0].pagesize = f->page_size;
1548         chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1549         pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1550         pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1551         if (f->flash_width == 16)
1552                 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1553         pxa3xx_flash_ids[1].name = NULL;
1554         def = pxa3xx_flash_ids;
1555 KEEP_CONFIG:
1556         if (info->reg_ndcr & NDCR_DWIDTH_M)
1557                 chip->options |= NAND_BUSWIDTH_16;
1558
1559         /* Device detection must be done with ECC disabled */
1560         if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1561                 nand_writel(info, NDECCCTRL, 0x0);
1562
1563         if (nand_scan_ident(mtd, 1, def))
1564                 return -ENODEV;
1565
1566         if (pdata->flash_bbt) {
1567                 /*
1568                  * Use a bad block table stored in-flash and do not
1569                  * allow writing bad block markers to the flash.
1570                  */
1571                 chip->bbt_options |= NAND_BBT_USE_FLASH |
1572                                      NAND_BBT_NO_OOB_BBM;
1573                 chip->bbt_td = &bbt_main_descr;
1574                 chip->bbt_md = &bbt_mirror_descr;
1575         }
1576
1577         /*
1578          * If the page size is bigger than the FIFO size, check that
1579          * we are on the right variant and then switch to the extended
1580          * (aka split) command handling.
1581          */
1582         if (mtd->writesize > PAGE_CHUNK_SIZE) {
1583                 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1584                         chip->cmdfunc = nand_cmdfunc_extended;
1585                 } else {
1586                         dev_err(&info->pdev->dev,
1587                                 "unsupported page size on this variant\n");
1588                         return -ENODEV;
1589                 }
1590         }
1591
1592         if (pdata->ecc_strength && pdata->ecc_step_size) {
1593                 ecc_strength = pdata->ecc_strength;
1594                 ecc_step = pdata->ecc_step_size;
1595         } else {
1596                 ecc_strength = chip->ecc_strength_ds;
1597                 ecc_step = chip->ecc_step_ds;
1598         }
1599
1600         /* Set default ECC strength requirements on non-ONFI devices */
1601         if (ecc_strength < 1 && ecc_step < 1) {
1602                 ecc_strength = 1;
1603                 ecc_step = 512;
1604         }
1605
1606         ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1607                            ecc_step, mtd->writesize);
1608         if (ret)
1609                 return ret;
1610
1611         /* calculate addressing information */
1612         if (mtd->writesize >= 2048)
1613                 host->col_addr_cycles = 2;
1614         else
1615                 host->col_addr_cycles = 1;
1616
1617         /* release the initial buffer */
1618         kfree(info->data_buff);
1619
1620         /* allocate the real data + oob buffer */
1621         info->buf_size = mtd->writesize + mtd->oobsize;
1622         ret = pxa3xx_nand_init_buff(info);
1623         if (ret)
1624                 return ret;
1625         info->oob_buff = info->data_buff + mtd->writesize;
1626
1627         if ((mtd->size >> chip->page_shift) > 65536)
1628                 host->row_addr_cycles = 3;
1629         else
1630                 host->row_addr_cycles = 2;
1631         return nand_scan_tail(mtd);
1632 }
1633
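     /*
      * All controller and per-chip-select state lives in a single devm
      * allocation.  Sketch of the layout (one mtd + host pair per cs):
      *
      *   | info | mtd[0] | host[0] | mtd[1] | host[1] | ...
      *
      * struct pxa3xx_nand_host embeds struct nand_chip as its first
      * member, which is what makes the chip-to-host cast below valid.
      */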
1634 static int alloc_nand_resource(struct platform_device *pdev)
1635 {
1636         struct pxa3xx_nand_platform_data *pdata;
1637         struct pxa3xx_nand_info *info;
1638         struct pxa3xx_nand_host *host;
1639         struct nand_chip *chip = NULL;
1640         struct mtd_info *mtd;
1641         struct resource *r;
1642         int ret, irq, cs;
1643
1644         pdata = dev_get_platdata(&pdev->dev);
1645         if (pdata->num_cs <= 0)
1646                 return -ENODEV;
1647         info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1648                             sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1649         if (!info)
1650                 return -ENOMEM;
1651
1652         info->pdev = pdev;
1653         info->variant = pxa3xx_nand_get_variant(pdev);
1654         for (cs = 0; cs < pdata->num_cs; cs++) {
1655                 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1656                 chip = (struct nand_chip *)(&mtd[1]);
1657                 host = (struct pxa3xx_nand_host *)chip;
1658                 info->host[cs] = host;
1659                 host->mtd = mtd;
1660                 host->cs = cs;
1661                 host->info_data = info;
1662                 mtd->priv = host;
1663                 mtd->owner = THIS_MODULE;
1664
1665                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1666                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1667                 chip->controller        = &info->controller;
1668                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1669                 chip->select_chip       = pxa3xx_nand_select_chip;
1670                 chip->read_word         = pxa3xx_nand_read_word;
1671                 chip->read_byte         = pxa3xx_nand_read_byte;
1672                 chip->read_buf          = pxa3xx_nand_read_buf;
1673                 chip->write_buf         = pxa3xx_nand_write_buf;
1674                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1675                 chip->cmdfunc           = nand_cmdfunc;
1676         }
1677
1678         spin_lock_init(&chip->controller->lock);
1679         init_waitqueue_head(&chip->controller->wq);
1680         info->clk = devm_clk_get(&pdev->dev, NULL);
1681         if (IS_ERR(info->clk)) {
1682                 dev_err(&pdev->dev, "failed to get nand clock\n");
1683                 return PTR_ERR(info->clk);
1684         }
1685         ret = clk_prepare_enable(info->clk);
1686         if (ret < 0)
1687                 return ret;
1688
1689         if (use_dma) {
1690                 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1691                 if (r == NULL) {
1692                         dev_err(&pdev->dev,
1693                                 "no resource defined for data DMA\n");
1694                         ret = -ENXIO;
1695                         goto fail_disable_clk;
1696                 }
1697                 info->drcmr_dat = r->start;
1698
1699                 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1700                 if (r == NULL) {
1701                         dev_err(&pdev->dev,
1702                                 "no resource defined for cmd DMA\n");
1703                         ret = -ENXIO;
1704                         goto fail_disable_clk;
1705                 }
1706                 info->drcmr_cmd = r->start;
1707         }
1708
1709         irq = platform_get_irq(pdev, 0);
1710         if (irq < 0) {
1711                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1712                 ret = -ENXIO;
1713                 goto fail_disable_clk;
1714         }
1715
1716         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1717         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1718         if (IS_ERR(info->mmio_base)) {
1719                 ret = PTR_ERR(info->mmio_base);
1720                 goto fail_disable_clk;
1721         }
1722         info->mmio_phys = r->start;
1723
1724         /* Allocate a buffer to allow flash detection */
1725         info->buf_size = INIT_BUFFER_SIZE;
1726         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1727         if (info->data_buff == NULL) {
1728                 ret = -ENOMEM;
1729                 goto fail_disable_clk;
1730         }
1731
1732         /* start with all interrupts disabled */
1733         disable_int(info, NDSR_MASK);
1734
1735         ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1736                                    pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1737                                    pdev->name, info);
1738         if (ret < 0) {
1739                 dev_err(&pdev->dev, "failed to request IRQ\n");
1740                 goto fail_free_buf;
1741         }
1742
1743         platform_set_drvdata(pdev, info);
1744
1745         return 0;
1746
1747 fail_free_buf:
1748         /* the IRQ request failed, so only the buffer needs freeing */
1749         kfree(info->data_buff);
1750 fail_disable_clk:
1751         clk_disable_unprepare(info->clk);
1752         return ret;
1753 }
1754
1755 static int pxa3xx_nand_remove(struct platform_device *pdev)
1756 {
1757         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1758         struct pxa3xx_nand_platform_data *pdata;
1759         int irq, cs;
1760
1761         if (!info)
1762                 return 0;
1763
1764         pdata = dev_get_platdata(&pdev->dev);
1765
1766         irq = platform_get_irq(pdev, 0);
1767         if (irq >= 0)
1768                 free_irq(irq, info);
1769         pxa3xx_nand_free_buff(info);
1770
1771         clk_disable_unprepare(info->clk);
1772
1773         for (cs = 0; cs < pdata->num_cs; cs++)
1774                 nand_release(info->host[cs]->mtd);
1775         return 0;
1776 }
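     /*
      * Teardown mirrors probe: the IRQ is freed first so the handler
      * can no longer run, then the DMA channel and buffers go, and
      * finally nand_release() unregisters each chip select's mtd.
      */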
1777
1778 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1779 {
1780         struct pxa3xx_nand_platform_data *pdata;
1781         struct device_node *np = pdev->dev.of_node;
1782         const struct of_device_id *of_id =
1783                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1784
1785         if (!of_id)
1786                 return 0;
1787
1788         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1789         if (!pdata)
1790                 return -ENOMEM;
1791
1792         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1793                 pdata->enable_arbiter = 1;
1794         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1795                 pdata->keep_config = 1;
1796         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1797         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1798
1799         pdata->ecc_strength = of_get_nand_ecc_strength(np);
1800         if (pdata->ecc_strength < 0)
1801                 pdata->ecc_strength = 0;
1802
1803         pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1804         if (pdata->ecc_step_size < 0)
1805                 pdata->ecc_step_size = 0;
1806
1807         pdev->dev.platform_data = pdata;
1808
1809         return 0;
1810 }
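     /*
      * For illustration only (addresses and values are examples, not
      * taken from a real board), a device tree node providing the
      * properties parsed above could look like:
      *
      *   nand0: nand@43100000 {
      *           compatible = "marvell,pxa3xx-nand";
      *           reg = <0x43100000 90>;
      *           num-cs = <1>;
      *           marvell,nand-enable-arbiter;
      *           marvell,nand-keep-config;
      *   };
      */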
1811
1812 static int pxa3xx_nand_probe(struct platform_device *pdev)
1813 {
1814         struct pxa3xx_nand_platform_data *pdata;
1815         struct mtd_part_parser_data ppdata = {};
1816         struct pxa3xx_nand_info *info;
1817         int ret, cs, probe_success, dma_available;
1818
1819         dma_available = IS_ENABLED(CONFIG_ARM) &&
1820                 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1821         if (use_dma && !dma_available) {
1822                 use_dma = 0;
1823                 dev_warn(&pdev->dev,
1824                          "This platform can't do DMA on this device\n");
1825         }
1826
1827         ret = pxa3xx_nand_probe_dt(pdev);
1828         if (ret)
1829                 return ret;
1830
1831         pdata = dev_get_platdata(&pdev->dev);
1832         if (!pdata) {
1833                 dev_err(&pdev->dev, "no platform data defined\n");
1834                 return -ENODEV;
1835         }
1836
1837         ret = alloc_nand_resource(pdev);
1838         if (ret) {
1839                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1840                 return ret;
1841         }
1842
1843         info = platform_get_drvdata(pdev);
1844         probe_success = 0;
1845         for (cs = 0; cs < pdata->num_cs; cs++) {
1846                 struct mtd_info *mtd = info->host[cs]->mtd;
1847
1848                 /*
1849                  * The mtd name matches the one used in the 'mtdparts' kernel
1850                  * parameter (illustrated after this function); it cannot be
1851                  * changed, otherwise the user's partition setup would break.
1852                  */
1853                 mtd->name = "pxa3xx_nand-0";
1854                 info->cs = cs;
1855                 ret = pxa3xx_nand_scan(mtd);
1856                 if (ret) {
1857                         dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1858                                 cs);
1859                         continue;
1860                 }
1861
1862                 ppdata.of_node = pdev->dev.of_node;
1863                 ret = mtd_device_parse_register(mtd, NULL,
1864                                                 &ppdata, pdata->parts[cs],
1865                                                 pdata->nr_parts[cs]);
1866                 if (!ret)
1867                         probe_success = 1;
1868         }
1869
1870         if (!probe_success) {
1871                 pxa3xx_nand_remove(pdev);
1872                 return -ENODEV;
1873         }
1874
1875         return 0;
1876 }
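     /*
      * Since the mtd name is fixed to "pxa3xx_nand-0", a matching
      * 'mtdparts' kernel command line could be (partition names and
      * sizes are made up for the example):
      *
      *   mtdparts=pxa3xx_nand-0:1m(bootloader)ro,4m(kernel),-(rootfs)
      */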
1877
1878 #ifdef CONFIG_PM
1879 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1880 {
1881         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1882         struct pxa3xx_nand_platform_data *pdata;
1883         struct mtd_info *mtd;
1884         int cs;
1885
1886         pdata = dev_get_platdata(&pdev->dev);
1887         if (info->state) {
1888                 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1889                 return -EAGAIN;
1890         }
1891
1892         for (cs = 0; cs < pdata->num_cs; cs++) {
1893                 mtd = info->host[cs]->mtd;
1894                 mtd_suspend(mtd);
1895         }
1896
1897         return 0;
1898 }
1899
1900 static int pxa3xx_nand_resume(struct platform_device *pdev)
1901 {
1902         struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1903         struct pxa3xx_nand_platform_data *pdata;
1904         struct mtd_info *mtd;
1905         int cs;
1906
1907         pdata = dev_get_platdata(&pdev->dev);
1908         /* Don't handle any interrupt before the mtd core is resumed */
1909         disable_int(info, NDCR_INT_MASK);
1910
1911         /*
1912          * Directly set the chip select to an invalid value so
1913          * that the driver reprograms the timings for the current
1914          * chip select at the beginning of cmdfunc.
1915          */
1916         info->cs = 0xff;
1917
1918         /*
1919          * As the spec says, NDSR is updated to 0x1800 when the
1920          * nand clock is disabled and re-enabled. To prevent this
1921          * from corrupting the driver's state machine, clear all
1922          * status bits before resuming.
1923          */
1924         nand_writel(info, NDSR, NDSR_MASK);
1925         for (cs = 0; cs < pdata->num_cs; cs++) {
1926                 mtd = info->host[cs]->mtd;
1927                 mtd_resume(mtd);
1928         }
1929
1930         return 0;
1931 }
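     /*
      * Note: the NDSR status bits are write-one-to-clear, so writing
      * NDSR_MASK above clears every pending status bit in one go.
      */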
1932 #else
1933 #define pxa3xx_nand_suspend     NULL
1934 #define pxa3xx_nand_resume      NULL
1935 #endif
1936
1937 static struct platform_driver pxa3xx_nand_driver = {
1938         .driver = {
1939                 .name   = "pxa3xx-nand",
1940                 .of_match_table = pxa3xx_nand_dt_ids,
1941         },
1942         .probe          = pxa3xx_nand_probe,
1943         .remove         = pxa3xx_nand_remove,
1944         .suspend        = pxa3xx_nand_suspend,
1945         .resume         = pxa3xx_nand_resume,
1946 };
1947
1948 module_platform_driver(pxa3xx_nand_driver);
1949
1950 MODULE_LICENSE("GPL");
1951 MODULE_DESCRIPTION("PXA3xx NAND controller driver");