/*
 * Imported from karo-tx-linux.git (merge of 'edac-amd/for-next'):
 * drivers/mtd/nand/pxa3xx_nand.c
 */
1 /*
2  * drivers/mtd/nand/pxa3xx_nand.c
3  *
4  * Copyright © 2005 Intel Corporation
5  * Copyright © 2006 Marvell International Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  *
11  * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
30 #include <linux/of.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
33 #include <linux/platform_data/mtd-nand-pxa3xx.h>
34
35 #define CHIP_DELAY_TIMEOUT      msecs_to_jiffies(200)
36 #define NAND_STOP_DELAY         msecs_to_jiffies(40)
37 #define PAGE_CHUNK_SIZE         (2048)
38
39 /*
40  * Define a buffer size for the initial command that detects the flash device:
41  * STATUS, READID and PARAM.
42  * ONFI param page is 256 bytes, and there are three redundant copies
43  * to be read. JEDEC param page is 512 bytes, and there are also three
44  * redundant copies to be read.
45  * Hence this buffer should be at least 512 x 3. Let's pick 2048.
46  */
47 #define INIT_BUFFER_SIZE        2048
48
49 /* registers and bit definitions */
50 #define NDCR            (0x00) /* Control register */
51 #define NDTR0CS0        (0x04) /* Timing Parameter 0 for CS0 */
52 #define NDTR1CS0        (0x0C) /* Timing Parameter 1 for CS0 */
53 #define NDSR            (0x14) /* Status Register */
54 #define NDPCR           (0x18) /* Page Count Register */
55 #define NDBDR0          (0x1C) /* Bad Block Register 0 */
56 #define NDBDR1          (0x20) /* Bad Block Register 1 */
57 #define NDECCCTRL       (0x28) /* ECC control */
58 #define NDDB            (0x40) /* Data Buffer */
59 #define NDCB0           (0x48) /* Command Buffer0 */
60 #define NDCB1           (0x4C) /* Command Buffer1 */
61 #define NDCB2           (0x50) /* Command Buffer2 */
62
63 #define NDCR_SPARE_EN           (0x1 << 31)
64 #define NDCR_ECC_EN             (0x1 << 30)
65 #define NDCR_DMA_EN             (0x1 << 29)
66 #define NDCR_ND_RUN             (0x1 << 28)
67 #define NDCR_DWIDTH_C           (0x1 << 27)
68 #define NDCR_DWIDTH_M           (0x1 << 26)
69 #define NDCR_PAGE_SZ            (0x1 << 24)
70 #define NDCR_NCSX               (0x1 << 23)
71 #define NDCR_ND_MODE            (0x3 << 21)
72 #define NDCR_NAND_MODE          (0x0)
73 #define NDCR_CLR_PG_CNT         (0x1 << 20)
74 #define NFCV1_NDCR_ARB_CNTL     (0x1 << 19)
75 #define NFCV2_NDCR_STOP_ON_UNCOR        (0x1 << 19)
76 #define NDCR_RD_ID_CNT_MASK     (0x7 << 16)
77 #define NDCR_RD_ID_CNT(x)       (((x) << 16) & NDCR_RD_ID_CNT_MASK)
78
79 #define NDCR_RA_START           (0x1 << 15)
80 #define NDCR_PG_PER_BLK         (0x1 << 14)
81 #define NDCR_ND_ARB_EN          (0x1 << 12)
82 #define NDCR_INT_MASK           (0xFFF)
83
84 #define NDSR_MASK               (0xfff)
85 #define NDSR_ERR_CNT_OFF        (16)
86 #define NDSR_ERR_CNT_MASK       (0x1f)
87 #define NDSR_ERR_CNT(sr)        ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
88 #define NDSR_RDY                (0x1 << 12)
89 #define NDSR_FLASH_RDY          (0x1 << 11)
90 #define NDSR_CS0_PAGED          (0x1 << 10)
91 #define NDSR_CS1_PAGED          (0x1 << 9)
92 #define NDSR_CS0_CMDD           (0x1 << 8)
93 #define NDSR_CS1_CMDD           (0x1 << 7)
94 #define NDSR_CS0_BBD            (0x1 << 6)
95 #define NDSR_CS1_BBD            (0x1 << 5)
96 #define NDSR_UNCORERR           (0x1 << 4)
97 #define NDSR_CORERR             (0x1 << 3)
98 #define NDSR_WRDREQ             (0x1 << 2)
99 #define NDSR_RDDREQ             (0x1 << 1)
100 #define NDSR_WRCMDREQ           (0x1)
101
102 #define NDCB0_LEN_OVRD          (0x1 << 28)
103 #define NDCB0_ST_ROW_EN         (0x1 << 26)
104 #define NDCB0_AUTO_RS           (0x1 << 25)
105 #define NDCB0_CSEL              (0x1 << 24)
106 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
107 #define NDCB0_EXT_CMD_TYPE(x)   (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
108 #define NDCB0_CMD_TYPE_MASK     (0x7 << 21)
109 #define NDCB0_CMD_TYPE(x)       (((x) << 21) & NDCB0_CMD_TYPE_MASK)
110 #define NDCB0_NC                (0x1 << 20)
111 #define NDCB0_DBC               (0x1 << 19)
112 #define NDCB0_ADDR_CYC_MASK     (0x7 << 16)
113 #define NDCB0_ADDR_CYC(x)       (((x) << 16) & NDCB0_ADDR_CYC_MASK)
114 #define NDCB0_CMD2_MASK         (0xff << 8)
115 #define NDCB0_CMD1_MASK         (0xff)
116 #define NDCB0_ADDR_CYC_SHIFT    (16)
117
118 #define EXT_CMD_TYPE_DISPATCH   6 /* Command dispatch */
119 #define EXT_CMD_TYPE_NAKED_RW   5 /* Naked read or Naked write */
120 #define EXT_CMD_TYPE_READ       4 /* Read */
121 #define EXT_CMD_TYPE_DISP_WR    4 /* Command dispatch with write */
122 #define EXT_CMD_TYPE_FINAL      3 /* Final command */
123 #define EXT_CMD_TYPE_LAST_RW    1 /* Last naked read/write */
124 #define EXT_CMD_TYPE_MONO       0 /* Monolithic read/write */
125
126 /*
127  * This should be large enough to read 'ONFI' and 'JEDEC'.
128  * Let's use 7 bytes, which is the maximum ID count supported
129  * by the controller (see NDCR_RD_ID_CNT_MASK).
130  */
131 #define READ_ID_BYTES           7
132
133 /* macros for registers read/write */
134 #define nand_writel(info, off, val)                                     \
135         do {                                                            \
136                 dev_vdbg(&info->pdev->dev,                              \
137                          "%s():%d nand_writel(0x%x, 0x%04x)\n",         \
138                          __func__, __LINE__, (val), (off));             \
139                 writel_relaxed((val), (info)->mmio_base + (off));       \
140         } while (0)
141
142 #define nand_readl(info, off)                                           \
143         ({                                                              \
144                 unsigned int _v;                                        \
145                 _v = readl_relaxed((info)->mmio_base + (off));          \
146                 dev_vdbg(&info->pdev->dev,                              \
147                          "%s():%d nand_readl(0x%04x) = 0x%x\n",         \
148                          __func__, __LINE__, (off), _v);                \
149                 _v;                                                     \
150         })
151
152 /* error code and state */
153 enum {
154         ERR_NONE        = 0,
155         ERR_DMABUSERR   = -1,
156         ERR_SENDCMD     = -2,
157         ERR_UNCORERR    = -3,
158         ERR_BBERR       = -4,
159         ERR_CORERR      = -5,
160 };
161
162 enum {
163         STATE_IDLE = 0,
164         STATE_PREPARED,
165         STATE_CMD_HANDLE,
166         STATE_DMA_READING,
167         STATE_DMA_WRITING,
168         STATE_DMA_DONE,
169         STATE_PIO_READING,
170         STATE_PIO_WRITING,
171         STATE_CMD_DONE,
172         STATE_READY,
173 };
174
175 enum pxa3xx_nand_variant {
176         PXA3XX_NAND_VARIANT_PXA,
177         PXA3XX_NAND_VARIANT_ARMADA370,
178 };
179
180 struct pxa3xx_nand_host {
181         struct nand_chip        chip;
182         void                    *info_data;
183
184         /* page size of attached chip */
185         int                     use_ecc;
186         int                     cs;
187
188         /* calculated from pxa3xx_nand_flash data */
189         unsigned int            col_addr_cycles;
190         unsigned int            row_addr_cycles;
191 };
192
193 struct pxa3xx_nand_info {
194         struct nand_hw_control  controller;
195         struct platform_device   *pdev;
196
197         struct clk              *clk;
198         void __iomem            *mmio_base;
199         unsigned long           mmio_phys;
200         struct completion       cmd_complete, dev_ready;
201
202         unsigned int            buf_start;
203         unsigned int            buf_count;
204         unsigned int            buf_size;
205         unsigned int            data_buff_pos;
206         unsigned int            oob_buff_pos;
207
208         /* DMA information */
209         struct scatterlist      sg;
210         enum dma_data_direction dma_dir;
211         struct dma_chan         *dma_chan;
212         dma_cookie_t            dma_cookie;
213         int                     drcmr_dat;
214         int                     drcmr_cmd;
215
216         unsigned char           *data_buff;
217         unsigned char           *oob_buff;
218         dma_addr_t              data_buff_phys;
219         int                     data_dma_ch;
220
221         struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
222         unsigned int            state;
223
224         /*
225          * This driver supports NFCv1 (as found in PXA SoC)
226          * and NFCv2 (as found in Armada 370/XP SoC).
227          */
228         enum pxa3xx_nand_variant variant;
229
230         int                     cs;
231         int                     use_ecc;        /* use HW ECC ? */
232         int                     ecc_bch;        /* using BCH ECC? */
233         int                     use_dma;        /* use DMA ? */
234         int                     use_spare;      /* use spare ? */
235         int                     need_wait;
236
237         unsigned int            data_size;      /* data to be read from FIFO */
238         unsigned int            chunk_size;     /* split commands chunk size */
239         unsigned int            oob_size;
240         unsigned int            spare_size;
241         unsigned int            ecc_size;
242         unsigned int            ecc_err_cnt;
243         unsigned int            max_bitflips;
244         int                     retcode;
245
246         /* cached register value */
247         uint32_t                reg_ndcr;
248         uint32_t                ndtr0cs0;
249         uint32_t                ndtr1cs0;
250
251         /* generated NDCBx register values */
252         uint32_t                ndcb0;
253         uint32_t                ndcb1;
254         uint32_t                ndcb2;
255         uint32_t                ndcb3;
256 };
257
258 static bool use_dma = 1;
259 module_param(use_dma, bool, 0444);
260 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
261
262 struct pxa3xx_nand_timing {
263         unsigned int    tCH;  /* Enable signal hold time */
264         unsigned int    tCS;  /* Enable signal setup time */
265         unsigned int    tWH;  /* ND_nWE high duration */
266         unsigned int    tWP;  /* ND_nWE pulse time */
267         unsigned int    tRH;  /* ND_nRE high duration */
268         unsigned int    tRP;  /* ND_nRE pulse width */
269         unsigned int    tR;   /* ND_nWE high to ND_nRE low for read */
270         unsigned int    tWHR; /* ND_nWE high to ND_nRE low for status read */
271         unsigned int    tAR;  /* ND_ALE low to ND_nRE low delay */
272 };
273
274 struct pxa3xx_nand_flash {
275         uint32_t        chip_id;
276         unsigned int    flash_width;    /* Width of Flash memory (DWIDTH_M) */
277         unsigned int    dfc_width;      /* Width of flash controller(DWIDTH_C) */
278         struct pxa3xx_nand_timing *timing;      /* NAND Flash timing */
279 };
280
281 static struct pxa3xx_nand_timing timing[] = {
282         { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
283         { 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
284         { 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
285         { 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
286 };
287
288 static struct pxa3xx_nand_flash builtin_flash_types[] = {
289         { 0x46ec, 16, 16, &timing[1] },
290         { 0xdaec,  8,  8, &timing[1] },
291         { 0xd7ec,  8,  8, &timing[1] },
292         { 0xa12c,  8,  8, &timing[2] },
293         { 0xb12c, 16, 16, &timing[2] },
294         { 0xdc2c,  8,  8, &timing[2] },
295         { 0xcc2c, 16, 16, &timing[2] },
296         { 0xba20, 16, 16, &timing[3] },
297 };
298
299 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
300 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
301
302 static struct nand_bbt_descr bbt_main_descr = {
303         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
304                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
305         .offs = 8,
306         .len = 6,
307         .veroffs = 14,
308         .maxblocks = 8,         /* Last 8 blocks in each chip */
309         .pattern = bbt_pattern
310 };
311
312 static struct nand_bbt_descr bbt_mirror_descr = {
313         .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
314                 | NAND_BBT_2BIT | NAND_BBT_VERSION,
315         .offs = 8,
316         .len = 6,
317         .veroffs = 14,
318         .maxblocks = 8,         /* Last 8 blocks in each chip */
319         .pattern = bbt_mirror_pattern
320 };
321
322 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
323         .eccbytes = 32,
324         .eccpos = {
325                 32, 33, 34, 35, 36, 37, 38, 39,
326                 40, 41, 42, 43, 44, 45, 46, 47,
327                 48, 49, 50, 51, 52, 53, 54, 55,
328                 56, 57, 58, 59, 60, 61, 62, 63},
329         .oobfree = { {2, 30} }
330 };
331
332 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
333         .eccbytes = 64,
334         .eccpos = {
335                 32,  33,  34,  35,  36,  37,  38,  39,
336                 40,  41,  42,  43,  44,  45,  46,  47,
337                 48,  49,  50,  51,  52,  53,  54,  55,
338                 56,  57,  58,  59,  60,  61,  62,  63,
339                 96,  97,  98,  99,  100, 101, 102, 103,
340                 104, 105, 106, 107, 108, 109, 110, 111,
341                 112, 113, 114, 115, 116, 117, 118, 119,
342                 120, 121, 122, 123, 124, 125, 126, 127},
343         /* Bootrom looks in bytes 0 & 5 for bad blocks */
344         .oobfree = { {6, 26}, { 64, 32} }
345 };
346
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	/*
	 * NOTE(review): eccbytes is 128 but only the first 32 byte
	 * positions are listed -- presumably constrained by the fixed
	 * size of nand_ecclayout.eccpos; confirm before relying on
	 * eccpos for this layout.
	 */
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63},
	/* No free OOB bytes exposed for 8-bit BCH on 4KB pages. */
	.oobfree = { }
};
356
357 #define NDTR0_tCH(c)    (min((c), 7) << 19)
358 #define NDTR0_tCS(c)    (min((c), 7) << 16)
359 #define NDTR0_tWH(c)    (min((c), 7) << 11)
360 #define NDTR0_tWP(c)    (min((c), 7) << 8)
361 #define NDTR0_tRH(c)    (min((c), 7) << 3)
362 #define NDTR0_tRP(c)    (min((c), 7) << 0)
363
364 #define NDTR1_tR(c)     (min((c), 65535) << 16)
365 #define NDTR1_tWHR(c)   (min((c), 15) << 4)
366 #define NDTR1_tAR(c)    (min((c), 15) << 0)
367
/*
 * Convert nano-seconds to nand flash controller clock cycles.
 * NOTE(review): both divisions truncate, so the cycle count rounds
 * down -- a too-small count gives faster-than-spec timings; confirm
 * this is intended before reusing elsewhere.
 */
#define ns2cycle(ns, clk)       (int)((ns) * (clk / 1000000) / 1000)
370
371 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
372         {
373                 .compatible = "marvell,pxa3xx-nand",
374                 .data       = (void *)PXA3XX_NAND_VARIANT_PXA,
375         },
376         {
377                 .compatible = "marvell,armada370-nand",
378                 .data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
379         },
380         {}
381 };
382 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
383
384 static enum pxa3xx_nand_variant
385 pxa3xx_nand_get_variant(struct platform_device *pdev)
386 {
387         const struct of_device_id *of_id =
388                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
389         if (!of_id)
390                 return PXA3XX_NAND_VARIANT_PXA;
391         return (enum pxa3xx_nand_variant)of_id->data;
392 }
393
394 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
395                                    const struct pxa3xx_nand_timing *t)
396 {
397         struct pxa3xx_nand_info *info = host->info_data;
398         unsigned long nand_clk = clk_get_rate(info->clk);
399         uint32_t ndtr0, ndtr1;
400
401         ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
402                 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
403                 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
404                 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
405                 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
406                 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
407
408         ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
409                 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
410                 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
411
412         info->ndtr0cs0 = ndtr0;
413         info->ndtr1cs0 = ndtr1;
414         nand_writel(info, NDTR0CS0, ndtr0);
415         nand_writel(info, NDTR1CS0, ndtr1);
416 }
417
/*
 * Program the CS0 timing registers from generic SDR timings (ONFI
 * path) and cache the computed values in the driver state.
 */
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = clk_get_rate(info->clk);
	uint32_t ndtr0, ndtr1;

	/*
	 * Timing fields are divided by 1000 before use -- presumably a
	 * picosecond-to-nanosecond conversion; confirm against the
	 * units documented for struct nand_sdr_timings.
	 */
	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	/* Write pulse width: full write cycle minus the high time. */
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	/* Read pulse width: full read cycle minus the high time. */
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	/* chip_delay scaled by 1000 -- presumably us -> ns; verify. */
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	/* Cache, then program CS0. */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
456
457 static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
458                                            unsigned int *flash_width,
459                                            unsigned int *dfc_width)
460 {
461         struct nand_chip *chip = &host->chip;
462         struct pxa3xx_nand_info *info = host->info_data;
463         const struct pxa3xx_nand_flash *f = NULL;
464         struct mtd_info *mtd = nand_to_mtd(&host->chip);
465         int i, id, ntypes;
466
467         ntypes = ARRAY_SIZE(builtin_flash_types);
468
469         chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
470
471         id = chip->read_byte(mtd);
472         id |= chip->read_byte(mtd) << 0x8;
473
474         for (i = 0; i < ntypes; i++) {
475                 f = &builtin_flash_types[i];
476
477                 if (f->chip_id == id)
478                         break;
479         }
480
481         if (i == ntypes) {
482                 dev_err(&info->pdev->dev, "Error: timings not found\n");
483                 return -EINVAL;
484         }
485
486         pxa3xx_nand_set_timing(host, f->timing);
487
488         *flash_width = f->flash_width;
489         *dfc_width = f->dfc_width;
490
491         return 0;
492 }
493
/*
 * ONFI timing setup: 'mode' is a bitfield of supported asynchronous
 * timing modes; pick the highest one, translate it to SDR timings and
 * program the controller with them.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *sdr;
	int best = fls(mode) - 1;

	if (best < 0)
		best = 0;

	sdr = onfi_async_timing_mode_to_sdr_timings(best);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	pxa3xx_nand_set_sdr_timing(host, sdr);
	return 0;
}
511
512 static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
513 {
514         struct nand_chip *chip = &host->chip;
515         struct pxa3xx_nand_info *info = host->info_data;
516         unsigned int flash_width = 0, dfc_width = 0;
517         int mode, err;
518
519         mode = onfi_get_async_timing_mode(chip);
520         if (mode == ONFI_TIMING_MODE_UNKNOWN) {
521                 err = pxa3xx_nand_init_timings_compat(host, &flash_width,
522                                                       &dfc_width);
523                 if (err)
524                         return err;
525
526                 if (flash_width == 16) {
527                         info->reg_ndcr |= NDCR_DWIDTH_M;
528                         chip->options |= NAND_BUSWIDTH_16;
529                 }
530
531                 info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
532         } else {
533                 err = pxa3xx_nand_init_timings_onfi(host, mode);
534                 if (err)
535                         return err;
536         }
537
538         return 0;
539 }
540
541 /*
542  * Set the data and OOB size, depending on the selected
543  * spare and ECC configuration.
544  * Only applicable to READ0, READOOB and PAGEPROG commands.
545  */
546 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
547                                 struct mtd_info *mtd)
548 {
549         int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
550
551         info->data_size = mtd->writesize;
552         if (!oob_enable)
553                 return;
554
555         info->oob_size = info->spare_size;
556         if (!info->use_ecc)
557                 info->oob_size += info->ecc_size;
558 }
559
560 /**
561  * NOTE: it is a must to set ND_RUN firstly, then write
562  * command buffer, otherwise, it does not work.
563  * We enable all the interrupt at the same time, and
564  * let pxa3xx_nand_irq to handle all logic.
565  */
566 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
567 {
568         uint32_t ndcr;
569
570         ndcr = info->reg_ndcr;
571
572         if (info->use_ecc) {
573                 ndcr |= NDCR_ECC_EN;
574                 if (info->ecc_bch)
575                         nand_writel(info, NDECCCTRL, 0x1);
576         } else {
577                 ndcr &= ~NDCR_ECC_EN;
578                 if (info->ecc_bch)
579                         nand_writel(info, NDECCCTRL, 0x0);
580         }
581
582         if (info->use_dma)
583                 ndcr |= NDCR_DMA_EN;
584         else
585                 ndcr &= ~NDCR_DMA_EN;
586
587         if (info->use_spare)
588                 ndcr |= NDCR_SPARE_EN;
589         else
590                 ndcr &= ~NDCR_SPARE_EN;
591
592         ndcr |= NDCR_ND_RUN;
593
594         /* clear status bits and run */
595         nand_writel(info, NDSR, NDSR_MASK);
596         nand_writel(info, NDCR, 0);
597         nand_writel(info, NDCR, ndcr);
598 }
599
600 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
601 {
602         uint32_t ndcr;
603         int timeout = NAND_STOP_DELAY;
604
605         /* wait RUN bit in NDCR become 0 */
606         ndcr = nand_readl(info, NDCR);
607         while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
608                 ndcr = nand_readl(info, NDCR);
609                 udelay(1);
610         }
611
612         if (timeout <= 0) {
613                 ndcr &= ~NDCR_ND_RUN;
614                 nand_writel(info, NDCR, ndcr);
615         }
616         if (info->dma_chan)
617                 dmaengine_terminate_all(info->dma_chan);
618
619         /* clear status bits */
620         nand_writel(info, NDSR, NDSR_MASK);
621 }
622
623 static void __maybe_unused
624 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
625 {
626         uint32_t ndcr;
627
628         ndcr = nand_readl(info, NDCR);
629         nand_writel(info, NDCR, ndcr & ~int_mask);
630 }
631
632 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
633 {
634         uint32_t ndcr;
635
636         ndcr = nand_readl(info, NDCR);
637         nand_writel(info, NDCR, ndcr | int_mask);
638 }
639
/*
 * Read 'len' 32-bit words from the controller data FIFO into 'data'.
 * With BCH ECC enabled the FIFO must be drained in bursts of eight
 * words, re-checking NDSR.RDDREQ between bursts.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 val;
		int ret;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			ioread32_rep(info->mmio_base + NDDB, data, 8);

			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
							 val & NDSR_RDDREQ, 1000, 5000);
			if (ret) {
				dev_err(&info->pdev->dev,
					"Timeout on RDDREQ while draining the FIFO\n");
				return;
			}

			/* 8 words == 32 bytes consumed. */
			data += 32;
			len -= 8;
		}
	}

	/* Final (or only, without BCH) read of the remaining words. */
	ioread32_rep(info->mmio_base + NDDB, data, len);
}
672
/*
 * PIO transfer for the current data request: push or drain the data
 * chunk (and any OOB bytes) through the NDDB FIFO, then advance the
 * buffer positions for multi-chunk pages.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	/* At most one controller chunk per request. */
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		/* FIFO accesses are 32-bit wide; round byte counts up. */
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		/* Only reachable on state-machine corruption. */
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
				info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}
709
710 static void pxa3xx_nand_data_dma_irq(void *data)
711 {
712         struct pxa3xx_nand_info *info = data;
713         struct dma_tx_state state;
714         enum dma_status status;
715
716         status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
717         if (likely(status == DMA_COMPLETE)) {
718                 info->state = STATE_DMA_DONE;
719         } else {
720                 dev_err(&info->pdev->dev, "DMA error on data channel\n");
721                 info->retcode = ERR_DMABUSERR;
722         }
723         dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
724
725         nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
726         enable_int(info, NDCR_INT_MASK);
727 }
728
729 static void start_data_dma(struct pxa3xx_nand_info *info)
730 {
731         enum dma_transfer_direction direction;
732         struct dma_async_tx_descriptor *tx;
733
734         switch (info->state) {
735         case STATE_DMA_WRITING:
736                 info->dma_dir = DMA_TO_DEVICE;
737                 direction = DMA_MEM_TO_DEV;
738                 break;
739         case STATE_DMA_READING:
740                 info->dma_dir = DMA_FROM_DEVICE;
741                 direction = DMA_DEV_TO_MEM;
742                 break;
743         default:
744                 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
745                                 info->state);
746                 BUG();
747         }
748         info->sg.length = info->data_size +
749                 (info->oob_size ? info->spare_size + info->ecc_size : 0);
750         dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
751
752         tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
753                                      DMA_PREP_INTERRUPT);
754         if (!tx) {
755                 dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
756                 return;
757         }
758         tx->callback = pxa3xx_nand_data_dma_irq;
759         tx->callback_param = info;
760         info->dma_cookie = dmaengine_submit(tx);
761         dma_async_issue_pending(info->dma_chan);
762         dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
763                 __func__, direction, info->dma_cookie, info->sg.length);
764 }
765
/*
 * Threaded IRQ half: the hard handler requested a PIO transfer
 * (STATE_PIO_READING/WRITING); perform it here, mark the command
 * done and acknowledge the data-request status bits.
 */
static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
{
	struct pxa3xx_nand_info *info = data;

	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);

	return IRQ_HANDLED;
}
777
/*
 * Hard-IRQ handler for the NAND controller.
 *
 * Decodes NDSR: records ECC results, dispatches data requests to DMA or
 * to the threaded handler (PIO), acknowledges the status bits, loads the
 * command buffer when the controller asks for it, and completes the
 * cmd_complete/dev_ready completions that the command paths wait on.
 */
static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
{
	struct pxa3xx_nand_info *info = devid;
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* Ready/command-done status bits differ per chip select */
	if (info->cs == 0) {
		ready           = NDSR_FLASH_RDY;
		cmd_done        = NDSR_CS0_CMDD;
	} else {
		ready           = NDSR_RDY;
		cmd_done        = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	/* Latch ECC outcome for read_page() to report to the MTD layer */
	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* Only the Armada 370 BCH engine reports a bitflip count */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		/* whether use dma to transfer data */
		if (info->use_dma) {
			disable_int(info, NDCR_INT_MASK);
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_DMA_READING : STATE_DMA_WRITING;
			start_data_dma(info);
			goto NORMAL_IRQ_EXIT;
		} else {
			/* PIO transfers are done in the threaded handler */
			info->state = (status & NDSR_RDDREQ) ?
				      STATE_PIO_READING : STATE_PIO_WRITING;
			ret = IRQ_WAKE_THREAD;
			goto NORMAL_IRQ_EXIT;
		}
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* Wake up the waiters last, after all status handling is done */
	if (is_completed)
		complete(&info->cmd_complete);
	if (is_ready)
		complete(&info->dev_ready);
NORMAL_IRQ_EXIT:
	return ret;
}
873
/* Return 1 if the first @len bytes of @buf are all 0xff, 0 otherwise. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
881
882 static void set_command_address(struct pxa3xx_nand_info *info,
883                 unsigned int page_size, uint16_t column, int page_addr)
884 {
885         /* small page addr setting */
886         if (page_size < PAGE_CHUNK_SIZE) {
887                 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
888                                 | (column & 0xFF);
889
890                 info->ndcb2 = 0;
891         } else {
892                 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
893                                 | (column & 0xFFFF);
894
895                 if (page_addr & 0xFF0000)
896                         info->ndcb2 = (page_addr & 0xFF0000) >> 16;
897                 else
898                         info->ndcb2 = 0;
899         }
900 }
901
/*
 * Reset the per-command bookkeeping in @info and apply the per-command
 * defaults (ECC usage, spare area, data sizes, buffer clearing) before
 * prepare_set_command() builds the actual NDCBx values.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->oob_size		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		/* fall through: READ0/PAGEPROG also need the data sizes set */
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		/* other commands carry no address cycles by default */
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {

		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

}
949
/*
 * Translate an MTD-level @command (plus @column/@page_addr and, for
 * chunked operations, @ext_cmd_type) into the controller's NDCB0..NDCB3
 * shadow values stored in @info.
 *
 * Returns 1 if the command must actually be issued to the controller,
 * 0 if nothing is to be executed (e.g. SEQIN on small pages, ERASE2,
 * blank-page PAGEPROG, or unsupported commands).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	/* Chip select 1 is requested via the CSEL bit in NDCB0 */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only records the address; the transfer happens at PAGEPROG */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB reads are plain reads offset past the data area */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			/* NDCB3 carries the per-chunk transfer length */
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip programming entirely when the page is all 0xff */
		if (is_buf_blank(info->data_buff,
					(mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small pages: single SEQIN+PAGEPROG double-byte cmd */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		/* Parameter page read; length overridden via NDCB3 */
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->data_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* ERASE1+ERASE2 issued as a single double-byte command */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Already folded into the ERASE1 double-byte command above */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
				command);
		break;
	}

	return exec_cmd;
}
1124
/*
 * nand_chip->cmdfunc implementation for pages that fit the controller
 * FIFO: build the command with prepare_set_command(), start it, and wait
 * for completion (with a timeout-driven state machine stop on failure).
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		/* Completions are signalled from pxa3xx_nand_irq() */
		init_completion(&info->cmd_complete);
		init_completion(&info->dev_ready);
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
		}
	}
	info->state = STATE_IDLE;
}
1172
/*
 * nand_chip->cmdfunc implementation for pages larger than the controller
 * FIFO (extended command set, NFCv2): the operation is split into chunks
 * and driven by a loop that re-issues prepare_set_command() with an
 * evolving extended command type (monolithic/naked/last/dispatch) until
 * the whole page has been transferred.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	init_completion(&info->dev_ready);
	do {
		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to run: release any waiter and bail out */
			info->need_wait = 0;
			complete(&info->dev_ready);
			break;
		}

		init_completion(&info->cmd_complete);
		pxa3xx_nand_start(info);

		if (!wait_for_completion_timeout(&info->cmd_complete,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Wait time out!!!\n");
			/* Stop State Machine for next command cycle */
			pxa3xx_nand_stop(info);
			break;
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1283
1284 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1285                 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1286                 int page)
1287 {
1288         chip->write_buf(mtd, buf, mtd->writesize);
1289         chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1290
1291         return 0;
1292 }
1293
/*
 * ecc.read_page hook: read data and OOB, then fold the ECC outcome
 * recorded by the IRQ handler into mtd->ecc_stats.
 *
 * Returns the maximum number of bitflips corrected in any chunk of the
 * page (as tracked in info->max_bitflips by pxa3xx_nand_irq()).
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * OOB, ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1321
1322 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1323 {
1324         struct nand_chip *chip = mtd_to_nand(mtd);
1325         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1326         struct pxa3xx_nand_info *info = host->info_data;
1327         char retval = 0xFF;
1328
1329         if (info->buf_start < info->buf_count)
1330                 /* Has just send a new command? */
1331                 retval = info->data_buff[info->buf_start++];
1332
1333         return retval;
1334 }
1335
/*
 * nand_chip->read_word hook: return the next 16-bit word from the
 * driver's internal data buffer, or 0xFFFF when exhausted/misaligned.
 *
 * NOTE(review): only buf_start < buf_count is checked before reading
 * two bytes; this assumes buf_count is always even here — verify
 * against the command paths that set buf_count.
 */
static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	/* Only even offsets within the buffer are valid word reads */
	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}
1349
1350 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1351 {
1352         struct nand_chip *chip = mtd_to_nand(mtd);
1353         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1354         struct pxa3xx_nand_info *info = host->info_data;
1355         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1356
1357         memcpy(buf, info->data_buff + info->buf_start, real_len);
1358         info->buf_start += real_len;
1359 }
1360
1361 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1362                 const uint8_t *buf, int len)
1363 {
1364         struct nand_chip *chip = mtd_to_nand(mtd);
1365         struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1366         struct pxa3xx_nand_info *info = host->info_data;
1367         int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1368
1369         memcpy(info->data_buff + info->buf_start, buf, real_len);
1370         info->buf_start += real_len;
1371 }
1372
/*
 * nand_chip->select_chip hook: intentionally empty — the driver switches
 * chip select per command (see the info->cs handling in the cmdfunc
 * paths) rather than in this callback.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1377
/*
 * nand_chip->waitfunc hook: wait for the device-ready completion armed
 * by the cmdfunc paths, then map the recorded command outcome into a
 * NAND status value for write/erase operations.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	/* Only wait if a command actually armed the dev_ready completion */
	if (info->need_wait) {
		info->need_wait = 0;
		if (!wait_for_completion_timeout(&info->dev_ready,
		    CHIP_DELAY_TIMEOUT)) {
			dev_err(&info->pdev->dev, "Ready time out!!!\n");
			return NAND_STATUS_FAIL;
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1403
/*
 * Program conservative default controller settings (NDCR and ONFI
 * timing mode 0) so the flash device can be identified before its real
 * geometry and timings are known.
 *
 * Returns 0 on success or a negative errno if the default SDR timings
 * cannot be resolved.
 */
static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	const struct nand_sdr_timings *timings;

	/* Configure default flash values */
	info->chunk_size = PAGE_CHUNK_SIZE;
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);
	return 0;
}
1426
1427 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1428 {
1429         struct pxa3xx_nand_host *host = info->host[info->cs];
1430         struct nand_chip *chip = &host->chip;
1431         struct mtd_info *mtd = nand_to_mtd(chip);
1432
1433         info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1434         info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1435         info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1436 }
1437
/*
 * Adopt the configuration left by the bootloader (keep_config case):
 * snapshot NDCR (minus interrupt/arbiter control bits) and the CS0
 * timing registers instead of reprogramming the controller.
 */
static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	/* Keep the hardware config but own the IRQ mask/arbiter bits */
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}
1452
1453 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1454 {
1455         struct platform_device *pdev = info->pdev;
1456         struct dma_slave_config config;
1457         dma_cap_mask_t mask;
1458         struct pxad_param param;
1459         int ret;
1460
1461         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1462         if (info->data_buff == NULL)
1463                 return -ENOMEM;
1464         if (use_dma == 0)
1465                 return 0;
1466
1467         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1468         if (ret)
1469                 return ret;
1470
1471         sg_init_one(&info->sg, info->data_buff, info->buf_size);
1472         dma_cap_zero(mask);
1473         dma_cap_set(DMA_SLAVE, mask);
1474         param.prio = PXAD_PRIO_LOWEST;
1475         param.drcmr = info->drcmr_dat;
1476         info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1477                                                           &param, &pdev->dev,
1478                                                           "data");
1479         if (!info->dma_chan) {
1480                 dev_err(&pdev->dev, "unable to request data dma channel\n");
1481                 return -ENODEV;
1482         }
1483
1484         memset(&config, 0, sizeof(config));
1485         config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1486         config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1487         config.src_addr = info->mmio_phys + NDDB;
1488         config.dst_addr = info->mmio_phys + NDDB;
1489         config.src_maxburst = 32;
1490         config.dst_maxburst = 32;
1491         ret = dmaengine_slave_config(info->dma_chan, &config);
1492         if (ret < 0) {
1493                 dev_err(&info->pdev->dev,
1494                         "dma channel configuration failed: %d\n",
1495                         ret);
1496                 return ret;
1497         }
1498
1499         /*
1500          * Now that DMA buffers are allocated we turn on
1501          * DMA proper for I/O operations.
1502          */
1503         info->use_dma = 1;
1504         return 0;
1505 }
1506
/*
 * Release the resources acquired by pxa3xx_nand_init_buff(): stop any
 * in-flight DMA transfer and give the channel back, then free the data
 * buffer (kfree(NULL) is a no-op, so a partially-initialized info is
 * handled safely).
 */
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
	if (info->use_dma) {
		dmaengine_terminate_all(info->dma_chan);
		dma_release_channel(info->dma_chan);
	}
	kfree(info->data_buff);
}
1515
/*
 * Map the required ECC (@strength bits per @ecc_stepsize bytes, at
 * @page_size) onto one of the controller's supported hardware ECC
 * geometries, filling in chunk/spare/ECC sizes in @info and the
 * nand_ecc_ctrl parameters in @ecc.
 *
 * Note the selected hardware geometry may be stronger than requested
 * (e.g. 4-bit/512B requests are served by 16-bit/2048B BCH).
 *
 * Returns 0 on success, -ENODEV for unsupported combinations.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		/* Hamming, 1-bit per 512B on a 2KB page */
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		/* Hamming, 1-bit per 512B on a 512B page */
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
		 ecc->strength, ecc->size);
	return 0;
}
1584
/*
 * pxa3xx_nand_scan() - detect the NAND device and finalize driver setup.
 * @mtd: mtd_info of the chip select being scanned.
 *
 * Either trusts the bootloader's controller configuration (keep_config)
 * or probes it, identifies the flash with ECC disabled, then configures
 * bad-block-table handling, extended command handling for pages larger
 * than the FIFO, ECC parameters, address cycle counts and the real
 * data+OOB buffer, and finishes detection with nand_scan_tail().
 *
 * Returns 0 on success or a negative errno.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct platform_device *pdev = info->pdev;
	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;
	uint16_t ecc_strength, ecc_step;

	/* Reuse the bootloader's setup, or derive one from the flash. */
	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
	}

	if (info->reg_ndcr & NDCR_DWIDTH_M)
		chip->options |= NAND_BUSWIDTH_16;

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init(host);
		if (ret) {
			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
				ret);
			return ret;
		}
	}

	if (pdata->flash_bbt) {
		/*
		 * We'll use a bad block table stored in-flash and don't
		 * allow writing the bad block marker to the flash.
		 */
		chip->bbt_options |= NAND_BBT_USE_FLASH |
				     NAND_BBT_NO_OOB_BBM;
		chip->bbt_td = &bbt_main_descr;
		chip->bbt_md = &bbt_mirror_descr;
	}

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka splitted) command handling,
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* Platform data overrides what the chip itself advertised. */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* Devices with more than 65536 pages need a third row cycle. */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
1693
1694 static int alloc_nand_resource(struct platform_device *pdev)
1695 {
1696         struct device_node *np = pdev->dev.of_node;
1697         struct pxa3xx_nand_platform_data *pdata;
1698         struct pxa3xx_nand_info *info;
1699         struct pxa3xx_nand_host *host;
1700         struct nand_chip *chip = NULL;
1701         struct mtd_info *mtd;
1702         struct resource *r;
1703         int ret, irq, cs;
1704
1705         pdata = dev_get_platdata(&pdev->dev);
1706         if (pdata->num_cs <= 0)
1707                 return -ENODEV;
1708         info = devm_kzalloc(&pdev->dev,
1709                             sizeof(*info) + sizeof(*host) * pdata->num_cs,
1710                             GFP_KERNEL);
1711         if (!info)
1712                 return -ENOMEM;
1713
1714         info->pdev = pdev;
1715         info->variant = pxa3xx_nand_get_variant(pdev);
1716         for (cs = 0; cs < pdata->num_cs; cs++) {
1717                 host = (void *)&info[1] + sizeof(*host) * cs;
1718                 chip = &host->chip;
1719                 nand_set_controller_data(chip, host);
1720                 mtd = nand_to_mtd(chip);
1721                 info->host[cs] = host;
1722                 host->cs = cs;
1723                 host->info_data = info;
1724                 mtd->dev.parent = &pdev->dev;
1725                 /* FIXME: all chips use the same device tree partitions */
1726                 nand_set_flash_node(chip, np);
1727
1728                 nand_set_controller_data(chip, host);
1729                 chip->ecc.read_page     = pxa3xx_nand_read_page_hwecc;
1730                 chip->ecc.write_page    = pxa3xx_nand_write_page_hwecc;
1731                 chip->controller        = &info->controller;
1732                 chip->waitfunc          = pxa3xx_nand_waitfunc;
1733                 chip->select_chip       = pxa3xx_nand_select_chip;
1734                 chip->read_word         = pxa3xx_nand_read_word;
1735                 chip->read_byte         = pxa3xx_nand_read_byte;
1736                 chip->read_buf          = pxa3xx_nand_read_buf;
1737                 chip->write_buf         = pxa3xx_nand_write_buf;
1738                 chip->options           |= NAND_NO_SUBPAGE_WRITE;
1739                 chip->cmdfunc           = nand_cmdfunc;
1740         }
1741
1742         spin_lock_init(&chip->controller->lock);
1743         init_waitqueue_head(&chip->controller->wq);
1744         info->clk = devm_clk_get(&pdev->dev, NULL);
1745         if (IS_ERR(info->clk)) {
1746                 dev_err(&pdev->dev, "failed to get nand clock\n");
1747                 return PTR_ERR(info->clk);
1748         }
1749         ret = clk_prepare_enable(info->clk);
1750         if (ret < 0)
1751                 return ret;
1752
1753         if (use_dma) {
1754                 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1755                 if (r == NULL) {
1756                         dev_err(&pdev->dev,
1757                                 "no resource defined for data DMA\n");
1758                         ret = -ENXIO;
1759                         goto fail_disable_clk;
1760                 }
1761                 info->drcmr_dat = r->start;
1762
1763                 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1764                 if (r == NULL) {
1765                         dev_err(&pdev->dev,
1766                                 "no resource defined for cmd DMA\n");
1767                         ret = -ENXIO;
1768                         goto fail_disable_clk;
1769                 }
1770                 info->drcmr_cmd = r->start;
1771         }
1772
1773         irq = platform_get_irq(pdev, 0);
1774         if (irq < 0) {
1775                 dev_err(&pdev->dev, "no IRQ resource defined\n");
1776                 ret = -ENXIO;
1777                 goto fail_disable_clk;
1778         }
1779
1780         r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1781         info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1782         if (IS_ERR(info->mmio_base)) {
1783                 ret = PTR_ERR(info->mmio_base);
1784                 goto fail_disable_clk;
1785         }
1786         info->mmio_phys = r->start;
1787
1788         /* Allocate a buffer to allow flash detection */
1789         info->buf_size = INIT_BUFFER_SIZE;
1790         info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1791         if (info->data_buff == NULL) {
1792                 ret = -ENOMEM;
1793                 goto fail_disable_clk;
1794         }
1795
1796         /* initialize all interrupts to be disabled */
1797         disable_int(info, NDSR_MASK);
1798
1799         ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1800                                    pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1801                                    pdev->name, info);
1802         if (ret < 0) {
1803                 dev_err(&pdev->dev, "failed to request IRQ\n");
1804                 goto fail_free_buf;
1805         }
1806
1807         platform_set_drvdata(pdev, info);
1808
1809         return 0;
1810
1811 fail_free_buf:
1812         free_irq(irq, info);
1813         kfree(info->data_buff);
1814 fail_disable_clk:
1815         clk_disable_unprepare(info->clk);
1816         return ret;
1817 }
1818
/*
 * pxa3xx_nand_remove() - tear down the controller on driver removal.
 * @pdev: platform device being removed.
 *
 * Frees the interrupt and the data buffer, hands DFI bus arbitration
 * back to the SMC, disables the clock and releases each chip select's
 * MTD/NAND registration.  Always returns 0.
 */
static int pxa3xx_nand_remove(struct platform_device *pdev)
{
	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
	struct pxa3xx_nand_platform_data *pdata;
	int irq, cs;

	/* Nothing to do if probe never stored driver data. */
	if (!info)
		return 0;

	pdata = dev_get_platdata(&pdev->dev);

	irq = platform_get_irq(pdev, 0);
	if (irq >= 0)
		free_irq(irq, info);
	pxa3xx_nand_free_buff(info);

	/*
	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
	 * In order to prevent a lockup of the system bus, the DFI bus
	 * arbitration is granted to SMC upon driver removal. This is done by
	 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
	 * access to the bus anymore.
	 */
	nand_writel(info, NDCR,
		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
		    NFCV1_NDCR_ARB_CNTL);
	clk_disable_unprepare(info->clk);

	/*
	 * NOTE(review): nand_release() runs after the controller clock is
	 * disabled — confirm the release path performs no register access.
	 */
	for (cs = 0; cs < pdata->num_cs; cs++)
		nand_release(nand_to_mtd(&info->host[cs]->chip));
	return 0;
}
1851
1852 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1853 {
1854         struct pxa3xx_nand_platform_data *pdata;
1855         struct device_node *np = pdev->dev.of_node;
1856         const struct of_device_id *of_id =
1857                         of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1858
1859         if (!of_id)
1860                 return 0;
1861
1862         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1863         if (!pdata)
1864                 return -ENOMEM;
1865
1866         if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1867                 pdata->enable_arbiter = 1;
1868         if (of_get_property(np, "marvell,nand-keep-config", NULL))
1869                 pdata->keep_config = 1;
1870         of_property_read_u32(np, "num-cs", &pdata->num_cs);
1871         pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1872
1873         pdata->ecc_strength = of_get_nand_ecc_strength(np);
1874         if (pdata->ecc_strength < 0)
1875                 pdata->ecc_strength = 0;
1876
1877         pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1878         if (pdata->ecc_step_size < 0)
1879                 pdata->ecc_step_size = 0;
1880
1881         pdev->dev.platform_data = pdata;
1882
1883         return 0;
1884 }
1885
1886 static int pxa3xx_nand_probe(struct platform_device *pdev)
1887 {
1888         struct pxa3xx_nand_platform_data *pdata;
1889         struct pxa3xx_nand_info *info;
1890         int ret, cs, probe_success, dma_available;
1891
1892         dma_available = IS_ENABLED(CONFIG_ARM) &&
1893                 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1894         if (use_dma && !dma_available) {
1895                 use_dma = 0;
1896                 dev_warn(&pdev->dev,
1897                          "This platform can't do DMA on this device\n");
1898         }
1899
1900         ret = pxa3xx_nand_probe_dt(pdev);
1901         if (ret)
1902                 return ret;
1903
1904         pdata = dev_get_platdata(&pdev->dev);
1905         if (!pdata) {
1906                 dev_err(&pdev->dev, "no platform data defined\n");
1907                 return -ENODEV;
1908         }
1909
1910         ret = alloc_nand_resource(pdev);
1911         if (ret) {
1912                 dev_err(&pdev->dev, "alloc nand resource failed\n");
1913                 return ret;
1914         }
1915
1916         info = platform_get_drvdata(pdev);
1917         probe_success = 0;
1918         for (cs = 0; cs < pdata->num_cs; cs++) {
1919                 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1920
1921                 /*
1922                  * The mtd name matches the one used in 'mtdparts' kernel
1923                  * parameter. This name cannot be changed or otherwise
1924                  * user's mtd partitions configuration would get broken.
1925                  */
1926                 mtd->name = "pxa3xx_nand-0";
1927                 info->cs = cs;
1928                 ret = pxa3xx_nand_scan(mtd);
1929                 if (ret) {
1930                         dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1931                                 cs);
1932                         continue;
1933                 }
1934
1935                 ret = mtd_device_register(mtd, pdata->parts[cs],
1936                                           pdata->nr_parts[cs]);
1937                 if (!ret)
1938                         probe_success = 1;
1939         }
1940
1941         if (!probe_success) {
1942                 pxa3xx_nand_remove(pdev);
1943                 return -ENODEV;
1944         }
1945
1946         return 0;
1947 }
1948
1949 #ifdef CONFIG_PM
1950 static int pxa3xx_nand_suspend(struct device *dev)
1951 {
1952         struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
1953
1954         if (info->state) {
1955                 dev_err(dev, "driver busy, state = %d\n", info->state);
1956                 return -EAGAIN;
1957         }
1958
1959         clk_disable(info->clk);
1960         return 0;
1961 }
1962
1963 static int pxa3xx_nand_resume(struct device *dev)
1964 {
1965         struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
1966         int ret;
1967
1968         ret = clk_enable(info->clk);
1969         if (ret < 0)
1970                 return ret;
1971
1972         /* We don't want to handle interrupt without calling mtd routine */
1973         disable_int(info, NDCR_INT_MASK);
1974
1975         /*
1976          * Directly set the chip select to a invalid value,
1977          * then the driver would reset the timing according
1978          * to current chip select at the beginning of cmdfunc
1979          */
1980         info->cs = 0xff;
1981
1982         /*
1983          * As the spec says, the NDSR would be updated to 0x1800 when
1984          * doing the nand_clk disable/enable.
1985          * To prevent it damaging state machine of the driver, clear
1986          * all status before resume
1987          */
1988         nand_writel(info, NDSR, NDSR_MASK);
1989
1990         return 0;
1991 }
1992 #else
1993 #define pxa3xx_nand_suspend     NULL
1994 #define pxa3xx_nand_resume      NULL
1995 #endif
1996
/*
 * System sleep hooks; both resolve to NULL when CONFIG_PM is disabled
 * (see the #else branch above).
 */
static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
        .suspend        = pxa3xx_nand_suspend,
        .resume         = pxa3xx_nand_resume,
};
2001
/*
 * Platform driver glue: binds by OF compatible (pxa3xx_nand_dt_ids)
 * or by the legacy "pxa3xx-nand" platform device name.
 */
static struct platform_driver pxa3xx_nand_driver = {
        .driver = {
                .name   = "pxa3xx-nand",
                .of_match_table = pxa3xx_nand_dt_ids,
                .pm     = &pxa3xx_nand_pm_ops,
        },
        .probe          = pxa3xx_nand_probe,
        .remove         = pxa3xx_nand_remove,
};
2011
/* Generates the module init/exit boilerplate for the platform driver. */
module_platform_driver(pxa3xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");