2 * drivers/mtd/nand/pxa3xx_nand.c
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
25 #include <linux/iopoll.h>
26 #include <linux/irq.h>
27 #include <linux/slab.h>
29 #include <linux/of_device.h>
30 #include <linux/of_mtd.h>
32 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
40 #include <linux/platform_data/mtd-nand-pxa3xx.h>
42 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
43 #define NAND_STOP_DELAY msecs_to_jiffies(40)
44 #define PAGE_CHUNK_SIZE (2048)
47 * Define a buffer size for the initial command that detects the flash device:
48 * STATUS, READID and PARAM.
49 * ONFI param page is 256 bytes, and there are three redundant copies
50 * to be read. JEDEC param page is 512 bytes, and there are also three
51 * redundant copies to be read.
52 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
54 #define INIT_BUFFER_SIZE 2048
56 /* registers and bit definitions */
57 #define NDCR (0x00) /* Control register */
58 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
59 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
60 #define NDSR (0x14) /* Status Register */
61 #define NDPCR (0x18) /* Page Count Register */
62 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
63 #define NDBDR1 (0x20) /* Bad Block Register 1 */
64 #define NDECCCTRL (0x28) /* ECC control */
65 #define NDDB (0x40) /* Data Buffer */
66 #define NDCB0 (0x48) /* Command Buffer0 */
67 #define NDCB1 (0x4C) /* Command Buffer1 */
68 #define NDCB2 (0x50) /* Command Buffer2 */
70 #define NDCR_SPARE_EN (0x1 << 31)
71 #define NDCR_ECC_EN (0x1 << 30)
72 #define NDCR_DMA_EN (0x1 << 29)
73 #define NDCR_ND_RUN (0x1 << 28)
74 #define NDCR_DWIDTH_C (0x1 << 27)
75 #define NDCR_DWIDTH_M (0x1 << 26)
76 #define NDCR_PAGE_SZ (0x1 << 24)
77 #define NDCR_NCSX (0x1 << 23)
78 #define NDCR_ND_MODE (0x3 << 21)
79 #define NDCR_NAND_MODE (0x0)
80 #define NDCR_CLR_PG_CNT (0x1 << 20)
81 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
82 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
83 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
85 #define NDCR_RA_START (0x1 << 15)
86 #define NDCR_PG_PER_BLK (0x1 << 14)
87 #define NDCR_ND_ARB_EN (0x1 << 12)
88 #define NDCR_INT_MASK (0xFFF)
/* NDSR: all interrupt-status bits, and the corrected-error count field. */
#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
/*
 * Extract the ECC corrected-error count from an NDSR value.
 * The macro argument is fully parenthesized so that expressions such
 * as NDSR_ERR_CNT(a | b) evaluate correctly ('>>' binds tighter than '|').
 */
#define NDSR_ERR_CNT(sr)	(((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
94 #define NDSR_RDY (0x1 << 12)
95 #define NDSR_FLASH_RDY (0x1 << 11)
96 #define NDSR_CS0_PAGED (0x1 << 10)
97 #define NDSR_CS1_PAGED (0x1 << 9)
98 #define NDSR_CS0_CMDD (0x1 << 8)
99 #define NDSR_CS1_CMDD (0x1 << 7)
100 #define NDSR_CS0_BBD (0x1 << 6)
101 #define NDSR_CS1_BBD (0x1 << 5)
102 #define NDSR_UNCORERR (0x1 << 4)
103 #define NDSR_CORERR (0x1 << 3)
104 #define NDSR_WRDREQ (0x1 << 2)
105 #define NDSR_RDDREQ (0x1 << 1)
106 #define NDSR_WRCMDREQ (0x1)
108 #define NDCB0_LEN_OVRD (0x1 << 28)
109 #define NDCB0_ST_ROW_EN (0x1 << 26)
110 #define NDCB0_AUTO_RS (0x1 << 25)
111 #define NDCB0_CSEL (0x1 << 24)
112 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
113 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
114 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
115 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
116 #define NDCB0_NC (0x1 << 20)
117 #define NDCB0_DBC (0x1 << 19)
118 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
119 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
120 #define NDCB0_CMD2_MASK (0xff << 8)
121 #define NDCB0_CMD1_MASK (0xff)
122 #define NDCB0_ADDR_CYC_SHIFT (16)
124 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
125 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
126 #define EXT_CMD_TYPE_READ 4 /* Read */
127 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
128 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
129 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
130 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
133 * This should be large enough to read 'ONFI' and 'JEDEC'.
134 * Let's use 7 bytes, which is the maximum ID count supported
135 * by the controller (see NDCR_RD_ID_CNT_MASK).
137 #define READ_ID_BYTES 7
139 /* macros for registers read/write */
140 #define nand_writel(info, off, val) \
141 writel_relaxed((val), (info)->mmio_base + (off))
143 #define nand_readl(info, off) \
144 readl_relaxed((info)->mmio_base + (off))
146 /* error code and state */
/*
 * Controller variants handled by this driver: NFCv1 as found on PXA SoCs,
 * NFCv2 as found on Armada 370/XP (adds NDCB3, per-chunk error counts).
 */
169 enum pxa3xx_nand_variant {
170 PXA3XX_NAND_VARIANT_PXA,
171 PXA3XX_NAND_VARIANT_ARMADA370,
/*
 * Per-chip-select state: the MTD/NAND chip objects plus addressing
 * geometry derived from the detected flash.
 * NOTE(review): listing is truncated here (embedded line numbers skip);
 * several members are not visible in this fragment.
 */
174 struct pxa3xx_nand_host {
175 struct nand_chip chip;
176 struct mtd_info *mtd;
179 /* page size of attached chip */
183 /* calculated from pxa3xx_nand_flash data */
184 unsigned int col_addr_cycles;
185 unsigned int row_addr_cycles;
/*
 * Driver-wide state: MMIO mapping, command/ready completions, the bounce
 * buffer used for PIO/DMA transfers, per-command size bookkeeping, and
 * cached register images (NDCR timing, generated NDCBx words).
 * NOTE(review): fragmentary listing — some members are missing from view.
 */
188 struct pxa3xx_nand_info {
189 struct nand_hw_control controller;
190 struct platform_device *pdev;
193 void __iomem *mmio_base;
194 unsigned long mmio_phys;
195 struct completion cmd_complete, dev_ready;
197 unsigned int buf_start;
198 unsigned int buf_count;
199 unsigned int buf_size;
200 unsigned int data_buff_pos;
201 unsigned int oob_buff_pos;
203 /* DMA information */
207 unsigned char *data_buff;
208 unsigned char *oob_buff;
209 dma_addr_t data_buff_phys;
211 struct pxa_dma_desc *data_desc;
212 dma_addr_t data_desc_addr;
214 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
218 * This driver supports NFCv1 (as found in PXA SoC)
219 * and NFCv2 (as found in Armada 370/XP SoC).
221 enum pxa3xx_nand_variant variant;
224 int use_ecc; /* use HW ECC ? */
225 int ecc_bch; /* using BCH ECC? */
226 int use_dma; /* use DMA ? */
227 int use_spare; /* use spare ? */
230 unsigned int data_size; /* data to be read from FIFO */
231 unsigned int chunk_size; /* split commands chunk size */
232 unsigned int oob_size;
233 unsigned int spare_size;
234 unsigned int ecc_size;
235 unsigned int ecc_err_cnt;
236 unsigned int max_bitflips;
239 /* cached register value */
244 /* generated NDCBx register values */
251 static bool use_dma = 1;
252 module_param(use_dma, bool, 0444);
253 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
/*
 * Raw NAND interface timings, all in nanoseconds; converted to controller
 * clock cycles by pxa3xx_nand_set_timing() via ns2cycle().
 */
255 struct pxa3xx_nand_timing {
256 unsigned int tCH; /* Enable signal hold time */
257 unsigned int tCS; /* Enable signal setup time */
258 unsigned int tWH; /* ND_nWE high duration */
259 unsigned int tWP; /* ND_nWE pulse time */
260 unsigned int tRH; /* ND_nRE high duration */
261 unsigned int tRP; /* ND_nRE pulse width */
262 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
263 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
264 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
/*
 * Static description of a supported flash part (geometry, bus width,
 * timings). NOTE(review): fragment — name/id members are not visible here
 * but the builtin_flash_types[] initializers below imply their presence.
 */
267 struct pxa3xx_nand_flash {
270 unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */
271 unsigned int page_size; /* Page size in bytes (PAGE_SZ) */
272 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
273 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
274 unsigned int num_blocks; /* Number of physical blocks in Flash */
276 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
/* Timing sets referenced by builtin_flash_types[]; columns follow the
 * member order of struct pxa3xx_nand_timing (tCH..tAR, in ns). */
279 static struct pxa3xx_nand_timing timing[] = {
280 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
281 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
282 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
283 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
/* Known flash parts; entry 0 is the catch-all default used for probing. */
286 static struct pxa3xx_nand_flash builtin_flash_types[] = {
287 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
288 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
289 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
290 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
291 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
292 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
293 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
294 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
295 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
/* On-flash bad-block-table signatures: "MVBbt0" and its mirror "1tbBVM". */
298 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
299 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
/* Primary BBT descriptor, stored in one of the last 8 blocks.
 * NOTE(review): fragment — offs/veroffs/len fields not visible here. */
301 static struct nand_bbt_descr bbt_main_descr = {
302 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
303 | NAND_BBT_2BIT | NAND_BBT_VERSION,
307 .maxblocks = 8, /* Last 8 blocks in each chip */
308 .pattern = bbt_pattern
/* Mirror BBT descriptor; same placement policy, mirrored signature. */
311 static struct nand_bbt_descr bbt_mirror_descr = {
312 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
313 | NAND_BBT_2BIT | NAND_BBT_VERSION,
317 .maxblocks = 8, /* Last 8 blocks in each chip */
318 .pattern = bbt_mirror_pattern
/* OOB layout for 2KiB pages with 4-bit BCH: ECC in bytes 32..63,
 * free OOB at {2,30}. NOTE(review): eccbytes count line not visible. */
321 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
324 32, 33, 34, 35, 36, 37, 38, 39,
325 40, 41, 42, 43, 44, 45, 46, 47,
326 48, 49, 50, 51, 52, 53, 54, 55,
327 56, 57, 58, 59, 60, 61, 62, 63},
328 .oobfree = { {2, 30} }
/* OOB layout for 4KiB pages with 4-bit BCH: two ECC regions (32..63 and
 * 96..127), free OOB chosen so the bootrom's bad-block bytes stay clear. */
331 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
334 32, 33, 34, 35, 36, 37, 38, 39,
335 40, 41, 42, 43, 44, 45, 46, 47,
336 48, 49, 50, 51, 52, 53, 54, 55,
337 56, 57, 58, 59, 60, 61, 62, 63,
338 96, 97, 98, 99, 100, 101, 102, 103,
339 104, 105, 106, 107, 108, 109, 110, 111,
340 112, 113, 114, 115, 116, 117, 118, 119,
341 120, 121, 122, 123, 124, 125, 126, 127},
342 /* Bootrom looks in bytes 0 & 5 for bad blocks */
343 .oobfree = { {6, 26}, { 64, 32} }
/* OOB layout for 4KiB pages with 8-bit BCH.
 * NOTE(review): fragment — eccbytes/oobfree lines are not visible. */
346 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
349 32, 33, 34, 35, 36, 37, 38, 39,
350 40, 41, 42, 43, 44, 45, 46, 47,
351 48, 49, 50, 51, 52, 53, 54, 55,
352 56, 57, 58, 59, 60, 61, 62, 63},
356 /* Define a default flash type setting serve as flash detecting only */
357 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
359 #define NDTR0_tCH(c) (min((c), 7) << 19)
360 #define NDTR0_tCS(c) (min((c), 7) << 16)
361 #define NDTR0_tWH(c) (min((c), 7) << 11)
362 #define NDTR0_tWP(c) (min((c), 7) << 8)
363 #define NDTR0_tRH(c) (min((c), 7) << 3)
364 #define NDTR0_tRP(c) (min((c), 7) << 0)
366 #define NDTR1_tR(c) (min((c), 65535) << 16)
367 #define NDTR1_tWHR(c) (min((c), 15) << 4)
368 #define NDTR1_tAR(c) (min((c), 15) << 0)
/*
 * Convert nanoseconds to NAND controller clock cycles (clk is in Hz).
 * 'clk' is parenthesized so compound expressions passed as the argument
 * are scaled correctly before the division.
 */
#define ns2cycle(ns, clk)	(int)((ns) * ((clk) / 1000000) / 1000)
/* Device-tree match table: compatible string selects the NFC variant. */
373 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
375 .compatible = "marvell,pxa3xx-nand",
376 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
379 .compatible = "marvell,armada370-nand",
380 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
384 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
/*
 * Map the matched DT entry to a controller variant; defaults to the PXA
 * variant. NOTE(review): the guard between the match and the default
 * return is missing from this fragment — presumably 'if (!of_id)'.
 */
386 static enum pxa3xx_nand_variant
387 pxa3xx_nand_get_variant(struct platform_device *pdev)
389 const struct of_device_id *of_id =
390 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
392 return PXA3XX_NAND_VARIANT_PXA;
393 return (enum pxa3xx_nand_variant)of_id->data;
/*
 * Program NDTR0/NDTR1 for the given timing set: convert each ns value to
 * controller clock cycles, cache the register images in 'info' (so they
 * can be restored on chip-select change), then write the hardware.
 */
396 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
397 const struct pxa3xx_nand_timing *t)
399 struct pxa3xx_nand_info *info = host->info_data;
400 unsigned long nand_clk = clk_get_rate(info->clk);
401 uint32_t ndtr0, ndtr1;
403 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
404 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
405 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
406 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
407 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
408 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
410 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
411 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
412 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
414 info->ndtr0cs0 = ndtr0;
415 info->ndtr1cs0 = ndtr1;
416 nand_writel(info, NDTR0CS0, ndtr0);
417 nand_writel(info, NDTR1CS0, ndtr1);
421 * Set the data and OOB size, depending on the selected
422 * spare and ECC configuration.
423 * Only applicable to READ0, READOOB and PAGEPROG commands.
425 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
426 struct mtd_info *mtd)
428 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
430 info->data_size = mtd->writesize;
/* NOTE(review): fragment — the early-return when spare is disabled and
 * the ECC-enable condition guarding the ecc_size addition are missing. */
434 info->oob_size = info->spare_size;
436 info->oob_size += info->ecc_size;
440 * NOTE: it is a must to set ND_RUN firstly, then write
441 * command buffer, otherwise, it does not work.
442 * We enable all the interrupt at the same time, and
443 * let pxa3xx_nand_irq to handle all logic.
445 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
449 ndcr = info->reg_ndcr;
/* Build NDCR from the cached image: ECC (and BCH via NDECCCTRL), DMA and
 * spare enables are applied per current use_* flags.
 * NOTE(review): fragment — the 'if (info->use_ecc)' etc. condition lines
 * are missing from this listing; only the branch bodies are visible. */
454 nand_writel(info, NDECCCTRL, 0x1);
456 ndcr &= ~NDCR_ECC_EN;
458 nand_writel(info, NDECCCTRL, 0x0);
464 ndcr &= ~NDCR_DMA_EN;
467 ndcr |= NDCR_SPARE_EN;
469 ndcr &= ~NDCR_SPARE_EN;
473 /* clear status bits and run */
474 nand_writel(info, NDSR, NDSR_MASK);
475 nand_writel(info, NDCR, 0);
476 nand_writel(info, NDCR, ndcr);
/*
 * Stop the state machine: poll for ND_RUN to self-clear (bounded by
 * NAND_STOP_DELAY), then force it clear and acknowledge all status bits.
 */
479 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
482 int timeout = NAND_STOP_DELAY;
484 /* wait RUN bit in NDCR become 0 */
485 ndcr = nand_readl(info, NDCR);
486 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
487 ndcr = nand_readl(info, NDCR);
/* NOTE(review): fragment — loop body delay (likely udelay) not visible. */
492 ndcr &= ~NDCR_ND_RUN;
493 nand_writel(info, NDCR, ndcr);
495 /* clear status bits */
496 nand_writel(info, NDSR, NDSR_MASK);
/* Unmask interrupts: NDCR interrupt bits are active-low masks, so
 * enabling means clearing the bits. */
499 static void __maybe_unused
500 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
504 ndcr = nand_readl(info, NDCR);
505 nand_writel(info, NDCR, ndcr & ~int_mask);
/* Mask interrupts: set the corresponding NDCR mask bits. */
508 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
512 ndcr = nand_readl(info, NDCR);
513 nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read 'len' 32-bit words from the NDDB data FIFO into 'data'.
 * With BCH enabled the datasheet requires re-checking NDSR.RDDREQ after
 * every 32 bytes, hence the 8-words-at-a-time polling loop; on timeout
 * the error is logged and draining continues best-effort.
 * NOTE(review): fragment — the loop construct around the 8-word reads and
 * the non-BCH branch structure are not visible in this listing.
 */
516 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
523 * According to the datasheet, when reading from NDDB
524 * with BCH enabled, after each 32 bytes reads, we
525 * have to make sure that the NDSR.RDDREQ bit is set.
527 * Drain the FIFO 8 32 bits reads at a time, and skip
528 * the polling on the last read.
531 readsl(info->mmio_base + NDDB, data, 8);
533 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
534 val & NDSR_RDDREQ, 1000, 5000);
536 dev_err(&info->pdev->dev,
537 "Timeout on RDDREQ while draining the FIFO\n");
546 readsl(info->mmio_base + NDDB, data, len);
/*
 * PIO transfer of one chunk (up to chunk_size) plus its OOB between the
 * driver bounce buffers and the NDDB FIFO, direction chosen by
 * info->state; buffer positions are advanced afterwards so multi-chunk
 * pages can be handled by repeated calls.
 */
549 static void handle_data_pio(struct pxa3xx_nand_info *info)
551 unsigned int do_bytes = min(info->data_size, info->chunk_size);
553 switch (info->state) {
554 case STATE_PIO_WRITING:
555 writesl(info->mmio_base + NDDB,
556 info->data_buff + info->data_buff_pos,
557 DIV_ROUND_UP(do_bytes, 4));
559 if (info->oob_size > 0)
560 writesl(info->mmio_base + NDDB,
561 info->oob_buff + info->oob_buff_pos,
562 DIV_ROUND_UP(info->oob_size, 4));
564 case STATE_PIO_READING:
/* NOTE(review): fragment — reads presumably go through drain_fifo();
 * the callee name lines are missing from this listing. */
566 info->data_buff + info->data_buff_pos,
567 DIV_ROUND_UP(do_bytes, 4));
569 if (info->oob_size > 0)
571 info->oob_buff + info->oob_buff_pos,
572 DIV_ROUND_UP(info->oob_size, 4));
575 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
580 /* Update buffer pointers for multi-page read/write */
581 info->data_buff_pos += do_bytes;
582 info->oob_buff_pos += info->oob_size;
583 info->data_size -= do_bytes;
/*
 * Kick a PXA DMA transfer for the current data+OOB payload: fill the
 * single descriptor (length 32-byte aligned, width/burst 32), point
 * source/target at the bounce buffer or NDDB depending on direction,
 * then map the request channel and set DCSR_RUN.
 */
587 static void start_data_dma(struct pxa3xx_nand_info *info)
589 struct pxa_dma_desc *desc = info->data_desc;
590 int dma_len = ALIGN(info->data_size + info->oob_size, 32);
592 desc->ddadr = DDADR_STOP;
593 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
595 switch (info->state) {
596 case STATE_DMA_WRITING:
597 desc->dsadr = info->data_buff_phys;
598 desc->dtadr = info->mmio_phys + NDDB;
599 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
601 case STATE_DMA_READING:
602 desc->dtadr = info->data_buff_phys;
603 desc->dsadr = info->mmio_phys + NDDB;
604 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
607 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
612 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
613 DDADR(info->data_dma_ch) = info->data_desc_addr;
614 DCSR(info->data_dma_ch) |= DCSR_RUN;
/*
 * DMA completion callback: acknowledge DCSR, flag bus errors in
 * info->retcode, then advance to STATE_DMA_DONE, re-enable controller
 * interrupts and clear the data-request status bits.
 */
617 static void pxa3xx_nand_data_dma_irq(int channel, void *data)
619 struct pxa3xx_nand_info *info = data;
622 dcsr = DCSR(channel);
623 DCSR(channel) = dcsr;
625 if (dcsr & DCSR_BUSERR) {
626 info->retcode = ERR_DMABUSERR;
629 info->state = STATE_DMA_DONE;
630 enable_int(info, NDCR_INT_MASK);
631 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/* No-op stub for builds without PXA DMA support (non-ARM/PXA configs). */
634 static void start_data_dma(struct pxa3xx_nand_info *info)
/*
 * Threaded half of the IRQ handler: perform the (potentially slow) PIO
 * transfer outside hard-IRQ context, then mark the command done and
 * clear the data-request bits.
 */
638 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
640 struct pxa3xx_nand_info *info = data;
642 handle_data_pio(info);
644 info->state = STATE_CMD_DONE;
645 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Main interrupt handler. Decodes NDSR: records ECC results (per-chunk
 * corrected count on Armada 370, max_bitflips accumulation), dispatches
 * data requests to DMA or to the threaded PIO handler, notes command
 * completion / device ready, and on WRCMDREQ loads the prepared NDCBx
 * words into the command buffer. Completions are signalled at the end.
 * NOTE(review): fragmentary listing — chip-select selection for the
 * ready/cmd_done bits, the use_dma test, and the is_completed/is_ready
 * assignments are among the missing lines; comments below describe only
 * what is visible.
 */
650 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
652 struct pxa3xx_nand_info *info = devid;
653 unsigned int status, is_completed = 0, is_ready = 0;
654 unsigned int ready, cmd_done;
655 irqreturn_t ret = IRQ_HANDLED;
658 ready = NDSR_FLASH_RDY;
659 cmd_done = NDSR_CS0_CMDD;
662 cmd_done = NDSR_CS1_CMDD;
665 status = nand_readl(info, NDSR);
667 if (status & NDSR_UNCORERR)
668 info->retcode = ERR_UNCORERR;
669 if (status & NDSR_CORERR) {
670 info->retcode = ERR_CORERR;
671 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
673 info->ecc_err_cnt = NDSR_ERR_CNT(status);
675 info->ecc_err_cnt = 1;
678 * Each chunk composing a page is corrected independently,
679 * and we need to store maximum number of corrected bitflips
680 * to return it to the MTD layer in ecc.read_page().
682 info->max_bitflips = max_t(unsigned int,
686 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
687 /* whether use dma to transfer data */
689 disable_int(info, NDCR_INT_MASK);
690 info->state = (status & NDSR_RDDREQ) ?
691 STATE_DMA_READING : STATE_DMA_WRITING;
692 start_data_dma(info);
693 goto NORMAL_IRQ_EXIT;
695 info->state = (status & NDSR_RDDREQ) ?
696 STATE_PIO_READING : STATE_PIO_WRITING;
697 ret = IRQ_WAKE_THREAD;
698 goto NORMAL_IRQ_EXIT;
701 if (status & cmd_done) {
702 info->state = STATE_CMD_DONE;
705 if (status & ready) {
706 info->state = STATE_READY;
711 * Clear all status bit before issuing the next command, which
712 * can and will alter the status bits and will deserve a new
713 * interrupt on its own. This lets the controller exit the IRQ
715 nand_writel(info, NDSR, status);
717 if (status & NDSR_WRCMDREQ) {
718 status &= ~NDSR_WRCMDREQ;
719 info->state = STATE_CMD_HANDLE;
722 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
723 * must be loaded by writing directly either 12 or 16
724 * bytes directly to NDCB0, four bytes at a time.
726 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
727 * but each NDCBx register can be read.
729 nand_writel(info, NDCB0, info->ndcb0);
730 nand_writel(info, NDCB0, info->ndcb1);
731 nand_writel(info, NDCB0, info->ndcb2);
733 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
734 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
735 nand_writel(info, NDCB0, info->ndcb3);
739 complete(&info->cmd_complete);
741 complete(&info->dev_ready);
/* Return non-zero when the buffer is all 0xFF (an erased/blank page).
 * NOTE(review): loop body (the per-byte 0xFF test) is missing from this
 * fragment. */
746 static inline int is_buf_blank(uint8_t *buf, size_t len)
748 for (; len > 0; len--)
/*
 * Encode column/page address into NDCB1/NDCB2. Small-page (<2KiB)
 * devices pack the page address shifted by 8 into NDCB1; large-page
 * devices use a 16-bit column + low 16 page bits in NDCB1 and the high
 * page byte in NDCB2 when needed.
 */
754 static void set_command_address(struct pxa3xx_nand_info *info,
755 unsigned int page_size, uint16_t column, int page_addr)
757 /* small page addr setting */
758 if (page_size < PAGE_CHUNK_SIZE) {
759 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
764 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
767 if (page_addr & 0xFF0000)
768 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command bookkeeping (buffer positions, retcode, error count)
 * before issuing 'command'; for data commands compute data/OOB sizes and
 * pre-fill the bounce buffer with 0xFF for reads / write-address setup.
 */
774 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
776 struct pxa3xx_nand_host *host = info->host[info->cs];
777 struct mtd_info *mtd = host->mtd;
779 /* reset data and oob column point to handle data */
783 info->data_buff_pos = 0;
784 info->oob_buff_pos = 0;
787 info->retcode = ERR_NONE;
788 info->ecc_err_cnt = 0;
794 case NAND_CMD_PAGEPROG:
796 case NAND_CMD_READOOB:
797 pxa3xx_set_datasize(info, mtd);
809 * If we are about to issue a read command, or about to set
810 * the write address, then clean the data buffer.
812 if (command == NAND_CMD_READ0 ||
813 command == NAND_CMD_READOOB ||
814 command == NAND_CMD_SEQIN) {
816 info->buf_count = mtd->writesize + mtd->oobsize;
817 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate a NAND command into NDCB0..NDCB3 register values for the
 * controller, including extended-command fields for chunked (>2KiB page)
 * reads and writes. Returns whether a controller transaction must
 * actually be executed (exec_cmd).
 * NOTE(review): heavily truncated listing — the switch statement head,
 * several address-cycle assignments, break statements and the final
 * return are missing; comments describe only visible logic.
 */
822 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
823 int ext_cmd_type, uint16_t column, int page_addr)
825 int addr_cycle, exec_cmd;
826 struct pxa3xx_nand_host *host;
827 struct mtd_info *mtd;
829 host = info->host[info->cs];
835 info->ndcb0 = NDCB0_CSEL;
839 if (command == NAND_CMD_SEQIN)
842 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
843 + host->col_addr_cycles);
846 case NAND_CMD_READOOB:
848 info->buf_start = column;
849 info->ndcb0 |= NDCB0_CMD_TYPE(0)
853 if (command == NAND_CMD_READOOB)
854 info->buf_start += mtd->writesize;
857 * Multiple page read needs an 'extended command type' field,
858 * which is either naked-read or last-read according to the
861 if (mtd->writesize == PAGE_CHUNK_SIZE) {
862 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
863 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
864 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
866 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
867 info->ndcb3 = info->chunk_size +
871 set_command_address(info, mtd->writesize, column, page_addr);
876 info->buf_start = column;
877 set_command_address(info, mtd->writesize, 0, page_addr);
880 * Multiple page programming needs to execute the initial
881 * SEQIN command that sets the page address.
883 if (mtd->writesize > PAGE_CHUNK_SIZE) {
884 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
885 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
888 /* No data transfer in this case */
894 case NAND_CMD_PAGEPROG:
895 if (is_buf_blank(info->data_buff,
896 (mtd->writesize + mtd->oobsize))) {
901 /* Second command setting for large pages */
902 if (mtd->writesize > PAGE_CHUNK_SIZE) {
904 * Multiple page write uses the 'extended command'
905 * field. This can be used to issue a command dispatch
906 * or a naked-write depending on the current stage.
908 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
910 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
911 info->ndcb3 = info->chunk_size +
915 * This is the command dispatch that completes a chunked
916 * page program operation.
918 if (info->data_size == 0) {
919 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
920 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
927 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
931 | (NAND_CMD_PAGEPROG << 8)
938 info->buf_count = INIT_BUFFER_SIZE;
939 info->ndcb0 |= NDCB0_CMD_TYPE(0)
943 info->ndcb1 = (column & 0xFF);
944 info->ndcb3 = INIT_BUFFER_SIZE;
945 info->data_size = INIT_BUFFER_SIZE;
948 case NAND_CMD_READID:
949 info->buf_count = READ_ID_BYTES;
950 info->ndcb0 |= NDCB0_CMD_TYPE(3)
953 info->ndcb1 = (column & 0xFF);
957 case NAND_CMD_STATUS:
959 info->ndcb0 |= NDCB0_CMD_TYPE(4)
966 case NAND_CMD_ERASE1:
967 info->ndcb0 |= NDCB0_CMD_TYPE(2)
971 | (NAND_CMD_ERASE2 << 8)
973 info->ndcb1 = page_addr;
978 info->ndcb0 |= NDCB0_CMD_TYPE(5)
983 case NAND_CMD_ERASE2:
989 dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * mtd cmdfunc hook for chips whose page fits one controller chunk:
 * adjust the column for x16 devices, reprogram timings on chip-select
 * change, prepare NDCBx via prepare_set_command(), start the controller
 * and wait (bounded) for command completion.
 * NOTE(review): fragment — the 'column /= 2' for x16 and the
 * 'if (exec_cmd)' guard lines are missing from this listing.
 */
997 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
998 int column, int page_addr)
1000 struct pxa3xx_nand_host *host = mtd->priv;
1001 struct pxa3xx_nand_info *info = host->info_data;
1005 * if this is a x16 device ,then convert the input
1006 * "byte" address into a "word" address appropriate
1007 * for indexing a word-oriented device
1009 if (info->reg_ndcr & NDCR_DWIDTH_M)
1013 * There may be different NAND chip hooked to
1014 * different chip select, so check whether
1015 * chip select has been changed, if yes, reset the timing
1017 if (info->cs != host->cs) {
1018 info->cs = host->cs;
1019 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1020 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1023 prepare_start_command(info, command);
1025 info->state = STATE_PREPARED;
1026 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1029 init_completion(&info->cmd_complete);
1030 init_completion(&info->dev_ready);
1031 info->need_wait = 1;
1032 pxa3xx_nand_start(info);
1034 if (!wait_for_completion_timeout(&info->cmd_complete,
1035 CHIP_DELAY_TIMEOUT)) {
1036 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1037 /* Stop State Machine for next command cycle */
1038 pxa3xx_nand_stop(info);
1041 info->state = STATE_IDLE;
/*
 * cmdfunc hook for large pages that need multiple controller chunks:
 * drives a loop of extended commands (monolithic / naked / last-naked
 * read-writes, SEQIN dispatch, PAGEPROG naked-write then dispatch) until
 * data_size reaches zero, with a bounded wait per iteration.
 * NOTE(review): fragment — the surrounding do/while construct, the
 * 'column /= 2' for x16 and several default/break lines are missing.
 */
1044 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1045 const unsigned command,
1046 int column, int page_addr)
1048 struct pxa3xx_nand_host *host = mtd->priv;
1049 struct pxa3xx_nand_info *info = host->info_data;
1050 int exec_cmd, ext_cmd_type;
1053 * if this is a x16 device then convert the input
1054 * "byte" address into a "word" address appropriate
1055 * for indexing a word-oriented device
1057 if (info->reg_ndcr & NDCR_DWIDTH_M)
1061 * There may be different NAND chip hooked to
1062 * different chip select, so check whether
1063 * chip select has been changed, if yes, reset the timing
1065 if (info->cs != host->cs) {
1066 info->cs = host->cs;
1067 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1068 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1071 /* Select the extended command for the first command */
1073 case NAND_CMD_READ0:
1074 case NAND_CMD_READOOB:
1075 ext_cmd_type = EXT_CMD_TYPE_MONO;
1077 case NAND_CMD_SEQIN:
1078 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1080 case NAND_CMD_PAGEPROG:
1081 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1088 prepare_start_command(info, command);
1091 * Prepare the "is ready" completion before starting a command
1092 * transaction sequence. If the command is not executed the
1093 * completion will be completed, see below.
1095 * We can do that inside the loop because the command variable
1096 * is invariant and thus so is the exec_cmd.
1098 info->need_wait = 1;
1099 init_completion(&info->dev_ready);
1101 info->state = STATE_PREPARED;
1102 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1105 info->need_wait = 0;
1106 complete(&info->dev_ready);
1110 init_completion(&info->cmd_complete);
1111 pxa3xx_nand_start(info);
1113 if (!wait_for_completion_timeout(&info->cmd_complete,
1114 CHIP_DELAY_TIMEOUT)) {
1115 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1116 /* Stop State Machine for next command cycle */
1117 pxa3xx_nand_stop(info);
1121 /* Check if the sequence is complete */
1122 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1126 * After a splitted program command sequence has issued
1127 * the command dispatch, the command sequence is complete.
1129 if (info->data_size == 0 &&
1130 command == NAND_CMD_PAGEPROG &&
1131 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1134 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1135 /* Last read: issue a 'last naked read' */
1136 if (info->data_size == info->chunk_size)
1137 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1139 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1142 * If a splitted program command has no more data to transfer,
1143 * the command dispatch must be issued to complete.
1145 } else if (command == NAND_CMD_PAGEPROG &&
1146 info->data_size == 0) {
1147 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1151 info->state = STATE_IDLE;
/* ecc.write_page hook: hardware does the ECC, so just push data + OOB
 * into the bounce buffer via write_buf. */
1154 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1155 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1157 chip->write_buf(mtd, buf, mtd->writesize);
1158 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page hook: copy data + OOB out of the bounce buffer, then
 * fold the controller's ECC outcome into mtd->ecc_stats. A page that is
 * entirely 0xFF is treated as blank rather than uncorrectable, since the
 * hardware computes ECC 0 for erased pages which mismatches the OOB.
 * Returns max_bitflips as required by the MTD contract.
 */
1163 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1164 struct nand_chip *chip, uint8_t *buf, int oob_required,
1167 struct pxa3xx_nand_host *host = mtd->priv;
1168 struct pxa3xx_nand_info *info = host->info_data;
1170 chip->read_buf(mtd, buf, mtd->writesize);
1171 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1173 if (info->retcode == ERR_CORERR && info->use_ecc) {
1174 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1176 } else if (info->retcode == ERR_UNCORERR) {
1178 * for blank page (all 0xff), HW will calculate its ECC as
1179 * 0, which is different from the ECC information within
1180 * OOB, ignore such uncorrectable errors
1182 if (is_buf_blank(buf, mtd->writesize))
1183 info->retcode = ERR_NONE;
1185 mtd->ecc_stats.failed++;
1188 return info->max_bitflips;
/* read_byte hook: consume one byte from the bounce buffer, bounds-checked
 * against buf_count. NOTE(review): default return value line (presumably
 * 0xFF) is missing from this fragment. */
1191 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1193 struct pxa3xx_nand_host *host = mtd->priv;
1194 struct pxa3xx_nand_info *info = host->info_data;
1197 if (info->buf_start < info->buf_count)
1198 /* Has just send a new command? */
1199 retval = info->data_buff[info->buf_start++];
/* read_word hook: consume one aligned 16-bit word from the bounce buffer;
 * returns 0xFFFF when out of range or misaligned. */
1206 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1208 struct pxa3xx_nand_host *host = mtd->priv;
1209 struct pxa3xx_nand_info *info = host->info_data;
1210 u16 retval = 0xFFFF;
1210 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1211 retval = *((u16 *)(info->data_buff+info->buf_start));
1212 info->buf_start += 2;
/* read_buf hook: copy up to 'len' bytes out of the bounce buffer,
 * clamped to what remains, and advance the read position. */
1217 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1219 struct pxa3xx_nand_host *host = mtd->priv;
1220 struct pxa3xx_nand_info *info = host->info_data;
1221 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1223 memcpy(buf, info->data_buff + info->buf_start, real_len);
1224 info->buf_start += real_len;
/* write_buf hook: mirror of read_buf — copy caller data into the bounce
 * buffer, clamped to remaining space, and advance the position. */
1227 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1228 const uint8_t *buf, int len)
1230 struct pxa3xx_nand_host *host = mtd->priv;
1231 struct pxa3xx_nand_info *info = host->info_data;
1232 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1234 memcpy(info->data_buff + info->buf_start, buf, real_len);
1235 info->buf_start += real_len;
/* select_chip hook — intentionally empty here; chip-select handling is
 * done in the cmdfunc paths (timing reload on cs change). */
1238 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * waitfunc hook: if a command sequence is outstanding, wait (bounded) on
 * the dev_ready completion; then for write/erase report success or
 * failure based on the recorded retcode. Reads always report READY since
 * command completion was already awaited in cmdfunc.
 */
1243 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1245 struct pxa3xx_nand_host *host = mtd->priv;
1246 struct pxa3xx_nand_info *info = host->info_data;
1248 if (info->need_wait) {
1249 info->need_wait = 0;
1250 if (!wait_for_completion_timeout(&info->dev_ready,
1251 CHIP_DELAY_TIMEOUT)) {
1252 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1253 return NAND_STATUS_FAIL;
1257 /* pxa3xx_nand_send_command has waited for command complete */
1258 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1259 if (info->retcode == ERR_NONE)
1262 return NAND_STATUS_FAIL;
1265 return NAND_STATUS_READY;
/*
 * Configure the controller for a specific flash description: validate
 * page size (512/2048 only) and bus width (8/16 only), derive column and
 * row address cycle counts, build the cached NDCR image (arbiter, page
 * geometry, widths, ID count, spare enabled by default) and program the
 * timing registers. Returns 0 on success, negative on invalid geometry
 * (error-return lines not visible in this fragment).
 */
1268 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1269 const struct pxa3xx_nand_flash *f)
1271 struct platform_device *pdev = info->pdev;
1272 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1273 struct pxa3xx_nand_host *host = info->host[info->cs];
1274 uint32_t ndcr = 0x0; /* enable all interrupts */
1276 if (f->page_size != 2048 && f->page_size != 512) {
1277 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1281 if (f->flash_width != 16 && f->flash_width != 8) {
1282 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1286 /* calculate addressing information */
1287 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1289 if (f->num_blocks * f->page_per_block > 65536)
1290 host->row_addr_cycles = 3;
1292 host->row_addr_cycles = 2;
1294 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1295 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1296 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1297 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1298 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1299 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1301 ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1302 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1304 info->reg_ndcr = ndcr;
1306 pxa3xx_nand_set_timing(host, f->timing);
/*
 * Adopt the configuration left by the bootloader: read NDCR, infer the
 * chunk size from PAGE_SZ, cache NDCR (with interrupts masked off) and
 * the current timing registers.
 */
1310 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1312 uint32_t ndcr = nand_readl(info, NDCR);
1314 /* Set an initial chunk size */
1315 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1316 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1317 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1318 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/*
 * Allocate the data/OOB buffer for this controller instance — variant
 * used when the platform can do DMA (presumably inside the
 * ARCH_HAS_DMA conditional; the #ifdef lines are elided in this view).
 *
 * Non-DMA path uses a plain kmalloc() buffer; DMA path allocates a
 * coherent buffer whose tail holds the pxa_dma_desc descriptor, then
 * claims a PXA DMA channel for data transfers.
 */
1323 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1325 struct platform_device *pdev = info->pdev;
/* The DMA descriptor lives at the very end of the coherent buffer. */
1326 int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
1329 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1330 if (info->data_buff == NULL)
1335 info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
1336 &info->data_buff_phys, GFP_KERNEL);
1337 if (info->data_buff == NULL) {
1338 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
1342 info->data_desc = (void *)info->data_buff + data_desc_offset;
1343 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1345 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1346 pxa3xx_nand_data_dma_irq, info);
1347 if (info->data_dma_ch < 0) {
1348 dev_err(&pdev->dev, "failed to request data dma\n");
/* Channel request failed: undo the coherent allocation. */
1349 dma_free_coherent(&pdev->dev, info->buf_size,
1350 info->data_buff, info->data_buff_phys);
1351 return info->data_dma_ch;
1355 * Now that DMA buffers are allocated we turn on
1356 * DMA proper for I/O operations.
/*
 * Release the buffers allocated by pxa3xx_nand_init_buff() — DMA-capable
 * variant: frees the DMA channel and coherent buffer when DMA was in
 * use, otherwise frees the plain kmalloc() buffer.
 */
1362 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1364 struct platform_device *pdev = info->pdev;
1365 if (info->use_dma) {
1366 pxa_free_dma(info->data_dma_ch);
1367 dma_free_coherent(&pdev->dev, info->buf_size,
1368 info->data_buff, info->data_buff_phys);
1370 kfree(info->data_buff);
/*
 * Buffer allocation fallback for platforms without DMA support
 * (presumably the #else branch of ARCH_HAS_DMA — the conditional
 * lines are elided in this view): just a kmalloc() buffer.
 */
1374 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1376 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1377 if (info->data_buff == NULL)
/* Non-DMA counterpart: free the kmalloc()'d data buffer. */
1382 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1384 kfree(info->data_buff);
/*
 * Probe whether a chip is present on the current chip select: program
 * conservative default timings (builtin_flash_types[0]), issue a
 * NAND RESET and check the resulting status for failure.
 */
1388 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1390 struct mtd_info *mtd;
1391 struct nand_chip *chip;
1394 mtd = info->host[info->cs]->mtd;
1397 /* use the common timing to make a try */
1398 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
/* A failing RESET/status read means no (working) chip is attached. */
1402 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1403 ret = chip->waitfunc(mtd, chip);
1404 if (ret & NAND_STATUS_FAIL)
/*
 * Select the hardware ECC scheme matching the requested @strength /
 * @ecc_stepsize for the given @page_size, filling in the per-chunk
 * layout (chunk, spare and ECC byte counts) and the nand_ecc_ctrl.
 * Unsupported combinations are reported and rejected.
 *
 * NOTE(review): sampled view — ecc->strength/size assignments and the
 * BCH-enable lines between the visible statements are elided.
 */
1410 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1411 struct nand_ecc_ctrl *ecc,
1412 int strength, int ecc_stepsize, int page_size)
/* 1-bit Hamming per 512B on a 2KB page device. */
1414 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1415 info->chunk_size = 2048;
1416 info->spare_size = 40;
1417 info->ecc_size = 24;
1418 ecc->mode = NAND_ECC_HW;
/* 1-bit Hamming per 512B on a 512B page device. */
1422 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1423 info->chunk_size = 512;
1424 info->spare_size = 8;
1426 ecc->mode = NAND_ECC_HW;
1431 * Required ECC: 4-bit correction per 512 bytes
1432 * Select: 16-bit correction per 2048 bytes
1434 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1436 info->chunk_size = 2048;
1437 info->spare_size = 32;
1438 info->ecc_size = 32;
1439 ecc->mode = NAND_ECC_HW;
1440 ecc->size = info->chunk_size;
1441 ecc->layout = &ecc_layout_2KB_bch4bit;
/* Same 4-bit/512B requirement on a 4KB page device. */
1444 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1446 info->chunk_size = 2048;
1447 info->spare_size = 32;
1448 info->ecc_size = 32;
1449 ecc->mode = NAND_ECC_HW;
1450 ecc->size = info->chunk_size;
1451 ecc->layout = &ecc_layout_4KB_bch4bit;
1455 * Required ECC: 8-bit correction per 512 bytes
1456 * Select: 16-bit correction per 1024 bytes
1458 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1460 info->chunk_size = 1024;
1461 info->spare_size = 0;
1462 info->ecc_size = 32;
1463 ecc->mode = NAND_ECC_HW;
1464 ecc->size = info->chunk_size;
1465 ecc->layout = &ecc_layout_4KB_bch8bit;
/* No scheme matched: report the unsupported combination. */
1468 dev_err(&info->pdev->dev,
1469 "ECC strength %d at page size %d is not supported\n",
1470 strength, page_size);
1474 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1475 ecc->strength, ecc->size);
/*
 * Full device scan for one chip select: sense the chip, read its ID,
 * match it against the builtin flash table, configure the controller,
 * run the generic nand_scan_ident()/nand_scan_tail() pair, set up
 * bad-block-table options, ECC, addressing cycles, and replace the
 * small detection buffer with the real data+OOB buffer.
 *
 * NOTE(review): sampled view — early returns, else-branches and some
 * intermediate statements are elided; code left byte-identical.
 */
1479 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1481 struct pxa3xx_nand_host *host = mtd->priv;
1482 struct pxa3xx_nand_info *info = host->info_data;
1483 struct platform_device *pdev = info->pdev;
1484 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1485 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1486 const struct pxa3xx_nand_flash *f = NULL;
1487 struct nand_chip *chip = mtd->priv;
1491 uint16_t ecc_strength, ecc_step;
/* Honor a bootloader-provided configuration when asked to. */
1493 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1496 /* Set a default chunk size */
1497 info->chunk_size = 512;
1499 ret = pxa3xx_nand_sensing(info);
1501 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
/* Read the two ID bytes straight out of the detection buffer. */
1507 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1508 id = *((uint16_t *)(info->data_buff));
1510 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
1512 dev_warn(&info->pdev->dev,
1513 "Read out ID 0, potential timing set wrong!!\n");
/* Entry 0 holds default timings only; real parts start at index 1. */
1518 num = ARRAY_SIZE(builtin_flash_types) - 1;
1519 for (i = 0; i < num; i++) {
1520 f = &builtin_flash_types[i + 1];
1522 /* find the chip in default list */
1523 if (f->chip_id == id)
1527 if (i >= (ARRAY_SIZE(builtin_flash_types) - 1)) {
1528 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
1533 ret = pxa3xx_nand_config_flash(info, f);
1535 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
/* Build a one-entry flash_ids table for nand_scan_ident(). */
1539 memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1541 pxa3xx_flash_ids[0].name = f->name;
1542 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1543 pxa3xx_flash_ids[0].pagesize = f->page_size;
1544 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1545 pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1546 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1547 if (f->flash_width == 16)
1548 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1549 pxa3xx_flash_ids[1].name = NULL;
1550 def = pxa3xx_flash_ids;
1552 if (info->reg_ndcr & NDCR_DWIDTH_M)
1553 chip->options |= NAND_BUSWIDTH_16;
1555 /* Device detection must be done with ECC disabled */
1556 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1557 nand_writel(info, NDECCCTRL, 0x0);
1559 if (nand_scan_ident(mtd, 1, def))
1562 if (pdata->flash_bbt) {
1564 * We'll use a bad block table stored in-flash and don't
1565 * allow writing the bad block marker to the flash.
1567 chip->bbt_options |= NAND_BBT_USE_FLASH |
1568 NAND_BBT_NO_OOB_BBM;
1569 chip->bbt_td = &bbt_main_descr;
1570 chip->bbt_md = &bbt_mirror_descr;
1574 * If the page size is bigger than the FIFO size, let's check
1575 * we are given the right variant and then switch to the extended
1576 * (aka split) command handling,
1578 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1579 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1580 chip->cmdfunc = nand_cmdfunc_extended;
1582 dev_err(&info->pdev->dev,
1583 "unsupported page size on this variant\n");
/* Platform-data ECC settings win over the chip's ONFI values. */
1588 if (pdata->ecc_strength && pdata->ecc_step_size) {
1589 ecc_strength = pdata->ecc_strength;
1590 ecc_step = pdata->ecc_step_size;
1592 ecc_strength = chip->ecc_strength_ds;
1593 ecc_step = chip->ecc_step_ds;
1596 /* Set default ECC strength requirements on non-ONFI devices */
1597 if (ecc_strength < 1 && ecc_step < 1) {
1602 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1603 ecc_step, mtd->writesize);
1607 /* calculate addressing information */
1608 if (mtd->writesize >= 2048)
1609 host->col_addr_cycles = 2;
1611 host->col_addr_cycles = 1;
1613 /* release the initial buffer */
1614 kfree(info->data_buff);
1616 /* allocate the real data + oob buffer */
1617 info->buf_size = mtd->writesize + mtd->oobsize;
1618 ret = pxa3xx_nand_init_buff(info);
1621 info->oob_buff = info->data_buff + mtd->writesize;
/* More than 64K pages needs a third row-address cycle. */
1623 if ((mtd->size >> chip->page_shift) > 65536)
1624 host->row_addr_cycles = 3;
1626 host->row_addr_cycles = 2;
1627 return nand_scan_tail(mtd);
/*
 * Allocate and wire up all per-controller resources: the combined
 * info/mtd/host allocation (one mtd+host pair per chip select), the
 * nand_chip callbacks, clock, DMA channel numbers, IRQ, MMIO mapping
 * and the initial flash-detection buffer. On success the info struct
 * is stored as platform drvdata.
 *
 * NOTE(review): sampled view — some error checks, labels and closing
 * braces between the visible statements are elided.
 */
1630 static int alloc_nand_resource(struct platform_device *pdev)
1632 struct pxa3xx_nand_platform_data *pdata;
1633 struct pxa3xx_nand_info *info;
1634 struct pxa3xx_nand_host *host;
1635 struct nand_chip *chip = NULL;
1636 struct mtd_info *mtd;
1640 pdata = dev_get_platdata(&pdev->dev);
1641 if (pdata->num_cs <= 0)
/* Single allocation: info followed by num_cs (mtd + host) pairs. */
1643 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1644 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1649 info->variant = pxa3xx_nand_get_variant(pdev);
1650 for (cs = 0; cs < pdata->num_cs; cs++) {
/* Carve this CS's mtd/chip/host out of the tail of the allocation. */
1651 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1652 chip = (struct nand_chip *)(&mtd[1]);
/* host embeds the nand_chip as its first member. */
1653 host = (struct pxa3xx_nand_host *)chip;
1654 info->host[cs] = host;
1657 host->info_data = info;
1659 mtd->owner = THIS_MODULE;
/* Hook up controller-specific nand_chip operations. */
1661 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1662 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1663 chip->controller = &info->controller;
1664 chip->waitfunc = pxa3xx_nand_waitfunc;
1665 chip->select_chip = pxa3xx_nand_select_chip;
1666 chip->read_word = pxa3xx_nand_read_word;
1667 chip->read_byte = pxa3xx_nand_read_byte;
1668 chip->read_buf = pxa3xx_nand_read_buf;
1669 chip->write_buf = pxa3xx_nand_write_buf;
1670 chip->options |= NAND_NO_SUBPAGE_WRITE;
1671 chip->cmdfunc = nand_cmdfunc;
1674 spin_lock_init(&chip->controller->lock);
1675 init_waitqueue_head(&chip->controller->wq);
1676 info->clk = devm_clk_get(&pdev->dev, NULL);
1677 if (IS_ERR(info->clk)) {
1678 dev_err(&pdev->dev, "failed to get nand clock\n");
1679 return PTR_ERR(info->clk);
1681 ret = clk_prepare_enable(info->clk);
1687 * This is a dirty hack to make this driver work from
1688 * devicetree bindings. It can be removed once we have
1689 * a proper DMA controller framework for DT.
/* PXA3xx DT boards use hard-coded DRCMR numbers for data/cmd DMA. */
1691 if (pdev->dev.of_node &&
1692 of_machine_is_compatible("marvell,pxa3xx")) {
1693 info->drcmr_dat = 97;
1694 info->drcmr_cmd = 99;
/* Otherwise DMA channel numbers come from platform resources. */
1696 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1699 "no resource defined for data DMA\n");
1701 goto fail_disable_clk;
1703 info->drcmr_dat = r->start;
1705 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1708 "no resource defined for cmd DMA\n");
1710 goto fail_disable_clk;
1712 info->drcmr_cmd = r->start;
1716 irq = platform_get_irq(pdev, 0);
1718 dev_err(&pdev->dev, "no IRQ resource defined\n");
1720 goto fail_disable_clk;
1723 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1724 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1725 if (IS_ERR(info->mmio_base)) {
1726 ret = PTR_ERR(info->mmio_base);
1727 goto fail_disable_clk;
1729 info->mmio_phys = r->start;
1731 /* Allocate a buffer to allow flash detection */
1732 info->buf_size = INIT_BUFFER_SIZE;
1733 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1734 if (info->data_buff == NULL) {
1736 goto fail_disable_clk;
1739 /* initialize all interrupts to be disabled */
1740 disable_int(info, NDSR_MASK);
1742 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1743 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1746 dev_err(&pdev->dev, "failed to request IRQ\n");
1750 platform_set_drvdata(pdev, info);
/* Error unwind: release in reverse acquisition order. */
1755 free_irq(irq, info);
1756 kfree(info->data_buff);
1758 clk_disable_unprepare(info->clk);
/*
 * Tear down the driver instance: release the IRQ and buffers, stop the
 * clock, then unregister the MTD device on every chip select.
 * (devm_-managed resources — clk handle, MMIO mapping, info struct —
 * are released automatically by the driver core.)
 */
1762 static int pxa3xx_nand_remove(struct platform_device *pdev)
1764 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1765 struct pxa3xx_nand_platform_data *pdata;
1771 pdata = dev_get_platdata(&pdev->dev);
1773 irq = platform_get_irq(pdev, 0);
1775 free_irq(irq, info);
1776 pxa3xx_nand_free_buff(info);
1778 clk_disable_unprepare(info->clk);
1780 for (cs = 0; cs < pdata->num_cs; cs++)
1781 nand_release(info->host[cs]->mtd);
/*
 * Build platform data from devicetree properties: arbiter enable,
 * keep-config flag, chip-select count, on-flash BBT and ECC
 * strength/step. Negative ECC lookups (property absent) are clamped
 * to 0 so later code treats them as "not specified". The resulting
 * pdata is attached to the device.
 */
1785 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1787 struct pxa3xx_nand_platform_data *pdata;
1788 struct device_node *np = pdev->dev.of_node;
1789 const struct of_device_id *of_id =
1790 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1795 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1799 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1800 pdata->enable_arbiter = 1;
1801 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1802 pdata->keep_config = 1;
1803 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1804 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
/* A negative return means the DT property is absent/invalid. */
1806 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1807 if (pdata->ecc_strength < 0)
1808 pdata->ecc_strength = 0;
1810 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1811 if (pdata->ecc_step_size < 0)
1812 pdata->ecc_step_size = 0;
1814 pdev->dev.platform_data = pdata;
/*
 * Driver probe: resolve platform data (DT path first), allocate
 * controller resources, then scan and register an MTD device for each
 * chip select. If no chip select succeeded, everything is torn down
 * again via pxa3xx_nand_remove().
 *
 * NOTE(review): sampled view — several error branches and the success
 * bookkeeping between the visible statements are elided.
 */
1819 static int pxa3xx_nand_probe(struct platform_device *pdev)
1821 struct pxa3xx_nand_platform_data *pdata;
1822 struct mtd_part_parser_data ppdata = {};
1823 struct pxa3xx_nand_info *info;
1824 int ret, cs, probe_success;
/* Platforms without the PXA DMA engine fall back to PIO. */
1826 #ifndef ARCH_HAS_DMA
1829 dev_warn(&pdev->dev,
1830 "This platform can't do DMA on this device\n");
1833 ret = pxa3xx_nand_probe_dt(pdev);
1837 pdata = dev_get_platdata(&pdev->dev);
1839 dev_err(&pdev->dev, "no platform data defined\n");
1843 ret = alloc_nand_resource(pdev);
1845 dev_err(&pdev->dev, "alloc nand resource failed\n");
1849 info = platform_get_drvdata(pdev);
1851 for (cs = 0; cs < pdata->num_cs; cs++) {
1852 struct mtd_info *mtd = info->host[cs]->mtd;
1855 * The mtd name matches the one used in 'mtdparts' kernel
1856 * parameter. This name cannot be changed or otherwise
1857 * user's mtd partitions configuration would get broken.
1859 mtd->name = "pxa3xx_nand-0";
1861 ret = pxa3xx_nand_scan(mtd);
1863 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1868 ppdata.of_node = pdev->dev.of_node;
1869 ret = mtd_device_parse_register(mtd, NULL,
1870 &ppdata, pdata->parts[cs],
1871 pdata->nr_parts[cs]);
/* No chip select came up: undo everything. */
1876 if (!probe_success) {
1877 pxa3xx_nand_remove(pdev);
/*
 * Legacy platform-driver suspend hook: refuse to suspend while the
 * driver state machine is mid-operation, then iterate the chip
 * selects (per-mtd suspend work between the visible lines is elided
 * in this sampled view).
 */
1885 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1887 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1888 struct pxa3xx_nand_platform_data *pdata;
1889 struct mtd_info *mtd;
1892 pdata = dev_get_platdata(&pdev->dev);
/* An in-flight command means we cannot safely power down. */
1894 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1898 for (cs = 0; cs < pdata->num_cs; cs++) {
1899 mtd = info->host[cs]->mtd;
/*
 * Legacy platform-driver resume hook: mask all controller interrupts,
 * invalidate the cached chip select so timings get reprogrammed on the
 * next command, clear stale NDSR status bits, then resume each mtd.
 */
1906 static int pxa3xx_nand_resume(struct platform_device *pdev)
1908 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1909 struct pxa3xx_nand_platform_data *pdata;
1910 struct mtd_info *mtd;
1913 pdata = dev_get_platdata(&pdev->dev);
1914 /* We don't want to handle interrupt without calling mtd routine */
1915 disable_int(info, NDCR_INT_MASK);
1918 * Directly set the chip select to an invalid value,
1919 * then the driver would reset the timing according
1920 * to current chip select at the beginning of cmdfunc
1925 * As the spec says, the NDSR would be updated to 0x1800 when
1926 * doing the nand_clk disable/enable.
1927 * To prevent it damaging state machine of the driver, clear
1928 * all status before resume
1930 nand_writel(info, NDSR, NDSR_MASK);
1931 for (cs = 0; cs < pdata->num_cs; cs++) {
1932 mtd = info->host[cs]->mtd;
/* Without power management the PM hooks degenerate to NULL
 * (presumably the #else branch of a CONFIG_PM conditional — the
 * #ifdef lines are elided in this sampled view). */
1939 #define pxa3xx_nand_suspend NULL
1940 #define pxa3xx_nand_resume NULL
/* Platform driver glue: bind by name or by the DT match table. */
1943 static struct platform_driver pxa3xx_nand_driver = {
1945 .name = "pxa3xx-nand",
1946 .of_match_table = pxa3xx_nand_dt_ids,
1948 .probe = pxa3xx_nand_probe,
1949 .remove = pxa3xx_nand_remove,
1950 .suspend = pxa3xx_nand_suspend,
1951 .resume = pxa3xx_nand_resume,
1954 module_platform_driver(pxa3xx_nand_driver);
1956 MODULE_LICENSE("GPL");
1957 MODULE_DESCRIPTION("PXA3xx NAND controller driver");