2 * drivers/mtd/nand/pxa3xx_nand.c
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
25 #include <linux/iopoll.h>
26 #include <linux/irq.h>
27 #include <linux/slab.h>
29 #include <linux/of_device.h>
30 #include <linux/of_mtd.h>
32 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
40 #include <linux/platform_data/mtd-nand-pxa3xx.h>
42 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
43 #define NAND_STOP_DELAY msecs_to_jiffies(40)
44 #define PAGE_CHUNK_SIZE (2048)
47 * Define a buffer size for the initial command that detects the flash device:
48 * STATUS, READID and PARAM.
49 * ONFI param page is 256 bytes, and there are three redundant copies
50 * to be read. JEDEC param page is 512 bytes, and there are also three
51 * redundant copies to be read.
52 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
54 #define INIT_BUFFER_SIZE 2048
56 /* registers and bit definitions */
57 #define NDCR (0x00) /* Control register */
58 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
59 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
60 #define NDSR (0x14) /* Status Register */
61 #define NDPCR (0x18) /* Page Count Register */
62 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
63 #define NDBDR1 (0x20) /* Bad Block Register 1 */
64 #define NDECCCTRL (0x28) /* ECC control */
65 #define NDDB (0x40) /* Data Buffer */
66 #define NDCB0 (0x48) /* Command Buffer0 */
67 #define NDCB1 (0x4C) /* Command Buffer1 */
68 #define NDCB2 (0x50) /* Command Buffer2 */
/*
 * NDCR: NAND controller control register bits.
 *
 * Unsigned literals are used so that constants reaching bit 31
 * (e.g. NDCR_SPARE_EN) are not formed by left-shifting a set bit into
 * the sign bit of a signed int, which is undefined behavior in C.
 */
#define NDCR_SPARE_EN		(0x1U << 31)
#define NDCR_ECC_EN		(0x1U << 30)
#define NDCR_DMA_EN		(0x1U << 29)
#define NDCR_ND_RUN		(0x1U << 28)
#define NDCR_DWIDTH_C		(0x1U << 27)
#define NDCR_DWIDTH_M		(0x1U << 26)
#define NDCR_PAGE_SZ		(0x1U << 24)
#define NDCR_NCSX		(0x1U << 23)
#define NDCR_ND_MODE		(0x3U << 21)
#define NDCR_NAND_MODE		(0x0U)
#define NDCR_CLR_PG_CNT		(0x1U << 20)
#define NDCR_STOP_ON_UNCOR	(0x1U << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7U << 16)
/* Mask before shifting: same value as (((x) << 16) & MASK) for all
 * in-range inputs, but cannot shift set bits past the top of int. */
#define NDCR_RD_ID_CNT(x)	(((x) & 0x7U) << 16)

#define NDCR_RA_START		(0x1U << 15)
#define NDCR_PG_PER_BLK		(0x1U << 14)
#define NDCR_ND_ARB_EN		(0x1U << 12)
#define NDCR_INT_MASK		(0xFFFU)
/* NDSR: NAND controller status register bits. */
#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
/* Argument parenthesized so expression arguments expand correctly
 * (e.g. NDSR_ERR_CNT(a | b) must shift the whole expression). */
#define NDSR_ERR_CNT(sr)	(((sr) >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)
/* NDCB0: NAND controller command buffer 0 fields. */
#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
/*
 * The extended command type field spans bits 29-31; use unsigned
 * arithmetic so a set bit is never shifted into the sign bit of a
 * signed int (undefined behavior).
 */
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7U << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) & 0x7U) << 29)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)
124 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
125 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
126 #define EXT_CMD_TYPE_READ 4 /* Read */
127 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
128 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
129 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
130 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
132 /* macros for registers read/write */
133 #define nand_writel(info, off, val) \
134 writel_relaxed((val), (info)->mmio_base + (off))
136 #define nand_readl(info, off) \
137 readl_relaxed((info)->mmio_base + (off))
139 /* error code and state */
162 enum pxa3xx_nand_variant {
163 PXA3XX_NAND_VARIANT_PXA,
164 PXA3XX_NAND_VARIANT_ARMADA370,
167 struct pxa3xx_nand_host {
168 struct nand_chip chip;
169 struct mtd_info *mtd;
172 /* page size of attached chip */
176 /* calculated from pxa3xx_nand_flash data */
177 unsigned int col_addr_cycles;
178 unsigned int row_addr_cycles;
179 size_t read_id_bytes;
183 struct pxa3xx_nand_info {
184 struct nand_hw_control controller;
185 struct platform_device *pdev;
188 void __iomem *mmio_base;
189 unsigned long mmio_phys;
190 struct completion cmd_complete, dev_ready;
192 unsigned int buf_start;
193 unsigned int buf_count;
194 unsigned int buf_size;
195 unsigned int data_buff_pos;
196 unsigned int oob_buff_pos;
198 /* DMA information */
202 unsigned char *data_buff;
203 unsigned char *oob_buff;
204 dma_addr_t data_buff_phys;
206 struct pxa_dma_desc *data_desc;
207 dma_addr_t data_desc_addr;
209 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
213 * This driver supports NFCv1 (as found in PXA SoC)
214 * and NFCv2 (as found in Armada 370/XP SoC).
216 enum pxa3xx_nand_variant variant;
219 int use_ecc; /* use HW ECC ? */
220 int ecc_bch; /* using BCH ECC? */
221 int use_dma; /* use DMA ? */
222 int use_spare; /* use spare ? */
225 unsigned int data_size; /* data to be read from FIFO */
226 unsigned int chunk_size; /* split commands chunk size */
227 unsigned int oob_size;
228 unsigned int spare_size;
229 unsigned int ecc_size;
230 unsigned int ecc_err_cnt;
231 unsigned int max_bitflips;
234 /* cached register value */
239 /* generated NDCBx register values */
246 static bool use_dma = 1;
247 module_param(use_dma, bool, 0444);
248 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
250 static struct pxa3xx_nand_timing timing[] = {
251 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
252 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
253 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
254 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
257 static struct pxa3xx_nand_flash builtin_flash_types[] = {
258 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
259 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
260 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
261 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
262 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
263 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
264 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
265 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
266 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
269 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
270 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
272 static struct nand_bbt_descr bbt_main_descr = {
273 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
274 | NAND_BBT_2BIT | NAND_BBT_VERSION,
278 .maxblocks = 8, /* Last 8 blocks in each chip */
279 .pattern = bbt_pattern
282 static struct nand_bbt_descr bbt_mirror_descr = {
283 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
284 | NAND_BBT_2BIT | NAND_BBT_VERSION,
288 .maxblocks = 8, /* Last 8 blocks in each chip */
289 .pattern = bbt_mirror_pattern
292 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
295 32, 33, 34, 35, 36, 37, 38, 39,
296 40, 41, 42, 43, 44, 45, 46, 47,
297 48, 49, 50, 51, 52, 53, 54, 55,
298 56, 57, 58, 59, 60, 61, 62, 63},
299 .oobfree = { {2, 30} }
302 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
305 32, 33, 34, 35, 36, 37, 38, 39,
306 40, 41, 42, 43, 44, 45, 46, 47,
307 48, 49, 50, 51, 52, 53, 54, 55,
308 56, 57, 58, 59, 60, 61, 62, 63,
309 96, 97, 98, 99, 100, 101, 102, 103,
310 104, 105, 106, 107, 108, 109, 110, 111,
311 112, 113, 114, 115, 116, 117, 118, 119,
312 120, 121, 122, 123, 124, 125, 126, 127},
313 /* Bootrom looks in bytes 0 & 5 for bad blocks */
314 .oobfree = { {6, 26}, { 64, 32} }
317 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
320 32, 33, 34, 35, 36, 37, 38, 39,
321 40, 41, 42, 43, 44, 45, 46, 47,
322 48, 49, 50, 51, 52, 53, 54, 55,
323 56, 57, 58, 59, 60, 61, 62, 63},
327 /* Define a default flash type setting serve as flash detecting only */
328 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
330 #define NDTR0_tCH(c) (min((c), 7) << 19)
331 #define NDTR0_tCS(c) (min((c), 7) << 16)
332 #define NDTR0_tWH(c) (min((c), 7) << 11)
333 #define NDTR0_tWP(c) (min((c), 7) << 8)
334 #define NDTR0_tRH(c) (min((c), 7) << 3)
335 #define NDTR0_tRP(c) (min((c), 7) << 0)
337 #define NDTR1_tR(c) (min((c), 65535) << 16)
338 #define NDTR1_tWHR(c) (min((c), 15) << 4)
339 #define NDTR1_tAR(c) (min((c), 15) << 0)
341 /* convert nano-seconds to nand flash controller clock cycles */
342 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
344 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
346 .compatible = "marvell,pxa3xx-nand",
347 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
350 .compatible = "marvell,armada370-nand",
351 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
355 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
357 static enum pxa3xx_nand_variant
358 pxa3xx_nand_get_variant(struct platform_device *pdev)
360 const struct of_device_id *of_id =
361 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
363 return PXA3XX_NAND_VARIANT_PXA;
364 return (enum pxa3xx_nand_variant)of_id->data;
367 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
368 const struct pxa3xx_nand_timing *t)
370 struct pxa3xx_nand_info *info = host->info_data;
371 unsigned long nand_clk = clk_get_rate(info->clk);
372 uint32_t ndtr0, ndtr1;
374 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
375 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
376 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
377 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
378 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
379 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
381 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
382 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
383 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
385 info->ndtr0cs0 = ndtr0;
386 info->ndtr1cs0 = ndtr1;
387 nand_writel(info, NDTR0CS0, ndtr0);
388 nand_writel(info, NDTR1CS0, ndtr1);
392 * Set the data and OOB size, depending on the selected
393 * spare and ECC configuration.
394 * Only applicable to READ0, READOOB and PAGEPROG commands.
396 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
397 struct mtd_info *mtd)
399 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
401 info->data_size = mtd->writesize;
405 info->oob_size = info->spare_size;
407 info->oob_size += info->ecc_size;
411 * NOTE: it is a must to set ND_RUN firstly, then write
412 * command buffer, otherwise, it does not work.
413 * We enable all the interrupt at the same time, and
414 * let pxa3xx_nand_irq to handle all logic.
416 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
420 ndcr = info->reg_ndcr;
425 nand_writel(info, NDECCCTRL, 0x1);
427 ndcr &= ~NDCR_ECC_EN;
429 nand_writel(info, NDECCCTRL, 0x0);
435 ndcr &= ~NDCR_DMA_EN;
438 ndcr |= NDCR_SPARE_EN;
440 ndcr &= ~NDCR_SPARE_EN;
444 /* clear status bits and run */
445 nand_writel(info, NDSR, NDSR_MASK);
446 nand_writel(info, NDCR, 0);
447 nand_writel(info, NDCR, ndcr);
450 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
453 int timeout = NAND_STOP_DELAY;
455 /* wait RUN bit in NDCR become 0 */
456 ndcr = nand_readl(info, NDCR);
457 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
458 ndcr = nand_readl(info, NDCR);
463 ndcr &= ~NDCR_ND_RUN;
464 nand_writel(info, NDCR, ndcr);
466 /* clear status bits */
467 nand_writel(info, NDSR, NDSR_MASK);
470 static void __maybe_unused
471 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
475 ndcr = nand_readl(info, NDCR);
476 nand_writel(info, NDCR, ndcr & ~int_mask);
479 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
483 ndcr = nand_readl(info, NDCR);
484 nand_writel(info, NDCR, ndcr | int_mask);
487 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
494 * According to the datasheet, when reading from NDDB
495 * with BCH enabled, after each 32 bytes reads, we
496 * have to make sure that the NDSR.RDDREQ bit is set.
498 * Drain the FIFO 8 32 bits reads at a time, and skip
499 * the polling on the last read.
502 readsl(info->mmio_base + NDDB, data, 8);
504 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
505 val & NDSR_RDDREQ, 1000, 5000);
507 dev_err(&info->pdev->dev,
508 "Timeout on RDDREQ while draining the FIFO\n");
517 readsl(info->mmio_base + NDDB, data, len);
520 static void handle_data_pio(struct pxa3xx_nand_info *info)
522 unsigned int do_bytes = min(info->data_size, info->chunk_size);
524 switch (info->state) {
525 case STATE_PIO_WRITING:
526 writesl(info->mmio_base + NDDB,
527 info->data_buff + info->data_buff_pos,
528 DIV_ROUND_UP(do_bytes, 4));
530 if (info->oob_size > 0)
531 writesl(info->mmio_base + NDDB,
532 info->oob_buff + info->oob_buff_pos,
533 DIV_ROUND_UP(info->oob_size, 4));
535 case STATE_PIO_READING:
537 info->data_buff + info->data_buff_pos,
538 DIV_ROUND_UP(do_bytes, 4));
540 if (info->oob_size > 0)
542 info->oob_buff + info->oob_buff_pos,
543 DIV_ROUND_UP(info->oob_size, 4));
546 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
551 /* Update buffer pointers for multi-page read/write */
552 info->data_buff_pos += do_bytes;
553 info->oob_buff_pos += info->oob_size;
554 info->data_size -= do_bytes;
558 static void start_data_dma(struct pxa3xx_nand_info *info)
560 struct pxa_dma_desc *desc = info->data_desc;
561 int dma_len = ALIGN(info->data_size + info->oob_size, 32);
563 desc->ddadr = DDADR_STOP;
564 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
566 switch (info->state) {
567 case STATE_DMA_WRITING:
568 desc->dsadr = info->data_buff_phys;
569 desc->dtadr = info->mmio_phys + NDDB;
570 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
572 case STATE_DMA_READING:
573 desc->dtadr = info->data_buff_phys;
574 desc->dsadr = info->mmio_phys + NDDB;
575 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
578 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
583 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
584 DDADR(info->data_dma_ch) = info->data_desc_addr;
585 DCSR(info->data_dma_ch) |= DCSR_RUN;
588 static void pxa3xx_nand_data_dma_irq(int channel, void *data)
590 struct pxa3xx_nand_info *info = data;
593 dcsr = DCSR(channel);
594 DCSR(channel) = dcsr;
596 if (dcsr & DCSR_BUSERR) {
597 info->retcode = ERR_DMABUSERR;
600 info->state = STATE_DMA_DONE;
601 enable_int(info, NDCR_INT_MASK);
602 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
605 static void start_data_dma(struct pxa3xx_nand_info *info)
609 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
611 struct pxa3xx_nand_info *info = data;
613 handle_data_pio(info);
615 info->state = STATE_CMD_DONE;
616 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
621 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
623 struct pxa3xx_nand_info *info = devid;
624 unsigned int status, is_completed = 0, is_ready = 0;
625 unsigned int ready, cmd_done;
626 irqreturn_t ret = IRQ_HANDLED;
629 ready = NDSR_FLASH_RDY;
630 cmd_done = NDSR_CS0_CMDD;
633 cmd_done = NDSR_CS1_CMDD;
636 status = nand_readl(info, NDSR);
638 if (status & NDSR_UNCORERR)
639 info->retcode = ERR_UNCORERR;
640 if (status & NDSR_CORERR) {
641 info->retcode = ERR_CORERR;
642 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
644 info->ecc_err_cnt = NDSR_ERR_CNT(status);
646 info->ecc_err_cnt = 1;
649 * Each chunk composing a page is corrected independently,
650 * and we need to store maximum number of corrected bitflips
651 * to return it to the MTD layer in ecc.read_page().
653 info->max_bitflips = max_t(unsigned int,
657 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
658 /* whether use dma to transfer data */
660 disable_int(info, NDCR_INT_MASK);
661 info->state = (status & NDSR_RDDREQ) ?
662 STATE_DMA_READING : STATE_DMA_WRITING;
663 start_data_dma(info);
664 goto NORMAL_IRQ_EXIT;
666 info->state = (status & NDSR_RDDREQ) ?
667 STATE_PIO_READING : STATE_PIO_WRITING;
668 ret = IRQ_WAKE_THREAD;
669 goto NORMAL_IRQ_EXIT;
672 if (status & cmd_done) {
673 info->state = STATE_CMD_DONE;
676 if (status & ready) {
677 info->state = STATE_READY;
682 * Clear all status bit before issuing the next command, which
683 * can and will alter the status bits and will deserve a new
684 * interrupt on its own. This lets the controller exit the IRQ
686 nand_writel(info, NDSR, status);
688 if (status & NDSR_WRCMDREQ) {
689 status &= ~NDSR_WRCMDREQ;
690 info->state = STATE_CMD_HANDLE;
693 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
694 * must be loaded by writing directly either 12 or 16
695 * bytes directly to NDCB0, four bytes at a time.
697 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
698 * but each NDCBx register can be read.
700 nand_writel(info, NDCB0, info->ndcb0);
701 nand_writel(info, NDCB0, info->ndcb1);
702 nand_writel(info, NDCB0, info->ndcb2);
704 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
705 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
706 nand_writel(info, NDCB0, info->ndcb3);
710 complete(&info->cmd_complete);
712 complete(&info->dev_ready);
/*
 * Return 1 if every byte in @buf[0..len) is 0xff (an erased/blank
 * page), 0 otherwise. An empty buffer counts as blank.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
725 static void set_command_address(struct pxa3xx_nand_info *info,
726 unsigned int page_size, uint16_t column, int page_addr)
728 /* small page addr setting */
729 if (page_size < PAGE_CHUNK_SIZE) {
730 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
735 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
738 if (page_addr & 0xFF0000)
739 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
745 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
747 struct pxa3xx_nand_host *host = info->host[info->cs];
748 struct mtd_info *mtd = host->mtd;
750 /* reset data and oob column point to handle data */
754 info->data_buff_pos = 0;
755 info->oob_buff_pos = 0;
758 info->retcode = ERR_NONE;
759 info->ecc_err_cnt = 0;
765 case NAND_CMD_PAGEPROG:
767 case NAND_CMD_READOOB:
768 pxa3xx_set_datasize(info, mtd);
780 * If we are about to issue a read command, or about to set
781 * the write address, then clean the data buffer.
783 if (command == NAND_CMD_READ0 ||
784 command == NAND_CMD_READOOB ||
785 command == NAND_CMD_SEQIN) {
787 info->buf_count = mtd->writesize + mtd->oobsize;
788 memset(info->data_buff, 0xFF, info->buf_count);
793 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
794 int ext_cmd_type, uint16_t column, int page_addr)
796 int addr_cycle, exec_cmd;
797 struct pxa3xx_nand_host *host;
798 struct mtd_info *mtd;
800 host = info->host[info->cs];
806 info->ndcb0 = NDCB0_CSEL;
810 if (command == NAND_CMD_SEQIN)
813 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
814 + host->col_addr_cycles);
817 case NAND_CMD_READOOB:
819 info->buf_start = column;
820 info->ndcb0 |= NDCB0_CMD_TYPE(0)
824 if (command == NAND_CMD_READOOB)
825 info->buf_start += mtd->writesize;
828 * Multiple page read needs an 'extended command type' field,
829 * which is either naked-read or last-read according to the
832 if (mtd->writesize == PAGE_CHUNK_SIZE) {
833 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
834 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
835 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
837 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
838 info->ndcb3 = info->chunk_size +
842 set_command_address(info, mtd->writesize, column, page_addr);
847 info->buf_start = column;
848 set_command_address(info, mtd->writesize, 0, page_addr);
851 * Multiple page programming needs to execute the initial
852 * SEQIN command that sets the page address.
854 if (mtd->writesize > PAGE_CHUNK_SIZE) {
855 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
856 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
859 /* No data transfer in this case */
865 case NAND_CMD_PAGEPROG:
866 if (is_buf_blank(info->data_buff,
867 (mtd->writesize + mtd->oobsize))) {
872 /* Second command setting for large pages */
873 if (mtd->writesize > PAGE_CHUNK_SIZE) {
875 * Multiple page write uses the 'extended command'
876 * field. This can be used to issue a command dispatch
877 * or a naked-write depending on the current stage.
879 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
881 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
882 info->ndcb3 = info->chunk_size +
886 * This is the command dispatch that completes a chunked
887 * page program operation.
889 if (info->data_size == 0) {
890 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
891 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
898 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
902 | (NAND_CMD_PAGEPROG << 8)
909 info->buf_count = INIT_BUFFER_SIZE;
910 info->ndcb0 |= NDCB0_CMD_TYPE(0)
914 info->ndcb1 = (column & 0xFF);
915 info->ndcb3 = INIT_BUFFER_SIZE;
916 info->data_size = INIT_BUFFER_SIZE;
919 case NAND_CMD_READID:
920 info->buf_count = host->read_id_bytes;
921 info->ndcb0 |= NDCB0_CMD_TYPE(3)
924 info->ndcb1 = (column & 0xFF);
928 case NAND_CMD_STATUS:
930 info->ndcb0 |= NDCB0_CMD_TYPE(4)
937 case NAND_CMD_ERASE1:
938 info->ndcb0 |= NDCB0_CMD_TYPE(2)
942 | (NAND_CMD_ERASE2 << 8)
944 info->ndcb1 = page_addr;
949 info->ndcb0 |= NDCB0_CMD_TYPE(5)
954 case NAND_CMD_ERASE2:
960 dev_err(&info->pdev->dev, "non-supported command %x\n",
968 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
969 int column, int page_addr)
971 struct pxa3xx_nand_host *host = mtd->priv;
972 struct pxa3xx_nand_info *info = host->info_data;
976 * if this is a x16 device ,then convert the input
977 * "byte" address into a "word" address appropriate
978 * for indexing a word-oriented device
980 if (info->reg_ndcr & NDCR_DWIDTH_M)
984 * There may be different NAND chip hooked to
985 * different chip select, so check whether
986 * chip select has been changed, if yes, reset the timing
988 if (info->cs != host->cs) {
990 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
991 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
994 prepare_start_command(info, command);
996 info->state = STATE_PREPARED;
997 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1000 init_completion(&info->cmd_complete);
1001 init_completion(&info->dev_ready);
1002 info->need_wait = 1;
1003 pxa3xx_nand_start(info);
1005 if (!wait_for_completion_timeout(&info->cmd_complete,
1006 CHIP_DELAY_TIMEOUT)) {
1007 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1008 /* Stop State Machine for next command cycle */
1009 pxa3xx_nand_stop(info);
1012 info->state = STATE_IDLE;
1015 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1016 const unsigned command,
1017 int column, int page_addr)
1019 struct pxa3xx_nand_host *host = mtd->priv;
1020 struct pxa3xx_nand_info *info = host->info_data;
1021 int exec_cmd, ext_cmd_type;
1024 * if this is a x16 device then convert the input
1025 * "byte" address into a "word" address appropriate
1026 * for indexing a word-oriented device
1028 if (info->reg_ndcr & NDCR_DWIDTH_M)
1032 * There may be different NAND chip hooked to
1033 * different chip select, so check whether
1034 * chip select has been changed, if yes, reset the timing
1036 if (info->cs != host->cs) {
1037 info->cs = host->cs;
1038 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1039 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1042 /* Select the extended command for the first command */
1044 case NAND_CMD_READ0:
1045 case NAND_CMD_READOOB:
1046 ext_cmd_type = EXT_CMD_TYPE_MONO;
1048 case NAND_CMD_SEQIN:
1049 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1051 case NAND_CMD_PAGEPROG:
1052 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1059 prepare_start_command(info, command);
1062 * Prepare the "is ready" completion before starting a command
1063 * transaction sequence. If the command is not executed the
1064 * completion will be completed, see below.
1066 * We can do that inside the loop because the command variable
1067 * is invariant and thus so is the exec_cmd.
1069 info->need_wait = 1;
1070 init_completion(&info->dev_ready);
1072 info->state = STATE_PREPARED;
1073 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1076 info->need_wait = 0;
1077 complete(&info->dev_ready);
1081 init_completion(&info->cmd_complete);
1082 pxa3xx_nand_start(info);
1084 if (!wait_for_completion_timeout(&info->cmd_complete,
1085 CHIP_DELAY_TIMEOUT)) {
1086 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1087 /* Stop State Machine for next command cycle */
1088 pxa3xx_nand_stop(info);
1092 /* Check if the sequence is complete */
1093 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1097 * After a splitted program command sequence has issued
1098 * the command dispatch, the command sequence is complete.
1100 if (info->data_size == 0 &&
1101 command == NAND_CMD_PAGEPROG &&
1102 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1105 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1106 /* Last read: issue a 'last naked read' */
1107 if (info->data_size == info->chunk_size)
1108 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1110 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1113 * If a splitted program command has no more data to transfer,
1114 * the command dispatch must be issued to complete.
1116 } else if (command == NAND_CMD_PAGEPROG &&
1117 info->data_size == 0) {
1118 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1122 info->state = STATE_IDLE;
1125 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1126 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1128 chip->write_buf(mtd, buf, mtd->writesize);
1129 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1134 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1135 struct nand_chip *chip, uint8_t *buf, int oob_required,
1138 struct pxa3xx_nand_host *host = mtd->priv;
1139 struct pxa3xx_nand_info *info = host->info_data;
1141 chip->read_buf(mtd, buf, mtd->writesize);
1142 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1144 if (info->retcode == ERR_CORERR && info->use_ecc) {
1145 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1147 } else if (info->retcode == ERR_UNCORERR) {
1149 * for blank page (all 0xff), HW will calculate its ECC as
1150 * 0, which is different from the ECC information within
1151 * OOB, ignore such uncorrectable errors
1153 if (is_buf_blank(buf, mtd->writesize))
1154 info->retcode = ERR_NONE;
1156 mtd->ecc_stats.failed++;
1159 return info->max_bitflips;
1162 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1164 struct pxa3xx_nand_host *host = mtd->priv;
1165 struct pxa3xx_nand_info *info = host->info_data;
1168 if (info->buf_start < info->buf_count)
1169 /* Has just send a new command? */
1170 retval = info->data_buff[info->buf_start++];
1175 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1177 struct pxa3xx_nand_host *host = mtd->priv;
1178 struct pxa3xx_nand_info *info = host->info_data;
1179 u16 retval = 0xFFFF;
1181 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1182 retval = *((u16 *)(info->data_buff+info->buf_start));
1183 info->buf_start += 2;
1188 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1190 struct pxa3xx_nand_host *host = mtd->priv;
1191 struct pxa3xx_nand_info *info = host->info_data;
1192 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1194 memcpy(buf, info->data_buff + info->buf_start, real_len);
1195 info->buf_start += real_len;
1198 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1199 const uint8_t *buf, int len)
1201 struct pxa3xx_nand_host *host = mtd->priv;
1202 struct pxa3xx_nand_info *info = host->info_data;
1203 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1205 memcpy(info->data_buff + info->buf_start, buf, real_len);
1206 info->buf_start += real_len;
/* Chip selection is handled in the cmdfunc path; nothing to do here. */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1214 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1216 struct pxa3xx_nand_host *host = mtd->priv;
1217 struct pxa3xx_nand_info *info = host->info_data;
1219 if (info->need_wait) {
1220 info->need_wait = 0;
1221 if (!wait_for_completion_timeout(&info->dev_ready,
1222 CHIP_DELAY_TIMEOUT)) {
1223 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1224 return NAND_STATUS_FAIL;
1228 /* pxa3xx_nand_send_command has waited for command complete */
1229 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1230 if (info->retcode == ERR_NONE)
1233 return NAND_STATUS_FAIL;
1236 return NAND_STATUS_READY;
1239 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1240 const struct pxa3xx_nand_flash *f)
1242 struct platform_device *pdev = info->pdev;
1243 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1244 struct pxa3xx_nand_host *host = info->host[info->cs];
1245 uint32_t ndcr = 0x0; /* enable all interrupts */
1247 if (f->page_size != 2048 && f->page_size != 512) {
1248 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1252 if (f->flash_width != 16 && f->flash_width != 8) {
1253 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1257 /* calculate flash information */
1258 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
1260 /* calculate addressing information */
1261 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1263 if (f->num_blocks * f->page_per_block > 65536)
1264 host->row_addr_cycles = 3;
1266 host->row_addr_cycles = 2;
1268 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1269 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1270 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1271 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1272 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1273 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1275 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1276 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1278 info->reg_ndcr = ndcr;
1280 pxa3xx_nand_set_timing(host, f->timing);
1284 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1287 * We set 0 by hard coding here, for we don't support keep_config
1288 * when there is more than one chip attached to the controller
1290 struct pxa3xx_nand_host *host = info->host[0];
1291 uint32_t ndcr = nand_readl(info, NDCR);
1293 if (ndcr & NDCR_PAGE_SZ) {
1294 /* Controller's FIFO size */
1295 info->chunk_size = 2048;
1296 host->read_id_bytes = 4;
1298 info->chunk_size = 512;
1299 host->read_id_bytes = 2;
1302 /* Set an initial chunk size */
1303 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1304 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1305 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1310 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
/*
 * DMA-capable buffer setup (ARCH_HAS_DMA build): allocate the data
 * buffer and a PXA DMA channel. The DMA descriptor is carved out of
 * the tail of the coherent buffer itself.
 * NOTE(review): listing omits the PIO early-return and error-return
 * lines — gaps in the line numbers.
 */
1312 struct platform_device *pdev = info->pdev;
/* Descriptor lives at the very end of the allocated buffer */
1313 int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
/* PIO path: plain kernel allocation is sufficient */
1316 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1317 if (info->data_buff == NULL)
/* DMA path: coherent buffer so controller and CPU share it uncached */
1322 info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
1323 &info->data_buff_phys, GFP_KERNEL);
1324 if (info->data_buff == NULL) {
1325 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
/* Both virtual and bus address of the in-buffer descriptor */
1329 info->data_desc = (void *)info->data_buff + data_desc_offset;
1330 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1332 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1333 pxa3xx_nand_data_dma_irq, info);
1334 if (info->data_dma_ch < 0) {
1335 dev_err(&pdev->dev, "failed to request data dma\n");
/* Roll back the coherent allocation on channel-request failure */
1336 dma_free_coherent(&pdev->dev, info->buf_size,
1337 info->data_buff, info->data_buff_phys);
1338 return info->data_dma_ch;
1342 * Now that DMA buffers are allocated we turn on
1343 * DMA proper for I/O operations.
1349 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
/*
 * Release whatever pxa3xx_nand_init_buff() (DMA build) allocated:
 * DMA channel + coherent buffer when DMA is in use, else the
 * kmalloc'd PIO buffer. Mirrors the allocation paths exactly.
 */
1351 struct platform_device *pdev = info->pdev;
1352 if (info->use_dma) {
1353 pxa_free_dma(info->data_dma_ch);
1354 dma_free_coherent(&pdev->dev, info->buf_size,
1355 info->data_buff, info->data_buff_phys);
/* (missing else in listing) PIO buffer came from kmalloc */
1357 kfree(info->data_buff);
1361 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
/*
 * Non-DMA build variant: only a plain kernel buffer is needed.
 * NOTE(review): listing omits the return statements.
 */
1363 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1364 if (info->data_buff == NULL)
1369 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
/* Non-DMA build variant: free the kmalloc'd buffer (kfree(NULL) is safe). */
1371 kfree(info->data_buff);
1375 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
/*
 * Probe for chip presence on the current chip select: program the
 * controller with the first (most conservative) built-in flash
 * config, issue a RESET and check the chip's status response.
 * Returns non-zero on failure (missing-line gaps hide the exact
 * return statements in this listing).
 */
1377 struct mtd_info *mtd;
1378 struct nand_chip *chip;
1381 mtd = info->host[info->cs]->mtd;
1384 /* use the common timing to make a try */
1385 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
/* RESET + wait: a present chip answers with a status byte */
1389 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1390 ret = chip->waitfunc(mtd, chip);
1391 if (ret & NAND_STATUS_FAIL)
1397 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1398 struct nand_ecc_ctrl *ecc,
1399 int strength, int ecc_stepsize, int page_size)
/*
 * Map the requested (strength, step size, page size) triple onto one
 * of the controller's fixed ECC geometries: Hamming for 1-bit/512B,
 * BCH for 4- and 8-bit strengths. Sets chunk/spare/ecc sizes in
 * 'info' and the NAND core's ecc ctrl. Unsupported combinations hit
 * the final dev_err (listing omits the -ENODEV/return 0 lines).
 */
/* 1-bit Hamming on a 2 KiB page: whole page in one chunk */
1401 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1402 info->chunk_size = 2048;
1403 info->spare_size = 40;
1404 info->ecc_size = 24;
1405 ecc->mode = NAND_ECC_HW;
/* 1-bit Hamming on a 512 B page */
1409 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1410 info->chunk_size = 512;
1411 info->spare_size = 8;
1413 ecc->mode = NAND_ECC_HW;
1418 * Required ECC: 4-bit correction per 512 bytes
1419 * Select: 16-bit correction per 2048 bytes
1421 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1423 info->chunk_size = 2048;
1424 info->spare_size = 32;
1425 info->ecc_size = 32;
1426 ecc->mode = NAND_ECC_HW;
1427 ecc->size = info->chunk_size;
1428 ecc->layout = &ecc_layout_2KB_bch4bit;
/* BCH-4 on a 4 KiB page: two 2 KiB chunks per page */
1431 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1433 info->chunk_size = 2048;
1434 info->spare_size = 32;
1435 info->ecc_size = 32;
1436 ecc->mode = NAND_ECC_HW;
1437 ecc->size = info->chunk_size;
1438 ecc->layout = &ecc_layout_4KB_bch4bit;
1442 * Required ECC: 8-bit correction per 512 bytes
1443 * Select: 16-bit correction per 1024 bytes
1445 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
/* BCH-8 consumes the whole spare area, hence spare_size = 0 */
1447 info->chunk_size = 1024;
1448 info->spare_size = 0;
1449 info->ecc_size = 32;
1450 ecc->mode = NAND_ECC_HW;
1451 ecc->size = info->chunk_size;
1452 ecc->layout = &ecc_layout_4KB_bch8bit;
1455 dev_err(&info->pdev->dev,
1456 "ECC strength %d at page size %d is not supported\n",
1457 strength, page_size);
1461 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1462 ecc->strength, ecc->size);
1466 static int pxa3xx_nand_scan(struct mtd_info *mtd)
/*
 * Full device bring-up for one chip select: detect the flash (or
 * reuse the bootloader config), configure timings/ECC/addressing,
 * then hand over to the NAND core (nand_scan_ident/nand_scan_tail).
 * NOTE(review): many lines are missing from this numbered listing
 * (returns, else branches, error paths) — do not treat it as
 * complete control flow.
 */
1468 struct pxa3xx_nand_host *host = mtd->priv;
1469 struct pxa3xx_nand_info *info = host->info_data;
1470 struct platform_device *pdev = info->pdev;
1471 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
/* Two-entry table: detected chip + NULL terminator for nand_scan_ident */
1472 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1473 const struct pxa3xx_nand_flash *f = NULL;
1474 struct nand_chip *chip = mtd->priv;
1478 uint16_t ecc_strength, ecc_step;
/* keep_config: trust the bootloader's register setup and skip detection */
1480 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1483 /* Set a default chunk size */
1484 info->chunk_size = 512;
1486 ret = pxa3xx_nand_sensing(info);
1488 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
/* READID and take the first two ID bytes (maker + device) */
1494 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1495 id = *((uint16_t *)(info->data_buff));
1497 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
1499 dev_warn(&info->pdev->dev,
1500 "Read out ID 0, potential timing set wrong!!\n");
/*
 * Search platform-supplied flash table first, then the built-in
 * table (entry 0 is the generic default and is skipped, hence -1/+1).
 */
1505 num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
1506 for (i = 0; i < num; i++) {
1507 if (i < pdata->num_flash)
1508 f = pdata->flash + i;
1510 f = &builtin_flash_types[i - pdata->num_flash + 1];
1512 /* find the chip in default list */
1513 if (f->chip_id == id)
1517 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
1518 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
1523 ret = pxa3xx_nand_config_flash(info, f);
1525 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
/* Build a one-entry nand_flash_dev so the core accepts this chip */
1529 memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1531 pxa3xx_flash_ids[0].name = f->name;
1532 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1533 pxa3xx_flash_ids[0].pagesize = f->page_size;
1534 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
/* chipsize is stored in MiB */
1535 pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1536 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1537 if (f->flash_width == 16)
1538 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1539 pxa3xx_flash_ids[1].name = NULL;
1540 def = pxa3xx_flash_ids;
1542 if (info->reg_ndcr & NDCR_DWIDTH_M)
1543 chip->options |= NAND_BUSWIDTH_16;
1545 /* Device detection must be done with ECC disabled */
1546 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1547 nand_writel(info, NDECCCTRL, 0x0);
1549 if (nand_scan_ident(mtd, 1, def))
1552 if (pdata->flash_bbt) {
1554 * We'll use a bad block table stored in-flash and don't
1555 * allow writing the bad block marker to the flash.
1557 chip->bbt_options |= NAND_BBT_USE_FLASH |
1558 NAND_BBT_NO_OOB_BBM;
1559 chip->bbt_td = &bbt_main_descr;
1560 chip->bbt_md = &bbt_mirror_descr;
1564 * If the page size is bigger than the FIFO size, let's check
1565 * we are given the right variant and then switch to the extended
1566 * (aka splitted) command handling,
1568 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1569 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1570 chip->cmdfunc = nand_cmdfunc_extended;
1572 dev_err(&info->pdev->dev,
1573 "unsupported page size on this variant\n");
/* Platform data overrides the chip's ONFI-advertised ECC requirement */
1578 if (pdata->ecc_strength && pdata->ecc_step_size) {
1579 ecc_strength = pdata->ecc_strength;
1580 ecc_step = pdata->ecc_step_size;
1582 ecc_strength = chip->ecc_strength_ds;
1583 ecc_step = chip->ecc_step_ds;
1586 /* Set default ECC strength requirements on non-ONFI devices */
1587 if (ecc_strength < 1 && ecc_step < 1) {
1592 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1593 ecc_step, mtd->writesize);
1597 /* calculate addressing information */
1598 if (mtd->writesize >= 2048)
1599 host->col_addr_cycles = 2;
1601 host->col_addr_cycles = 1;
1603 /* release the initial buffer */
1604 kfree(info->data_buff);
1606 /* allocate the real data + oob buffer */
1607 info->buf_size = mtd->writesize + mtd->oobsize;
1608 ret = pxa3xx_nand_init_buff(info);
1611 info->oob_buff = info->data_buff + mtd->writesize;
/* Chips with more than 64K pages need a third row-address cycle */
1613 if ((mtd->size >> chip->page_shift) > 65536)
1614 host->row_addr_cycles = 3;
1616 host->row_addr_cycles = 2;
1617 return nand_scan_tail(mtd);
1620 static int alloc_nand_resource(struct platform_device *pdev)
/*
 * Allocate and wire up all per-controller resources: one combined
 * allocation holding info + (mtd, chip/host) per chip select, clock,
 * DMA descriptors (DT hack for pxa3xx), IRQ, MMIO and the initial
 * detection buffer. NOTE(review): error labels/returns are missing
 * from this numbered listing.
 */
1622 struct pxa3xx_nand_platform_data *pdata;
1623 struct pxa3xx_nand_info *info;
1624 struct pxa3xx_nand_host *host;
1625 struct nand_chip *chip = NULL;
1626 struct mtd_info *mtd;
1630 pdata = dev_get_platdata(&pdev->dev);
1631 if (pdata->num_cs <= 0)
/* Single devm allocation: info followed by num_cs (mtd + host) pairs */
1633 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1634 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1639 info->variant = pxa3xx_nand_get_variant(pdev);
1640 for (cs = 0; cs < pdata->num_cs; cs++) {
/* Carve the cs-th (mtd, host) pair out of the trailing storage */
1641 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
/* host embeds its nand_chip as first member, so the casts alias */
1642 chip = (struct nand_chip *)(&mtd[1]);
1643 host = (struct pxa3xx_nand_host *)chip;
1644 info->host[cs] = host;
1647 host->info_data = info;
1649 mtd->owner = THIS_MODULE;
1651 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1652 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1653 chip->controller = &info->controller;
1654 chip->waitfunc = pxa3xx_nand_waitfunc;
1655 chip->select_chip = pxa3xx_nand_select_chip;
1656 chip->read_word = pxa3xx_nand_read_word;
1657 chip->read_byte = pxa3xx_nand_read_byte;
1658 chip->read_buf = pxa3xx_nand_read_buf;
1659 chip->write_buf = pxa3xx_nand_write_buf;
1660 chip->options |= NAND_NO_SUBPAGE_WRITE;
1661 chip->cmdfunc = nand_cmdfunc;
1664 spin_lock_init(&chip->controller->lock);
1665 init_waitqueue_head(&chip->controller->wq);
1666 info->clk = devm_clk_get(&pdev->dev, NULL);
1667 if (IS_ERR(info->clk)) {
1668 dev_err(&pdev->dev, "failed to get nand clock\n");
1669 return PTR_ERR(info->clk);
1671 ret = clk_prepare_enable(info->clk);
1677 * This is a dirty hack to make this driver work from
1678 * devicetree bindings. It can be removed once we have
1679 * a prober DMA controller framework for DT.
/* DT path: hard-coded DRCMR numbers for pxa3xx; else read DMA resources */
1681 if (pdev->dev.of_node &&
1682 of_machine_is_compatible("marvell,pxa3xx")) {
1683 info->drcmr_dat = 97;
1684 info->drcmr_cmd = 99;
1686 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1689 "no resource defined for data DMA\n");
1691 goto fail_disable_clk;
1693 info->drcmr_dat = r->start;
1695 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1698 "no resource defined for cmd DMA\n");
1700 goto fail_disable_clk;
1702 info->drcmr_cmd = r->start;
1706 irq = platform_get_irq(pdev, 0);
1708 dev_err(&pdev->dev, "no IRQ resource defined\n");
1710 goto fail_disable_clk;
1713 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1714 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1715 if (IS_ERR(info->mmio_base)) {
1716 ret = PTR_ERR(info->mmio_base);
1717 goto fail_disable_clk;
1719 info->mmio_phys = r->start;
1721 /* Allocate a buffer to allow flash detection */
1722 info->buf_size = INIT_BUFFER_SIZE;
1723 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1724 if (info->data_buff == NULL) {
1726 goto fail_disable_clk;
1729 /* initialize all interrupts to be disabled */
1730 disable_int(info, NDSR_MASK);
1732 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1733 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1736 dev_err(&pdev->dev, "failed to request IRQ\n");
1740 platform_set_drvdata(pdev, info);
/* (error unwind labels — listing omits the label lines themselves) */
1745 free_irq(irq, info);
1746 kfree(info->data_buff);
1748 clk_disable_unprepare(info->clk);
1752 static int pxa3xx_nand_remove(struct platform_device *pdev)
/*
 * Tear down in reverse order of alloc_nand_resource()/scan():
 * IRQ, buffers, clock, then release each chip select's MTD.
 */
1754 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1755 struct pxa3xx_nand_platform_data *pdata;
1761 pdata = dev_get_platdata(&pdev->dev);
1763 irq = platform_get_irq(pdev, 0);
1765 free_irq(irq, info);
1766 pxa3xx_nand_free_buff(info);
1768 clk_disable_unprepare(info->clk);
1770 for (cs = 0; cs < pdata->num_cs; cs++)
1771 nand_release(info->host[cs]->mtd);
1775 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
/*
 * Build platform data from devicetree properties. A no-op (and
 * the missing lines presumably return early) when the device was
 * not matched via DT. Negative "missing property" results from the
 * of_get_nand_ecc_* helpers are normalized to 0 (= use chip default).
 */
1777 struct pxa3xx_nand_platform_data *pdata;
1778 struct device_node *np = pdev->dev.of_node;
1779 const struct of_device_id *of_id =
1780 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1785 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1789 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1790 pdata->enable_arbiter = 1;
1791 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1792 pdata->keep_config = 1;
1793 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1794 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1796 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1797 if (pdata->ecc_strength < 0)
1798 pdata->ecc_strength = 0;
1800 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1801 if (pdata->ecc_step_size < 0)
1802 pdata->ecc_step_size = 0;
/* Publish the assembled pdata for the rest of probe */
1804 pdev->dev.platform_data = pdata;
1809 static int pxa3xx_nand_probe(struct platform_device *pdev)
/*
 * Driver entry point: resolve platform data (DT or board file),
 * allocate resources, then scan and register an MTD per chip
 * select. Probe succeeds if at least one chip select registered
 * (the probe_success bookkeeping lines are missing from this
 * listing).
 */
1811 struct pxa3xx_nand_platform_data *pdata;
1812 struct mtd_part_parser_data ppdata = {};
1813 struct pxa3xx_nand_info *info;
1814 int ret, cs, probe_success;
/* Without ARCH_HAS_DMA the driver can only run in PIO mode */
1816 #ifndef ARCH_HAS_DMA
1819 dev_warn(&pdev->dev,
1820 "This platform can't do DMA on this device\n");
1823 ret = pxa3xx_nand_probe_dt(pdev);
1827 pdata = dev_get_platdata(&pdev->dev);
1829 dev_err(&pdev->dev, "no platform data defined\n");
1833 ret = alloc_nand_resource(pdev);
1835 dev_err(&pdev->dev, "alloc nand resource failed\n");
1839 info = platform_get_drvdata(pdev);
1841 for (cs = 0; cs < pdata->num_cs; cs++) {
1842 struct mtd_info *mtd = info->host[cs]->mtd;
1845 * The mtd name matches the one used in 'mtdparts' kernel
1846 * parameter. This name cannot be changed or otherwise
1847 * user's mtd partitions configuration would get broken.
1849 mtd->name = "pxa3xx_nand-0";
1851 ret = pxa3xx_nand_scan(mtd);
1853 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1858 ppdata.of_node = pdev->dev.of_node;
1859 ret = mtd_device_parse_register(mtd, NULL,
1860 &ppdata, pdata->parts[cs],
1861 pdata->nr_parts[cs]);
/* No chip select came up: undo everything */
1866 if (!probe_success) {
1867 pxa3xx_nand_remove(pdev);
1875 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
/*
 * Refuse to suspend while a command is in flight (info->state check —
 * the surrounding if/return lines are missing from this listing),
 * then iterate the chip selects (per-mtd suspend body also omitted).
 */
1877 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1878 struct pxa3xx_nand_platform_data *pdata;
1879 struct mtd_info *mtd;
1882 pdata = dev_get_platdata(&pdev->dev);
1884 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1888 for (cs = 0; cs < pdata->num_cs; cs++) {
1889 mtd = info->host[cs]->mtd;
1896 static int pxa3xx_nand_resume(struct platform_device *pdev)
/*
 * Bring the controller back after suspend: mask interrupts,
 * invalidate the cached chip select so timings get reprogrammed on
 * the next command, clear stale status bits, then resume each MTD
 * (per-mtd body omitted in this listing).
 */
1898 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1899 struct pxa3xx_nand_platform_data *pdata;
1900 struct mtd_info *mtd;
1903 pdata = dev_get_platdata(&pdev->dev);
1904 /* We don't want to handle interrupt without calling mtd routine */
1905 disable_int(info, NDCR_INT_MASK);
1908 * Directly set the chip select to a invalid value,
1909 * then the driver would reset the timing according
1910 * to current chip select at the beginning of cmdfunc
1915 * As the spec says, the NDSR would be updated to 0x1800 when
1916 * doing the nand_clk disable/enable.
1917 * To prevent it damaging state machine of the driver, clear
1918 * all status before resume
1920 nand_writel(info, NDSR, NDSR_MASK);
1921 for (cs = 0; cs < pdata->num_cs; cs++) {
1922 mtd = info->host[cs]->mtd;
/* Without CONFIG_PM, stub out the PM callbacks entirely */
1929 #define pxa3xx_nand_suspend NULL
1930 #define pxa3xx_nand_resume NULL
/*
 * Platform driver glue: matches either the platform device name
 * "pxa3xx-nand" or the DT compatible table pxa3xx_nand_dt_ids.
 */
1933 static struct platform_driver pxa3xx_nand_driver = {
1935 .name = "pxa3xx-nand",
1936 .of_match_table = pxa3xx_nand_dt_ids,
1938 .probe = pxa3xx_nand_probe,
1939 .remove = pxa3xx_nand_remove,
1940 .suspend = pxa3xx_nand_suspend,
1941 .resume = pxa3xx_nand_resume,
/* Generates module init/exit that register/unregister the driver */
1944 module_platform_driver(pxa3xx_nand_driver);
1946 MODULE_LICENSE("GPL");
1947 MODULE_DESCRIPTION("PXA3xx NAND controller driver");