/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
25 #include <linux/iopoll.h>
26 #include <linux/irq.h>
27 #include <linux/slab.h>
29 #include <linux/of_device.h>
30 #include <linux/of_mtd.h>
32 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
40 #include <linux/platform_data/mtd-nand-pxa3xx.h>
42 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
43 #define NAND_STOP_DELAY msecs_to_jiffies(40)
44 #define PAGE_CHUNK_SIZE (2048)
47 * Define a buffer size for the initial command that detects the flash device:
48 * STATUS, READID and PARAM.
49 * ONFI param page is 256 bytes, and there are three redundant copies
50 * to be read. JEDEC param page is 512 bytes, and there are also three
51 * redundant copies to be read.
52 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
54 #define INIT_BUFFER_SIZE 2048
56 /* registers and bit definitions */
57 #define NDCR (0x00) /* Control register */
58 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
59 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
60 #define NDSR (0x14) /* Status Register */
61 #define NDPCR (0x18) /* Page Count Register */
62 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
63 #define NDBDR1 (0x20) /* Bad Block Register 1 */
64 #define NDECCCTRL (0x28) /* ECC control */
65 #define NDDB (0x40) /* Data Buffer */
66 #define NDCB0 (0x48) /* Command Buffer0 */
67 #define NDCB1 (0x4C) /* Command Buffer1 */
68 #define NDCB2 (0x50) /* Command Buffer2 */
70 #define NDCR_SPARE_EN (0x1 << 31)
71 #define NDCR_ECC_EN (0x1 << 30)
72 #define NDCR_DMA_EN (0x1 << 29)
73 #define NDCR_ND_RUN (0x1 << 28)
74 #define NDCR_DWIDTH_C (0x1 << 27)
75 #define NDCR_DWIDTH_M (0x1 << 26)
76 #define NDCR_PAGE_SZ (0x1 << 24)
77 #define NDCR_NCSX (0x1 << 23)
78 #define NDCR_ND_MODE (0x3 << 21)
79 #define NDCR_NAND_MODE (0x0)
80 #define NDCR_CLR_PG_CNT (0x1 << 20)
81 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
82 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
83 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
85 #define NDCR_RA_START (0x1 << 15)
86 #define NDCR_PG_PER_BLK (0x1 << 14)
87 #define NDCR_ND_ARB_EN (0x1 << 12)
88 #define NDCR_INT_MASK (0xFFF)
90 #define NDSR_MASK (0xfff)
91 #define NDSR_ERR_CNT_OFF (16)
92 #define NDSR_ERR_CNT_MASK (0x1f)
93 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
94 #define NDSR_RDY (0x1 << 12)
95 #define NDSR_FLASH_RDY (0x1 << 11)
96 #define NDSR_CS0_PAGED (0x1 << 10)
97 #define NDSR_CS1_PAGED (0x1 << 9)
98 #define NDSR_CS0_CMDD (0x1 << 8)
99 #define NDSR_CS1_CMDD (0x1 << 7)
100 #define NDSR_CS0_BBD (0x1 << 6)
101 #define NDSR_CS1_BBD (0x1 << 5)
102 #define NDSR_UNCORERR (0x1 << 4)
103 #define NDSR_CORERR (0x1 << 3)
104 #define NDSR_WRDREQ (0x1 << 2)
105 #define NDSR_RDDREQ (0x1 << 1)
106 #define NDSR_WRCMDREQ (0x1)
108 #define NDCB0_LEN_OVRD (0x1 << 28)
109 #define NDCB0_ST_ROW_EN (0x1 << 26)
110 #define NDCB0_AUTO_RS (0x1 << 25)
111 #define NDCB0_CSEL (0x1 << 24)
112 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
113 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
114 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
115 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
116 #define NDCB0_NC (0x1 << 20)
117 #define NDCB0_DBC (0x1 << 19)
118 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
119 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
120 #define NDCB0_CMD2_MASK (0xff << 8)
121 #define NDCB0_CMD1_MASK (0xff)
122 #define NDCB0_ADDR_CYC_SHIFT (16)
124 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
125 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
126 #define EXT_CMD_TYPE_READ 4 /* Read */
127 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
128 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
129 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
130 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
133 * This should be large enough to read 'ONFI' and 'JEDEC'.
134 * Let's use 7 bytes, which is the maximum ID count supported
135 * by the controller (see NDCR_RD_ID_CNT_MASK).
137 #define READ_ID_BYTES 7
139 /* macros for registers read/write */
140 #define nand_writel(info, off, val) \
141 writel_relaxed((val), (info)->mmio_base + (off))
143 #define nand_readl(info, off) \
144 readl_relaxed((info)->mmio_base + (off))
146 /* error code and state */
/*
 * Controller generations supported by this driver: the original PXA3xx
 * NFC (NFCv1) and the enhanced one found in Armada 370/XP (NFCv2).
 */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
174 struct pxa3xx_nand_host {
175 struct nand_chip chip;
176 struct mtd_info *mtd;
179 /* page size of attached chip */
183 /* calculated from pxa3xx_nand_flash data */
184 unsigned int col_addr_cycles;
185 unsigned int row_addr_cycles;
188 struct pxa3xx_nand_info {
189 struct nand_hw_control controller;
190 struct platform_device *pdev;
193 void __iomem *mmio_base;
194 unsigned long mmio_phys;
195 struct completion cmd_complete, dev_ready;
197 unsigned int buf_start;
198 unsigned int buf_count;
199 unsigned int buf_size;
200 unsigned int data_buff_pos;
201 unsigned int oob_buff_pos;
203 /* DMA information */
207 unsigned char *data_buff;
208 unsigned char *oob_buff;
209 dma_addr_t data_buff_phys;
211 struct pxa_dma_desc *data_desc;
212 dma_addr_t data_desc_addr;
214 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
218 * This driver supports NFCv1 (as found in PXA SoC)
219 * and NFCv2 (as found in Armada 370/XP SoC).
221 enum pxa3xx_nand_variant variant;
224 int use_ecc; /* use HW ECC ? */
225 int ecc_bch; /* using BCH ECC? */
226 int use_dma; /* use DMA ? */
227 int use_spare; /* use spare ? */
230 unsigned int data_size; /* data to be read from FIFO */
231 unsigned int chunk_size; /* split commands chunk size */
232 unsigned int oob_size;
233 unsigned int spare_size;
234 unsigned int ecc_size;
235 unsigned int ecc_err_cnt;
236 unsigned int max_bitflips;
239 /* cached register value */
244 /* generated NDCBx register values */
251 static bool use_dma = 1;
252 module_param(use_dma, bool, 0444);
253 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
255 static struct pxa3xx_nand_timing timing[] = {
256 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
257 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
258 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
259 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
262 static struct pxa3xx_nand_flash builtin_flash_types[] = {
263 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
264 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
265 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
266 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
267 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
268 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
269 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
270 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
271 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
274 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
275 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
277 static struct nand_bbt_descr bbt_main_descr = {
278 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
279 | NAND_BBT_2BIT | NAND_BBT_VERSION,
283 .maxblocks = 8, /* Last 8 blocks in each chip */
284 .pattern = bbt_pattern
287 static struct nand_bbt_descr bbt_mirror_descr = {
288 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
289 | NAND_BBT_2BIT | NAND_BBT_VERSION,
293 .maxblocks = 8, /* Last 8 blocks in each chip */
294 .pattern = bbt_mirror_pattern
297 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
300 32, 33, 34, 35, 36, 37, 38, 39,
301 40, 41, 42, 43, 44, 45, 46, 47,
302 48, 49, 50, 51, 52, 53, 54, 55,
303 56, 57, 58, 59, 60, 61, 62, 63},
304 .oobfree = { {2, 30} }
307 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
310 32, 33, 34, 35, 36, 37, 38, 39,
311 40, 41, 42, 43, 44, 45, 46, 47,
312 48, 49, 50, 51, 52, 53, 54, 55,
313 56, 57, 58, 59, 60, 61, 62, 63,
314 96, 97, 98, 99, 100, 101, 102, 103,
315 104, 105, 106, 107, 108, 109, 110, 111,
316 112, 113, 114, 115, 116, 117, 118, 119,
317 120, 121, 122, 123, 124, 125, 126, 127},
318 /* Bootrom looks in bytes 0 & 5 for bad blocks */
319 .oobfree = { {6, 26}, { 64, 32} }
322 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
325 32, 33, 34, 35, 36, 37, 38, 39,
326 40, 41, 42, 43, 44, 45, 46, 47,
327 48, 49, 50, 51, 52, 53, 54, 55,
328 56, 57, 58, 59, 60, 61, 62, 63},
332 /* Define a default flash type setting serve as flash detecting only */
333 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
335 #define NDTR0_tCH(c) (min((c), 7) << 19)
336 #define NDTR0_tCS(c) (min((c), 7) << 16)
337 #define NDTR0_tWH(c) (min((c), 7) << 11)
338 #define NDTR0_tWP(c) (min((c), 7) << 8)
339 #define NDTR0_tRH(c) (min((c), 7) << 3)
340 #define NDTR0_tRP(c) (min((c), 7) << 0)
342 #define NDTR1_tR(c) (min((c), 65535) << 16)
343 #define NDTR1_tWHR(c) (min((c), 15) << 4)
344 #define NDTR1_tAR(c) (min((c), 15) << 0)
346 /* convert nano-seconds to nand flash controller clock cycles */
347 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
349 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
351 .compatible = "marvell,pxa3xx-nand",
352 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
355 .compatible = "marvell,armada370-nand",
356 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
360 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
362 static enum pxa3xx_nand_variant
363 pxa3xx_nand_get_variant(struct platform_device *pdev)
365 const struct of_device_id *of_id =
366 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
368 return PXA3XX_NAND_VARIANT_PXA;
369 return (enum pxa3xx_nand_variant)of_id->data;
372 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
373 const struct pxa3xx_nand_timing *t)
375 struct pxa3xx_nand_info *info = host->info_data;
376 unsigned long nand_clk = clk_get_rate(info->clk);
377 uint32_t ndtr0, ndtr1;
379 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
380 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
381 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
382 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
383 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
384 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
386 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
387 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
388 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
390 info->ndtr0cs0 = ndtr0;
391 info->ndtr1cs0 = ndtr1;
392 nand_writel(info, NDTR0CS0, ndtr0);
393 nand_writel(info, NDTR1CS0, ndtr1);
397 * Set the data and OOB size, depending on the selected
398 * spare and ECC configuration.
399 * Only applicable to READ0, READOOB and PAGEPROG commands.
401 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
402 struct mtd_info *mtd)
404 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
406 info->data_size = mtd->writesize;
410 info->oob_size = info->spare_size;
412 info->oob_size += info->ecc_size;
416 * NOTE: it is a must to set ND_RUN firstly, then write
417 * command buffer, otherwise, it does not work.
418 * We enable all the interrupt at the same time, and
419 * let pxa3xx_nand_irq to handle all logic.
421 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
425 ndcr = info->reg_ndcr;
430 nand_writel(info, NDECCCTRL, 0x1);
432 ndcr &= ~NDCR_ECC_EN;
434 nand_writel(info, NDECCCTRL, 0x0);
440 ndcr &= ~NDCR_DMA_EN;
443 ndcr |= NDCR_SPARE_EN;
445 ndcr &= ~NDCR_SPARE_EN;
449 /* clear status bits and run */
450 nand_writel(info, NDSR, NDSR_MASK);
451 nand_writel(info, NDCR, 0);
452 nand_writel(info, NDCR, ndcr);
455 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
458 int timeout = NAND_STOP_DELAY;
460 /* wait RUN bit in NDCR become 0 */
461 ndcr = nand_readl(info, NDCR);
462 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
463 ndcr = nand_readl(info, NDCR);
468 ndcr &= ~NDCR_ND_RUN;
469 nand_writel(info, NDCR, ndcr);
471 /* clear status bits */
472 nand_writel(info, NDSR, NDSR_MASK);
475 static void __maybe_unused
476 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
480 ndcr = nand_readl(info, NDCR);
481 nand_writel(info, NDCR, ndcr & ~int_mask);
484 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
488 ndcr = nand_readl(info, NDCR);
489 nand_writel(info, NDCR, ndcr | int_mask);
492 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
499 * According to the datasheet, when reading from NDDB
500 * with BCH enabled, after each 32 bytes reads, we
501 * have to make sure that the NDSR.RDDREQ bit is set.
503 * Drain the FIFO 8 32 bits reads at a time, and skip
504 * the polling on the last read.
507 readsl(info->mmio_base + NDDB, data, 8);
509 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
510 val & NDSR_RDDREQ, 1000, 5000);
512 dev_err(&info->pdev->dev,
513 "Timeout on RDDREQ while draining the FIFO\n");
522 readsl(info->mmio_base + NDDB, data, len);
525 static void handle_data_pio(struct pxa3xx_nand_info *info)
527 unsigned int do_bytes = min(info->data_size, info->chunk_size);
529 switch (info->state) {
530 case STATE_PIO_WRITING:
531 writesl(info->mmio_base + NDDB,
532 info->data_buff + info->data_buff_pos,
533 DIV_ROUND_UP(do_bytes, 4));
535 if (info->oob_size > 0)
536 writesl(info->mmio_base + NDDB,
537 info->oob_buff + info->oob_buff_pos,
538 DIV_ROUND_UP(info->oob_size, 4));
540 case STATE_PIO_READING:
542 info->data_buff + info->data_buff_pos,
543 DIV_ROUND_UP(do_bytes, 4));
545 if (info->oob_size > 0)
547 info->oob_buff + info->oob_buff_pos,
548 DIV_ROUND_UP(info->oob_size, 4));
551 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
556 /* Update buffer pointers for multi-page read/write */
557 info->data_buff_pos += do_bytes;
558 info->oob_buff_pos += info->oob_size;
559 info->data_size -= do_bytes;
563 static void start_data_dma(struct pxa3xx_nand_info *info)
565 struct pxa_dma_desc *desc = info->data_desc;
566 int dma_len = ALIGN(info->data_size + info->oob_size, 32);
568 desc->ddadr = DDADR_STOP;
569 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
571 switch (info->state) {
572 case STATE_DMA_WRITING:
573 desc->dsadr = info->data_buff_phys;
574 desc->dtadr = info->mmio_phys + NDDB;
575 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
577 case STATE_DMA_READING:
578 desc->dtadr = info->data_buff_phys;
579 desc->dsadr = info->mmio_phys + NDDB;
580 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
583 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
588 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
589 DDADR(info->data_dma_ch) = info->data_desc_addr;
590 DCSR(info->data_dma_ch) |= DCSR_RUN;
593 static void pxa3xx_nand_data_dma_irq(int channel, void *data)
595 struct pxa3xx_nand_info *info = data;
598 dcsr = DCSR(channel);
599 DCSR(channel) = dcsr;
601 if (dcsr & DCSR_BUSERR) {
602 info->retcode = ERR_DMABUSERR;
605 info->state = STATE_DMA_DONE;
606 enable_int(info, NDCR_INT_MASK);
607 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
610 static void start_data_dma(struct pxa3xx_nand_info *info)
614 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
616 struct pxa3xx_nand_info *info = data;
618 handle_data_pio(info);
620 info->state = STATE_CMD_DONE;
621 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
626 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
628 struct pxa3xx_nand_info *info = devid;
629 unsigned int status, is_completed = 0, is_ready = 0;
630 unsigned int ready, cmd_done;
631 irqreturn_t ret = IRQ_HANDLED;
634 ready = NDSR_FLASH_RDY;
635 cmd_done = NDSR_CS0_CMDD;
638 cmd_done = NDSR_CS1_CMDD;
641 status = nand_readl(info, NDSR);
643 if (status & NDSR_UNCORERR)
644 info->retcode = ERR_UNCORERR;
645 if (status & NDSR_CORERR) {
646 info->retcode = ERR_CORERR;
647 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
649 info->ecc_err_cnt = NDSR_ERR_CNT(status);
651 info->ecc_err_cnt = 1;
654 * Each chunk composing a page is corrected independently,
655 * and we need to store maximum number of corrected bitflips
656 * to return it to the MTD layer in ecc.read_page().
658 info->max_bitflips = max_t(unsigned int,
662 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
663 /* whether use dma to transfer data */
665 disable_int(info, NDCR_INT_MASK);
666 info->state = (status & NDSR_RDDREQ) ?
667 STATE_DMA_READING : STATE_DMA_WRITING;
668 start_data_dma(info);
669 goto NORMAL_IRQ_EXIT;
671 info->state = (status & NDSR_RDDREQ) ?
672 STATE_PIO_READING : STATE_PIO_WRITING;
673 ret = IRQ_WAKE_THREAD;
674 goto NORMAL_IRQ_EXIT;
677 if (status & cmd_done) {
678 info->state = STATE_CMD_DONE;
681 if (status & ready) {
682 info->state = STATE_READY;
687 * Clear all status bit before issuing the next command, which
688 * can and will alter the status bits and will deserve a new
689 * interrupt on its own. This lets the controller exit the IRQ
691 nand_writel(info, NDSR, status);
693 if (status & NDSR_WRCMDREQ) {
694 status &= ~NDSR_WRCMDREQ;
695 info->state = STATE_CMD_HANDLE;
698 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
699 * must be loaded by writing directly either 12 or 16
700 * bytes directly to NDCB0, four bytes at a time.
702 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
703 * but each NDCBx register can be read.
705 nand_writel(info, NDCB0, info->ndcb0);
706 nand_writel(info, NDCB0, info->ndcb1);
707 nand_writel(info, NDCB0, info->ndcb2);
709 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
710 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
711 nand_writel(info, NDCB0, info->ndcb3);
715 complete(&info->cmd_complete);
717 complete(&info->dev_ready);
/* Return 1 when every byte of buf[0..len) is 0xff (erased flash), else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
730 static void set_command_address(struct pxa3xx_nand_info *info,
731 unsigned int page_size, uint16_t column, int page_addr)
733 /* small page addr setting */
734 if (page_size < PAGE_CHUNK_SIZE) {
735 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
740 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
743 if (page_addr & 0xFF0000)
744 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
750 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
752 struct pxa3xx_nand_host *host = info->host[info->cs];
753 struct mtd_info *mtd = host->mtd;
755 /* reset data and oob column point to handle data */
759 info->data_buff_pos = 0;
760 info->oob_buff_pos = 0;
763 info->retcode = ERR_NONE;
764 info->ecc_err_cnt = 0;
770 case NAND_CMD_PAGEPROG:
772 case NAND_CMD_READOOB:
773 pxa3xx_set_datasize(info, mtd);
785 * If we are about to issue a read command, or about to set
786 * the write address, then clean the data buffer.
788 if (command == NAND_CMD_READ0 ||
789 command == NAND_CMD_READOOB ||
790 command == NAND_CMD_SEQIN) {
792 info->buf_count = mtd->writesize + mtd->oobsize;
793 memset(info->data_buff, 0xFF, info->buf_count);
798 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
799 int ext_cmd_type, uint16_t column, int page_addr)
801 int addr_cycle, exec_cmd;
802 struct pxa3xx_nand_host *host;
803 struct mtd_info *mtd;
805 host = info->host[info->cs];
811 info->ndcb0 = NDCB0_CSEL;
815 if (command == NAND_CMD_SEQIN)
818 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
819 + host->col_addr_cycles);
822 case NAND_CMD_READOOB:
824 info->buf_start = column;
825 info->ndcb0 |= NDCB0_CMD_TYPE(0)
829 if (command == NAND_CMD_READOOB)
830 info->buf_start += mtd->writesize;
833 * Multiple page read needs an 'extended command type' field,
834 * which is either naked-read or last-read according to the
837 if (mtd->writesize == PAGE_CHUNK_SIZE) {
838 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
839 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
840 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
842 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
843 info->ndcb3 = info->chunk_size +
847 set_command_address(info, mtd->writesize, column, page_addr);
852 info->buf_start = column;
853 set_command_address(info, mtd->writesize, 0, page_addr);
856 * Multiple page programming needs to execute the initial
857 * SEQIN command that sets the page address.
859 if (mtd->writesize > PAGE_CHUNK_SIZE) {
860 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
861 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
864 /* No data transfer in this case */
870 case NAND_CMD_PAGEPROG:
871 if (is_buf_blank(info->data_buff,
872 (mtd->writesize + mtd->oobsize))) {
877 /* Second command setting for large pages */
878 if (mtd->writesize > PAGE_CHUNK_SIZE) {
880 * Multiple page write uses the 'extended command'
881 * field. This can be used to issue a command dispatch
882 * or a naked-write depending on the current stage.
884 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
886 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
887 info->ndcb3 = info->chunk_size +
891 * This is the command dispatch that completes a chunked
892 * page program operation.
894 if (info->data_size == 0) {
895 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
896 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
903 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
907 | (NAND_CMD_PAGEPROG << 8)
914 info->buf_count = INIT_BUFFER_SIZE;
915 info->ndcb0 |= NDCB0_CMD_TYPE(0)
919 info->ndcb1 = (column & 0xFF);
920 info->ndcb3 = INIT_BUFFER_SIZE;
921 info->data_size = INIT_BUFFER_SIZE;
924 case NAND_CMD_READID:
925 info->buf_count = READ_ID_BYTES;
926 info->ndcb0 |= NDCB0_CMD_TYPE(3)
929 info->ndcb1 = (column & 0xFF);
933 case NAND_CMD_STATUS:
935 info->ndcb0 |= NDCB0_CMD_TYPE(4)
942 case NAND_CMD_ERASE1:
943 info->ndcb0 |= NDCB0_CMD_TYPE(2)
947 | (NAND_CMD_ERASE2 << 8)
949 info->ndcb1 = page_addr;
954 info->ndcb0 |= NDCB0_CMD_TYPE(5)
959 case NAND_CMD_ERASE2:
965 dev_err(&info->pdev->dev, "non-supported command %x\n",
973 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
974 int column, int page_addr)
976 struct pxa3xx_nand_host *host = mtd->priv;
977 struct pxa3xx_nand_info *info = host->info_data;
981 * if this is a x16 device ,then convert the input
982 * "byte" address into a "word" address appropriate
983 * for indexing a word-oriented device
985 if (info->reg_ndcr & NDCR_DWIDTH_M)
989 * There may be different NAND chip hooked to
990 * different chip select, so check whether
991 * chip select has been changed, if yes, reset the timing
993 if (info->cs != host->cs) {
995 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
996 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
999 prepare_start_command(info, command);
1001 info->state = STATE_PREPARED;
1002 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1005 init_completion(&info->cmd_complete);
1006 init_completion(&info->dev_ready);
1007 info->need_wait = 1;
1008 pxa3xx_nand_start(info);
1010 if (!wait_for_completion_timeout(&info->cmd_complete,
1011 CHIP_DELAY_TIMEOUT)) {
1012 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1013 /* Stop State Machine for next command cycle */
1014 pxa3xx_nand_stop(info);
1017 info->state = STATE_IDLE;
1020 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1021 const unsigned command,
1022 int column, int page_addr)
1024 struct pxa3xx_nand_host *host = mtd->priv;
1025 struct pxa3xx_nand_info *info = host->info_data;
1026 int exec_cmd, ext_cmd_type;
1029 * if this is a x16 device then convert the input
1030 * "byte" address into a "word" address appropriate
1031 * for indexing a word-oriented device
1033 if (info->reg_ndcr & NDCR_DWIDTH_M)
1037 * There may be different NAND chip hooked to
1038 * different chip select, so check whether
1039 * chip select has been changed, if yes, reset the timing
1041 if (info->cs != host->cs) {
1042 info->cs = host->cs;
1043 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1044 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1047 /* Select the extended command for the first command */
1049 case NAND_CMD_READ0:
1050 case NAND_CMD_READOOB:
1051 ext_cmd_type = EXT_CMD_TYPE_MONO;
1053 case NAND_CMD_SEQIN:
1054 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1056 case NAND_CMD_PAGEPROG:
1057 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1064 prepare_start_command(info, command);
1067 * Prepare the "is ready" completion before starting a command
1068 * transaction sequence. If the command is not executed the
1069 * completion will be completed, see below.
1071 * We can do that inside the loop because the command variable
1072 * is invariant and thus so is the exec_cmd.
1074 info->need_wait = 1;
1075 init_completion(&info->dev_ready);
1077 info->state = STATE_PREPARED;
1078 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1081 info->need_wait = 0;
1082 complete(&info->dev_ready);
1086 init_completion(&info->cmd_complete);
1087 pxa3xx_nand_start(info);
1089 if (!wait_for_completion_timeout(&info->cmd_complete,
1090 CHIP_DELAY_TIMEOUT)) {
1091 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1092 /* Stop State Machine for next command cycle */
1093 pxa3xx_nand_stop(info);
1097 /* Check if the sequence is complete */
1098 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1102 * After a splitted program command sequence has issued
1103 * the command dispatch, the command sequence is complete.
1105 if (info->data_size == 0 &&
1106 command == NAND_CMD_PAGEPROG &&
1107 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1110 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1111 /* Last read: issue a 'last naked read' */
1112 if (info->data_size == info->chunk_size)
1113 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1115 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1118 * If a splitted program command has no more data to transfer,
1119 * the command dispatch must be issued to complete.
1121 } else if (command == NAND_CMD_PAGEPROG &&
1122 info->data_size == 0) {
1123 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1127 info->state = STATE_IDLE;
1130 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1131 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1133 chip->write_buf(mtd, buf, mtd->writesize);
1134 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1139 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1140 struct nand_chip *chip, uint8_t *buf, int oob_required,
1143 struct pxa3xx_nand_host *host = mtd->priv;
1144 struct pxa3xx_nand_info *info = host->info_data;
1146 chip->read_buf(mtd, buf, mtd->writesize);
1147 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1149 if (info->retcode == ERR_CORERR && info->use_ecc) {
1150 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1152 } else if (info->retcode == ERR_UNCORERR) {
1154 * for blank page (all 0xff), HW will calculate its ECC as
1155 * 0, which is different from the ECC information within
1156 * OOB, ignore such uncorrectable errors
1158 if (is_buf_blank(buf, mtd->writesize))
1159 info->retcode = ERR_NONE;
1161 mtd->ecc_stats.failed++;
1164 return info->max_bitflips;
1167 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1169 struct pxa3xx_nand_host *host = mtd->priv;
1170 struct pxa3xx_nand_info *info = host->info_data;
1173 if (info->buf_start < info->buf_count)
1174 /* Has just send a new command? */
1175 retval = info->data_buff[info->buf_start++];
1180 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1182 struct pxa3xx_nand_host *host = mtd->priv;
1183 struct pxa3xx_nand_info *info = host->info_data;
1184 u16 retval = 0xFFFF;
1186 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1187 retval = *((u16 *)(info->data_buff+info->buf_start));
1188 info->buf_start += 2;
1193 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1195 struct pxa3xx_nand_host *host = mtd->priv;
1196 struct pxa3xx_nand_info *info = host->info_data;
1197 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1199 memcpy(buf, info->data_buff + info->buf_start, real_len);
1200 info->buf_start += real_len;
1203 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1204 const uint8_t *buf, int len)
1206 struct pxa3xx_nand_host *host = mtd->priv;
1207 struct pxa3xx_nand_info *info = host->info_data;
1208 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1210 memcpy(info->data_buff + info->buf_start, buf, real_len);
1211 info->buf_start += real_len;
1214 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1219 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1221 struct pxa3xx_nand_host *host = mtd->priv;
1222 struct pxa3xx_nand_info *info = host->info_data;
1224 if (info->need_wait) {
1225 info->need_wait = 0;
1226 if (!wait_for_completion_timeout(&info->dev_ready,
1227 CHIP_DELAY_TIMEOUT)) {
1228 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1229 return NAND_STATUS_FAIL;
1233 /* pxa3xx_nand_send_command has waited for command complete */
1234 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1235 if (info->retcode == ERR_NONE)
1238 return NAND_STATUS_FAIL;
1241 return NAND_STATUS_READY;
1244 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1245 const struct pxa3xx_nand_flash *f)
1247 struct platform_device *pdev = info->pdev;
1248 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1249 struct pxa3xx_nand_host *host = info->host[info->cs];
1250 uint32_t ndcr = 0x0; /* enable all interrupts */
1252 if (f->page_size != 2048 && f->page_size != 512) {
1253 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1257 if (f->flash_width != 16 && f->flash_width != 8) {
1258 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1262 /* calculate addressing information */
1263 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1265 if (f->num_blocks * f->page_per_block > 65536)
1266 host->row_addr_cycles = 3;
1268 host->row_addr_cycles = 2;
1270 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1271 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1272 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1273 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1274 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1275 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1277 ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1278 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1280 info->reg_ndcr = ndcr;
1282 pxa3xx_nand_set_timing(host, f->timing);
1286 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1288 uint32_t ndcr = nand_readl(info, NDCR);
1290 /* Set an initial chunk size */
1291 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1292 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1293 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1294 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/*
 * pxa3xx_nand_init_buff() - DMA-capable variant (ARCH_HAS_DMA build,
 * presumably; the surrounding #ifdef is elided from this excerpt).
 * Allocates the data buffer either with kmalloc (PIO path) or as a
 * DMA-coherent region (DMA path), carves a pxa_dma_desc descriptor out of
 * the tail of the coherent buffer, and claims a PXA DMA channel.
 * Returns 0 on success or a negative errno; exact returns are partly
 * elided here.
 */
1299 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1301 struct platform_device *pdev = info->pdev;
/* Descriptor lives at the very end of the allocated buffer */
1302 int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
/* PIO fallback — NOTE(review): the !use_dma guard is elided; confirm */
1305 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1306 if (info->data_buff == NULL)
/* DMA path: coherent buffer shared with the DMA engine */
1311 info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
1312 &info->data_buff_phys, GFP_KERNEL);
1313 if (info->data_buff == NULL) {
1314 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
/* Virtual and bus addresses of the embedded DMA descriptor */
1318 info->data_desc = (void *)info->data_buff + data_desc_offset;
1319 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1321 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1322 pxa3xx_nand_data_dma_irq, info);
1323 if (info->data_dma_ch < 0) {
1324 dev_err(&pdev->dev, "failed to request data dma\n");
/* Unwind the coherent allocation before reporting the channel error */
1325 dma_free_coherent(&pdev->dev, info->buf_size,
1326 info->data_buff, info->data_buff_phys);
1327 return info->data_dma_ch;
1331 * Now that DMA buffers are allocated we turn on
1332 * DMA proper for I/O operations.
/*
 * pxa3xx_nand_free_buff() - DMA-capable counterpart of init_buff above.
 * Releases the DMA channel and coherent buffer when DMA was in use,
 * otherwise frees the kmalloc'd PIO buffer.  NOTE(review): the else
 * branch structure around the kfree() is elided in this excerpt.
 */
1338 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1340 struct platform_device *pdev = info->pdev;
1341 if (info->use_dma) {
1342 pxa_free_dma(info->data_dma_ch);
1343 dma_free_coherent(&pdev->dev, info->buf_size,
1344 info->data_buff, info->data_buff_phys);
/* PIO case: plain kfree is sufficient */
1346 kfree(info->data_buff);
/*
 * pxa3xx_nand_init_buff() - no-DMA variant (presumably the #else branch
 * of the ARCH_HAS_DMA conditional — the guard itself is elided here).
 * Just kmalloc()s the PIO data buffer; error return is elided.
 */
1350 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1352 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1353 if (info->data_buff == NULL)
/*
 * pxa3xx_nand_free_buff() - no-DMA variant; releases the PIO buffer.
 * (kfree(NULL) is a no-op, so no guard is needed.)
 */
1358 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1360 kfree(info->data_buff);
/*
 * pxa3xx_nand_sensing() - probe whether a chip responds on the current
 * chip-select.  Programs conservative default timings from the first
 * builtin flash entry, then issues NAND_CMD_RESET and checks the status
 * returned by waitfunc.  NOTE(review): the chip assignment (mtd->priv,
 * presumably) and the final return are elided from this excerpt.
 */
1364 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1366 struct mtd_info *mtd;
1367 struct nand_chip *chip;
1370 mtd = info->host[info->cs]->mtd;
1373 /* use the common timing to make a try */
1374 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
/* A failing RESET status means no (working) chip on this CS */
1378 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1379 ret = chip->waitfunc(mtd, chip);
1380 if (ret & NAND_STATUS_FAIL)
/*
 * pxa_ecc_init() - translate the requested ECC requirement
 * (strength per step size, at a given page size) into the controller's
 * chunk/spare/ECC geometry and the matching nand_ecc_ctrl settings.
 * 1-bit strength selects the Hamming engine; 4/8-bit strengths select
 * BCH with precomputed OOB layouts.  Unsupported combinations fail with
 * a diagnostic.  NOTE(review): several branch interiors (ecc->strength,
 * ecc->size assignments, BCH enable flag) are elided from this excerpt;
 * do not assume they are absent in the full source.
 */
1386 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1387 struct nand_ecc_ctrl *ecc,
1388 int strength, int ecc_stepsize, int page_size)
/* 1-bit/512B Hamming on a 2KB page: whole page as one chunk */
1390 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1391 info->chunk_size = 2048;
1392 info->spare_size = 40;
1393 info->ecc_size = 24;
1394 ecc->mode = NAND_ECC_HW;
/* 1-bit/512B Hamming on a small (512B) page */
1398 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1399 info->chunk_size = 512;
1400 info->spare_size = 8;
/* NOTE(review): no ecc_size assignment visible in this branch — confirm
 * against the full source whether it is elided or genuinely missing. */
1402 ecc->mode = NAND_ECC_HW;
1407 * Required ECC: 4-bit correction per 512 bytes
1408 * Select: 16-bit correction per 2048 bytes
1410 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1412 info->chunk_size = 2048;
1413 info->spare_size = 32;
1414 info->ecc_size = 32;
1415 ecc->mode = NAND_ECC_HW;
1416 ecc->size = info->chunk_size;
1417 ecc->layout = &ecc_layout_2KB_bch4bit;
/* Same BCH-4 engine, 4KB page => two 2KB chunks per page */
1420 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1422 info->chunk_size = 2048;
1423 info->spare_size = 32;
1424 info->ecc_size = 32;
1425 ecc->mode = NAND_ECC_HW;
1426 ecc->size = info->chunk_size;
1427 ecc->layout = &ecc_layout_4KB_bch4bit;
1431 * Required ECC: 8-bit correction per 512 bytes
1432 * Select: 16-bit correction per 1024 bytes
1434 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
/* BCH-8: smaller 1KB chunks, no spare area exposed per chunk */
1436 info->chunk_size = 1024;
1437 info->spare_size = 0;
1438 info->ecc_size = 32;
1439 ecc->mode = NAND_ECC_HW;
1440 ecc->size = info->chunk_size;
1441 ecc->layout = &ecc_layout_4KB_bch8bit;
/* Fall-through: combination not implemented by this controller */
1444 dev_err(&info->pdev->dev,
1445 "ECC strength %d at page size %d is not supported\n",
1446 strength, page_size);
1450 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1451 ecc->strength, ecc->size);
/*
 * pxa3xx_nand_scan() - full device discovery and MTD setup for one chip.
 * Sequence: honour keep-config if requested; sense the chip with default
 * timings; READID and match against platform + builtin flash tables;
 * program the matched timings; build a one-entry nand_flash_dev table for
 * nand_scan_ident(); configure BBT, extended command handling for
 * >2KB pages, and ECC; then size the real data+OOB buffer and finish with
 * nand_scan_tail().  Returns 0 on success, negative errno otherwise
 * (several early-return paths are elided from this excerpt).
 */
1455 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1457 struct pxa3xx_nand_host *host = mtd->priv;
1458 struct pxa3xx_nand_info *info = host->info_data;
1459 struct platform_device *pdev = info->pdev;
1460 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
/* Scratch ID table: slot [0] is the detected part, [1] terminates */
1461 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1462 const struct pxa3xx_nand_flash *f = NULL;
1463 struct nand_chip *chip = mtd->priv;
1467 uint16_t ecc_strength, ecc_step;
/* Bootloader-programmed config wins when keep_config is set */
1469 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1472 /* Set a default chunk size */
1473 info->chunk_size = 512;
1475 ret = pxa3xx_nand_sensing(info);
1477 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
/* Read the 2-byte manufacturer/device ID straight from the data buffer */
1483 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1484 id = *((uint16_t *)(info->data_buff));
1486 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
/* id == 0 usually means the default timings were wrong for this part */
1488 dev_warn(&info->pdev->dev,
1489 "Read out ID 0, potential timing set wrong!!\n");
/* Search platform-supplied entries first, then builtins (skipping the
 * generic builtin[0] used only for sensing) */
1494 num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
1495 for (i = 0; i < num; i++) {
1496 if (i < pdata->num_flash)
1497 f = pdata->flash + i;
1499 f = &builtin_flash_types[i - pdata->num_flash + 1];
1501 /* find the chip in default list */
1502 if (f->chip_id == id)
1506 if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
1507 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
1512 ret = pxa3xx_nand_config_flash(info, f);
1514 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
/* Synthesize a nand_flash_dev entry so nand_scan_ident() accepts the part */
1518 memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1520 pxa3xx_flash_ids[0].name = f->name;
1521 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1522 pxa3xx_flash_ids[0].pagesize = f->page_size;
1523 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1524 pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1525 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1526 if (f->flash_width == 16)
1527 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1528 pxa3xx_flash_ids[1].name = NULL;
1529 def = pxa3xx_flash_ids;
1531 if (info->reg_ndcr & NDCR_DWIDTH_M)
1532 chip->options |= NAND_BUSWIDTH_16;
1534 /* Device detection must be done with ECC disabled */
1535 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1536 nand_writel(info, NDECCCTRL, 0x0);
1538 if (nand_scan_ident(mtd, 1, def))
1541 if (pdata->flash_bbt) {
1543 * We'll use a bad block table stored in-flash and don't
1544 * allow writing the bad block marker to the flash.
1546 chip->bbt_options |= NAND_BBT_USE_FLASH |
1547 NAND_BBT_NO_OOB_BBM;
1548 chip->bbt_td = &bbt_main_descr;
1549 chip->bbt_md = &bbt_mirror_descr;
1553 * If the page size is bigger than the FIFO size, let's check
1554 * we are given the right variant and then switch to the extended
1555 * (aka splitted) command handling,
1557 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1558 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1559 chip->cmdfunc = nand_cmdfunc_extended;
1561 dev_err(&info->pdev->dev,
1562 "unsupported page size on this variant\n");
/* Platform data overrides ONFI-advertised ECC requirements */
1567 if (pdata->ecc_strength && pdata->ecc_step_size) {
1568 ecc_strength = pdata->ecc_strength;
1569 ecc_step = pdata->ecc_step_size;
1571 ecc_strength = chip->ecc_strength_ds;
1572 ecc_step = chip->ecc_step_ds;
1575 /* Set default ECC strength requirements on non-ONFI devices */
1576 if (ecc_strength < 1 && ecc_step < 1) {
1581 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1582 ecc_step, mtd->writesize);
1586 /* calculate addressing information */
1587 if (mtd->writesize >= 2048)
1588 host->col_addr_cycles = 2;
1590 host->col_addr_cycles = 1;
1592 /* release the initial buffer */
1593 kfree(info->data_buff);
1595 /* allocate the real data + oob buffer */
1596 info->buf_size = mtd->writesize + mtd->oobsize;
1597 ret = pxa3xx_nand_init_buff(info);
1600 info->oob_buff = info->data_buff + mtd->writesize;
/* >64K pages need a third row-address cycle */
1602 if ((mtd->size >> chip->page_shift) > 65536)
1603 host->row_addr_cycles = 3;
1605 host->row_addr_cycles = 2;
1606 return nand_scan_tail(mtd);
/*
 * alloc_nand_resource() - allocate and wire up everything the driver
 * needs before scanning: the info structure with per-CS mtd/host/chip
 * triples in one devm allocation, the controller clock, DMA request
 * lines, MMIO, the initial detection buffer, and the threaded IRQ.
 * Stores info as the platform drvdata on success.
 * NOTE(review): several error-label lines and returns are elided from
 * this excerpt; the cleanup order (free_irq -> kfree buffer ->
 * clk_disable_unprepare) is inferred from the visible tail.
 */
1609 static int alloc_nand_resource(struct platform_device *pdev)
1611 struct pxa3xx_nand_platform_data *pdata;
1612 struct pxa3xx_nand_info *info;
1613 struct pxa3xx_nand_host *host;
1614 struct nand_chip *chip = NULL;
1615 struct mtd_info *mtd;
1619 pdata = dev_get_platdata(&pdev->dev);
1620 if (pdata->num_cs <= 0)
/* One allocation: info followed by num_cs * (mtd + host) */
1622 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1623 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1628 info->variant = pxa3xx_nand_get_variant(pdev);
1629 for (cs = 0; cs < pdata->num_cs; cs++) {
/* Carve this CS's mtd out of the tail of the info allocation */
1630 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
/* host embeds the nand_chip as its first member, so the casts alias */
1631 chip = (struct nand_chip *)(&mtd[1]);
1632 host = (struct pxa3xx_nand_host *)chip;
1633 info->host[cs] = host;
1636 host->info_data = info;
1638 mtd->owner = THIS_MODULE;
1640 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1641 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1642 chip->controller = &info->controller;
1643 chip->waitfunc = pxa3xx_nand_waitfunc;
1644 chip->select_chip = pxa3xx_nand_select_chip;
1645 chip->read_word = pxa3xx_nand_read_word;
1646 chip->read_byte = pxa3xx_nand_read_byte;
1647 chip->read_buf = pxa3xx_nand_read_buf;
1648 chip->write_buf = pxa3xx_nand_write_buf;
1649 chip->options |= NAND_NO_SUBPAGE_WRITE;
1650 chip->cmdfunc = nand_cmdfunc;
1653 spin_lock_init(&chip->controller->lock);
1654 init_waitqueue_head(&chip->controller->wq);
1655 info->clk = devm_clk_get(&pdev->dev, NULL);
1656 if (IS_ERR(info->clk)) {
1657 dev_err(&pdev->dev, "failed to get nand clock\n");
1658 return PTR_ERR(info->clk);
1660 ret = clk_prepare_enable(info->clk);
1666 * This is a dirty hack to make this driver work from
1667 * devicetree bindings. It can be removed once we have
1668 * a prober DMA controller framework for DT.
1670 if (pdev->dev.of_node &&
1671 of_machine_is_compatible("marvell,pxa3xx")) {
/* Hard-coded PXA3xx DRCMR numbers for NAND data/command channels */
1672 info->drcmr_dat = 97;
1673 info->drcmr_cmd = 99;
/* Non-DT (or non-pxa3xx) path: DMA request lines come from resources */
1675 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1678 "no resource defined for data DMA\n");
1680 goto fail_disable_clk;
1682 info->drcmr_dat = r->start;
1684 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1687 "no resource defined for cmd DMA\n");
1689 goto fail_disable_clk;
1691 info->drcmr_cmd = r->start;
1695 irq = platform_get_irq(pdev, 0);
1697 dev_err(&pdev->dev, "no IRQ resource defined\n");
1699 goto fail_disable_clk;
1702 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1703 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1704 if (IS_ERR(info->mmio_base)) {
1705 ret = PTR_ERR(info->mmio_base);
1706 goto fail_disable_clk;
1708 info->mmio_phys = r->start;
1710 /* Allocate a buffer to allow flash detection */
1711 info->buf_size = INIT_BUFFER_SIZE;
1712 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1713 if (info->data_buff == NULL) {
1715 goto fail_disable_clk;
1718 /* initialize all interrupts to be disabled */
1719 disable_int(info, NDSR_MASK);
1721 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1722 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1725 dev_err(&pdev->dev, "failed to request IRQ\n");
1729 platform_set_drvdata(pdev, info);
/* Error unwind (labels elided in this excerpt) */
1734 free_irq(irq, info);
1735 kfree(info->data_buff);
1737 clk_disable_unprepare(info->clk);
/*
 * pxa3xx_nand_remove() - tear down in reverse of alloc/scan: release the
 * IRQ, free the data buffer, gate the clock, then nand_release() each
 * registered mtd.  NOTE(review): nand_release() runs after
 * clk_disable_unprepare() here, which looks inverted (nand_release may
 * still touch the MTD layer); confirm intent against the full source
 * and driver history before changing.
 */
1741 static int pxa3xx_nand_remove(struct platform_device *pdev)
1743 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1744 struct pxa3xx_nand_platform_data *pdata;
1750 pdata = dev_get_platdata(&pdev->dev);
1752 irq = platform_get_irq(pdev, 0);
1754 free_irq(irq, info);
1755 pxa3xx_nand_free_buff(info);
1757 clk_disable_unprepare(info->clk);
1759 for (cs = 0; cs < pdata->num_cs; cs++)
1760 nand_release(info->host[cs]->mtd);
/*
 * pxa3xx_nand_probe_dt() - build platform data from devicetree
 * properties (arbiter enable, keep-config, num-cs, on-flash BBT, ECC
 * strength/step) and install it as pdev platform_data.
 * NOTE(review): the early "not a DT probe" bail-out (of_id check) and
 * the final return are elided from this excerpt.
 */
1764 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1766 struct pxa3xx_nand_platform_data *pdata;
1767 struct device_node *np = pdev->dev.of_node;
1768 const struct of_device_id *of_id =
1769 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1774 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1778 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1779 pdata->enable_arbiter = 1;
1780 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1781 pdata->keep_config = 1;
1782 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1783 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
/* of_get_nand_ecc_* return negative errno when the property is absent;
 * normalize to 0 meaning "use the chip's advertised requirement" */
1785 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1786 if (pdata->ecc_strength < 0)
1787 pdata->ecc_strength = 0;
1789 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1790 if (pdata->ecc_step_size < 0)
1791 pdata->ecc_step_size = 0;
1793 pdev->dev.platform_data = pdata;
/*
 * pxa3xx_nand_probe() - driver entry: resolve platform data (from DT if
 * present), allocate controller resources, then scan and register an MTD
 * per chip-select.  A single failing CS is only a warning; probe fails
 * outright only when no CS succeeded, in which case everything is torn
 * down via pxa3xx_nand_remove().  NOTE(review): use_dma disabling under
 * !ARCH_HAS_DMA, the probe_success bookkeeping, and several returns are
 * elided from this excerpt.
 */
1798 static int pxa3xx_nand_probe(struct platform_device *pdev)
1800 struct pxa3xx_nand_platform_data *pdata;
1801 struct mtd_part_parser_data ppdata = {};
1802 struct pxa3xx_nand_info *info;
1803 int ret, cs, probe_success;
1805 #ifndef ARCH_HAS_DMA
1808 dev_warn(&pdev->dev,
1809 "This platform can't do DMA on this device\n");
1812 ret = pxa3xx_nand_probe_dt(pdev);
1816 pdata = dev_get_platdata(&pdev->dev);
1818 dev_err(&pdev->dev, "no platform data defined\n");
1822 ret = alloc_nand_resource(pdev);
1824 dev_err(&pdev->dev, "alloc nand resource failed\n");
1828 info = platform_get_drvdata(pdev);
1830 for (cs = 0; cs < pdata->num_cs; cs++) {
1831 struct mtd_info *mtd = info->host[cs]->mtd;
1834 * The mtd name matches the one used in 'mtdparts' kernel
1835 * parameter. This name cannot be changed or otherwise
1836 * user's mtd partitions configuration would get broken.
1838 mtd->name = "pxa3xx_nand-0";
1840 ret = pxa3xx_nand_scan(mtd);
1842 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1847 ppdata.of_node = pdev->dev.of_node;
1848 ret = mtd_device_parse_register(mtd, NULL,
1849 &ppdata, pdata->parts[cs],
1850 pdata->nr_parts[cs]);
1855 if (!probe_success) {
1856 pxa3xx_nand_remove(pdev);
/*
 * pxa3xx_nand_suspend() - refuse to suspend while a command is in flight
 * (info->state busy), otherwise suspend each CS's mtd.  The per-mtd
 * suspend call inside the loop is elided from this excerpt.
 */
1864 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1866 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1867 struct pxa3xx_nand_platform_data *pdata;
1868 struct mtd_info *mtd;
1871 pdata = dev_get_platdata(&pdev->dev);
1873 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1877 for (cs = 0; cs < pdata->num_cs; cs++) {
1878 mtd = info->host[cs]->mtd;
/*
 * pxa3xx_nand_resume() - re-arm the controller after suspend: mask all
 * interrupts, invalidate the cached chip-select so the next command
 * reprograms timings, clear the spurious NDSR status bits the clock
 * cycle leaves behind, then resume each CS's mtd (the cs invalidation
 * and per-mtd resume call are elided from this excerpt).
 */
1885 static int pxa3xx_nand_resume(struct platform_device *pdev)
1887 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1888 struct pxa3xx_nand_platform_data *pdata;
1889 struct mtd_info *mtd;
1892 pdata = dev_get_platdata(&pdev->dev);
1893 /* We don't want to handle interrupt without calling mtd routine */
1894 disable_int(info, NDCR_INT_MASK);
1897 * Directly set the chip select to a invalid value,
1898 * then the driver would reset the timing according
1899 * to current chip select at the beginning of cmdfunc
1904 * As the spec says, the NDSR would be updated to 0x1800 when
1905 * doing the nand_clk disable/enable.
1906 * To prevent it damaging state machine of the driver, clear
1907 * all status before resume
1909 nand_writel(info, NDSR, NDSR_MASK);
1910 for (cs = 0; cs < pdata->num_cs; cs++) {
1911 mtd = info->host[cs]->mtd;
/* !CONFIG_PM stubs (presumably the #else branch of a CONFIG_PM guard
 * elided from this excerpt): no PM callbacks registered. */
1918 #define pxa3xx_nand_suspend NULL
1919 #define pxa3xx_nand_resume NULL
/*
 * Platform driver glue: matches either the "pxa3xx-nand" platform device
 * name or the DT compatible table, and registers probe/remove plus the
 * legacy (non-dev_pm_ops) suspend/resume callbacks.  The nested .driver
 * initializer braces are elided from this excerpt.
 */
1922 static struct platform_driver pxa3xx_nand_driver = {
1924 .name = "pxa3xx-nand",
1925 .of_match_table = pxa3xx_nand_dt_ids,
1927 .probe = pxa3xx_nand_probe,
1928 .remove = pxa3xx_nand_remove,
1929 .suspend = pxa3xx_nand_suspend,
1930 .resume = pxa3xx_nand_resume,
/* Boilerplate module registration */
1933 module_platform_driver(pxa3xx_nand_driver);
1935 MODULE_LICENSE("GPL");
1936 MODULE_DESCRIPTION("PXA3xx NAND controller driver");