2 * drivers/mtd/nand/pxa3xx_nand.c
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/delay.h>
20 #include <linux/clk.h>
21 #include <linux/mtd/mtd.h>
22 #include <linux/mtd/nand.h>
23 #include <linux/mtd/partitions.h>
25 #include <linux/iopoll.h>
26 #include <linux/irq.h>
27 #include <linux/slab.h>
29 #include <linux/of_device.h>
30 #include <linux/of_mtd.h>
32 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
40 #include <linux/platform_data/mtd-nand-pxa3xx.h>
42 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
43 #define NAND_STOP_DELAY msecs_to_jiffies(40)
44 #define PAGE_CHUNK_SIZE (2048)
47 * Define a buffer size for the initial command that detects the flash device:
48 * STATUS, READID and PARAM. The largest of these is the PARAM command,
51 #define INIT_BUFFER_SIZE 256
53 /* registers and bit definitions */
54 #define NDCR (0x00) /* Control register */
55 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
56 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
57 #define NDSR (0x14) /* Status Register */
58 #define NDPCR (0x18) /* Page Count Register */
59 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
60 #define NDBDR1 (0x20) /* Bad Block Register 1 */
61 #define NDECCCTRL (0x28) /* ECC control */
62 #define NDDB (0x40) /* Data Buffer */
63 #define NDCB0 (0x48) /* Command Buffer0 */
64 #define NDCB1 (0x4C) /* Command Buffer1 */
65 #define NDCB2 (0x50) /* Command Buffer2 */
67 #define NDCR_SPARE_EN (0x1 << 31)
68 #define NDCR_ECC_EN (0x1 << 30)
69 #define NDCR_DMA_EN (0x1 << 29)
70 #define NDCR_ND_RUN (0x1 << 28)
71 #define NDCR_DWIDTH_C (0x1 << 27)
72 #define NDCR_DWIDTH_M (0x1 << 26)
73 #define NDCR_PAGE_SZ (0x1 << 24)
74 #define NDCR_NCSX (0x1 << 23)
75 #define NDCR_ND_MODE (0x3 << 21)
76 #define NDCR_NAND_MODE (0x0)
77 #define NDCR_CLR_PG_CNT (0x1 << 20)
78 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
79 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
80 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
82 #define NDCR_RA_START (0x1 << 15)
83 #define NDCR_PG_PER_BLK (0x1 << 14)
84 #define NDCR_ND_ARB_EN (0x1 << 12)
85 #define NDCR_INT_MASK (0xFFF)
87 #define NDSR_MASK (0xfff)
88 #define NDSR_ERR_CNT_OFF (16)
89 #define NDSR_ERR_CNT_MASK (0x1f)
90 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
91 #define NDSR_RDY (0x1 << 12)
92 #define NDSR_FLASH_RDY (0x1 << 11)
93 #define NDSR_CS0_PAGED (0x1 << 10)
94 #define NDSR_CS1_PAGED (0x1 << 9)
95 #define NDSR_CS0_CMDD (0x1 << 8)
96 #define NDSR_CS1_CMDD (0x1 << 7)
97 #define NDSR_CS0_BBD (0x1 << 6)
98 #define NDSR_CS1_BBD (0x1 << 5)
99 #define NDSR_UNCORERR (0x1 << 4)
100 #define NDSR_CORERR (0x1 << 3)
101 #define NDSR_WRDREQ (0x1 << 2)
102 #define NDSR_RDDREQ (0x1 << 1)
103 #define NDSR_WRCMDREQ (0x1)
105 #define NDCB0_LEN_OVRD (0x1 << 28)
106 #define NDCB0_ST_ROW_EN (0x1 << 26)
107 #define NDCB0_AUTO_RS (0x1 << 25)
108 #define NDCB0_CSEL (0x1 << 24)
109 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
110 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
111 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
112 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
113 #define NDCB0_NC (0x1 << 20)
114 #define NDCB0_DBC (0x1 << 19)
115 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
116 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
117 #define NDCB0_CMD2_MASK (0xff << 8)
118 #define NDCB0_CMD1_MASK (0xff)
119 #define NDCB0_ADDR_CYC_SHIFT (16)
121 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
122 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
123 #define EXT_CMD_TYPE_READ 4 /* Read */
124 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
125 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
126 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
127 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
129 /* macros for registers read/write */
130 #define nand_writel(info, off, val) \
131 writel_relaxed((val), (info)->mmio_base + (off))
133 #define nand_readl(info, off) \
134 readl_relaxed((info)->mmio_base + (off))
136 /* error code and state */
159 enum pxa3xx_nand_variant {
160 PXA3XX_NAND_VARIANT_PXA,
161 PXA3XX_NAND_VARIANT_ARMADA370,
164 struct pxa3xx_nand_host {
165 struct nand_chip chip;
166 struct mtd_info *mtd;
169 /* page size of attached chip */
173 /* calculated from pxa3xx_nand_flash data */
174 unsigned int col_addr_cycles;
175 unsigned int row_addr_cycles;
176 size_t read_id_bytes;
180 struct pxa3xx_nand_info {
181 struct nand_hw_control controller;
182 struct platform_device *pdev;
185 void __iomem *mmio_base;
186 unsigned long mmio_phys;
187 struct completion cmd_complete, dev_ready;
189 unsigned int buf_start;
190 unsigned int buf_count;
191 unsigned int buf_size;
192 unsigned int data_buff_pos;
193 unsigned int oob_buff_pos;
195 /* DMA information */
199 unsigned char *data_buff;
200 unsigned char *oob_buff;
201 dma_addr_t data_buff_phys;
203 struct pxa_dma_desc *data_desc;
204 dma_addr_t data_desc_addr;
206 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
210 * This driver supports NFCv1 (as found in PXA SoC)
211 * and NFCv2 (as found in Armada 370/XP SoC).
213 enum pxa3xx_nand_variant variant;
216 int use_ecc; /* use HW ECC ? */
217 int ecc_bch; /* using BCH ECC? */
218 int use_dma; /* use DMA ? */
219 int use_spare; /* use spare ? */
222 unsigned int data_size; /* data to be read from FIFO */
223 unsigned int chunk_size; /* split commands chunk size */
224 unsigned int oob_size;
225 unsigned int spare_size;
226 unsigned int ecc_size;
227 unsigned int ecc_err_cnt;
228 unsigned int max_bitflips;
231 /* cached register value */
236 /* generated NDCBx register values */
243 static bool use_dma = 1;
244 module_param(use_dma, bool, 0444);
245 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
247 static struct pxa3xx_nand_timing timing[] = {
248 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
249 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
250 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
251 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
254 static struct pxa3xx_nand_flash builtin_flash_types[] = {
255 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
256 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
257 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
258 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
259 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
260 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
261 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
262 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
263 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
266 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
267 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
269 static struct nand_bbt_descr bbt_main_descr = {
270 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
271 | NAND_BBT_2BIT | NAND_BBT_VERSION,
275 .maxblocks = 8, /* Last 8 blocks in each chip */
276 .pattern = bbt_pattern
279 static struct nand_bbt_descr bbt_mirror_descr = {
280 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
281 | NAND_BBT_2BIT | NAND_BBT_VERSION,
285 .maxblocks = 8, /* Last 8 blocks in each chip */
286 .pattern = bbt_mirror_pattern
289 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
292 32, 33, 34, 35, 36, 37, 38, 39,
293 40, 41, 42, 43, 44, 45, 46, 47,
294 48, 49, 50, 51, 52, 53, 54, 55,
295 56, 57, 58, 59, 60, 61, 62, 63},
296 .oobfree = { {2, 30} }
299 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
302 32, 33, 34, 35, 36, 37, 38, 39,
303 40, 41, 42, 43, 44, 45, 46, 47,
304 48, 49, 50, 51, 52, 53, 54, 55,
305 56, 57, 58, 59, 60, 61, 62, 63,
306 96, 97, 98, 99, 100, 101, 102, 103,
307 104, 105, 106, 107, 108, 109, 110, 111,
308 112, 113, 114, 115, 116, 117, 118, 119,
309 120, 121, 122, 123, 124, 125, 126, 127},
310 /* Bootrom looks in bytes 0 & 5 for bad blocks */
311 .oobfree = { {6, 26}, { 64, 32} }
314 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
317 32, 33, 34, 35, 36, 37, 38, 39,
318 40, 41, 42, 43, 44, 45, 46, 47,
319 48, 49, 50, 51, 52, 53, 54, 55,
320 56, 57, 58, 59, 60, 61, 62, 63},
324 /* Define a default flash type setting serve as flash detecting only */
325 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
327 #define NDTR0_tCH(c) (min((c), 7) << 19)
328 #define NDTR0_tCS(c) (min((c), 7) << 16)
329 #define NDTR0_tWH(c) (min((c), 7) << 11)
330 #define NDTR0_tWP(c) (min((c), 7) << 8)
331 #define NDTR0_tRH(c) (min((c), 7) << 3)
332 #define NDTR0_tRP(c) (min((c), 7) << 0)
334 #define NDTR1_tR(c) (min((c), 65535) << 16)
335 #define NDTR1_tWHR(c) (min((c), 15) << 4)
336 #define NDTR1_tAR(c) (min((c), 15) << 0)
338 /* convert nano-seconds to nand flash controller clock cycles */
339 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
341 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
343 .compatible = "marvell,pxa3xx-nand",
344 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
347 .compatible = "marvell,armada370-nand",
348 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
352 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
354 static enum pxa3xx_nand_variant
355 pxa3xx_nand_get_variant(struct platform_device *pdev)
357 const struct of_device_id *of_id =
358 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
360 return PXA3XX_NAND_VARIANT_PXA;
361 return (enum pxa3xx_nand_variant)of_id->data;
364 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
365 const struct pxa3xx_nand_timing *t)
367 struct pxa3xx_nand_info *info = host->info_data;
368 unsigned long nand_clk = clk_get_rate(info->clk);
369 uint32_t ndtr0, ndtr1;
371 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
372 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
373 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
374 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
375 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
376 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
378 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
379 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
380 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
382 info->ndtr0cs0 = ndtr0;
383 info->ndtr1cs0 = ndtr1;
384 nand_writel(info, NDTR0CS0, ndtr0);
385 nand_writel(info, NDTR1CS0, ndtr1);
389 * Set the data and OOB size, depending on the selected
390 * spare and ECC configuration.
391 * Only applicable to READ0, READOOB and PAGEPROG commands.
393 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
394 struct mtd_info *mtd)
396 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
398 info->data_size = mtd->writesize;
402 info->oob_size = info->spare_size;
404 info->oob_size += info->ecc_size;
408 * NOTE: it is a must to set ND_RUN firstly, then write
409 * command buffer, otherwise, it does not work.
410 * We enable all the interrupt at the same time, and
411 * let pxa3xx_nand_irq to handle all logic.
413 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
417 ndcr = info->reg_ndcr;
422 nand_writel(info, NDECCCTRL, 0x1);
424 ndcr &= ~NDCR_ECC_EN;
426 nand_writel(info, NDECCCTRL, 0x0);
432 ndcr &= ~NDCR_DMA_EN;
435 ndcr |= NDCR_SPARE_EN;
437 ndcr &= ~NDCR_SPARE_EN;
441 /* clear status bits and run */
442 nand_writel(info, NDCR, 0);
443 nand_writel(info, NDSR, NDSR_MASK);
444 nand_writel(info, NDCR, ndcr);
447 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
450 int timeout = NAND_STOP_DELAY;
452 /* wait RUN bit in NDCR become 0 */
453 ndcr = nand_readl(info, NDCR);
454 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
455 ndcr = nand_readl(info, NDCR);
460 ndcr &= ~NDCR_ND_RUN;
461 nand_writel(info, NDCR, ndcr);
463 /* clear status bits */
464 nand_writel(info, NDSR, NDSR_MASK);
467 static void __maybe_unused
468 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
472 ndcr = nand_readl(info, NDCR);
473 nand_writel(info, NDCR, ndcr & ~int_mask);
476 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
480 ndcr = nand_readl(info, NDCR);
481 nand_writel(info, NDCR, ndcr | int_mask);
484 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
491 * According to the datasheet, when reading from NDDB
492 * with BCH enabled, after each 32 bytes reads, we
493 * have to make sure that the NDSR.RDDREQ bit is set.
495 * Drain the FIFO 8 32 bits reads at a time, and skip
496 * the polling on the last read.
499 readsl(info->mmio_base + NDDB, data, 8);
501 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
502 val & NDSR_RDDREQ, 1000, 5000);
504 dev_err(&info->pdev->dev,
505 "Timeout on RDDREQ while draining the FIFO\n");
514 readsl(info->mmio_base + NDDB, data, len);
517 static void handle_data_pio(struct pxa3xx_nand_info *info)
519 unsigned int do_bytes = min(info->data_size, info->chunk_size);
521 switch (info->state) {
522 case STATE_PIO_WRITING:
523 writesl(info->mmio_base + NDDB,
524 info->data_buff + info->data_buff_pos,
525 DIV_ROUND_UP(do_bytes, 4));
527 if (info->oob_size > 0)
528 writesl(info->mmio_base + NDDB,
529 info->oob_buff + info->oob_buff_pos,
530 DIV_ROUND_UP(info->oob_size, 4));
532 case STATE_PIO_READING:
534 info->data_buff + info->data_buff_pos,
535 DIV_ROUND_UP(do_bytes, 4));
537 if (info->oob_size > 0)
539 info->oob_buff + info->oob_buff_pos,
540 DIV_ROUND_UP(info->oob_size, 4));
543 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
548 /* Update buffer pointers for multi-page read/write */
549 info->data_buff_pos += do_bytes;
550 info->oob_buff_pos += info->oob_size;
551 info->data_size -= do_bytes;
555 static void start_data_dma(struct pxa3xx_nand_info *info)
557 struct pxa_dma_desc *desc = info->data_desc;
558 int dma_len = ALIGN(info->data_size + info->oob_size, 32);
560 desc->ddadr = DDADR_STOP;
561 desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
563 switch (info->state) {
564 case STATE_DMA_WRITING:
565 desc->dsadr = info->data_buff_phys;
566 desc->dtadr = info->mmio_phys + NDDB;
567 desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
569 case STATE_DMA_READING:
570 desc->dtadr = info->data_buff_phys;
571 desc->dsadr = info->mmio_phys + NDDB;
572 desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
575 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
580 DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
581 DDADR(info->data_dma_ch) = info->data_desc_addr;
582 DCSR(info->data_dma_ch) |= DCSR_RUN;
585 static void pxa3xx_nand_data_dma_irq(int channel, void *data)
587 struct pxa3xx_nand_info *info = data;
590 dcsr = DCSR(channel);
591 DCSR(channel) = dcsr;
593 if (dcsr & DCSR_BUSERR) {
594 info->retcode = ERR_DMABUSERR;
597 info->state = STATE_DMA_DONE;
598 enable_int(info, NDCR_INT_MASK);
599 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/* No-op stub for builds without PXA DMA support. */
static void start_data_dma(struct pxa3xx_nand_info *info)
{}
606 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
608 struct pxa3xx_nand_info *info = data;
610 handle_data_pio(info);
612 info->state = STATE_CMD_DONE;
613 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
618 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
620 struct pxa3xx_nand_info *info = devid;
621 unsigned int status, is_completed = 0, is_ready = 0;
622 unsigned int ready, cmd_done;
623 irqreturn_t ret = IRQ_HANDLED;
626 ready = NDSR_FLASH_RDY;
627 cmd_done = NDSR_CS0_CMDD;
630 cmd_done = NDSR_CS1_CMDD;
633 status = nand_readl(info, NDSR);
635 if (status & NDSR_UNCORERR)
636 info->retcode = ERR_UNCORERR;
637 if (status & NDSR_CORERR) {
638 info->retcode = ERR_CORERR;
639 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
641 info->ecc_err_cnt = NDSR_ERR_CNT(status);
643 info->ecc_err_cnt = 1;
646 * Each chunk composing a page is corrected independently,
647 * and we need to store maximum number of corrected bitflips
648 * to return it to the MTD layer in ecc.read_page().
650 info->max_bitflips = max_t(unsigned int,
654 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
655 /* whether use dma to transfer data */
657 disable_int(info, NDCR_INT_MASK);
658 info->state = (status & NDSR_RDDREQ) ?
659 STATE_DMA_READING : STATE_DMA_WRITING;
660 start_data_dma(info);
661 goto NORMAL_IRQ_EXIT;
663 info->state = (status & NDSR_RDDREQ) ?
664 STATE_PIO_READING : STATE_PIO_WRITING;
665 ret = IRQ_WAKE_THREAD;
666 goto NORMAL_IRQ_EXIT;
669 if (status & cmd_done) {
670 info->state = STATE_CMD_DONE;
673 if (status & ready) {
674 info->state = STATE_READY;
678 if (status & NDSR_WRCMDREQ) {
679 nand_writel(info, NDSR, NDSR_WRCMDREQ);
680 status &= ~NDSR_WRCMDREQ;
681 info->state = STATE_CMD_HANDLE;
684 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
685 * must be loaded by writing directly either 12 or 16
686 * bytes directly to NDCB0, four bytes at a time.
688 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
689 * but each NDCBx register can be read.
691 nand_writel(info, NDCB0, info->ndcb0);
692 nand_writel(info, NDCB0, info->ndcb1);
693 nand_writel(info, NDCB0, info->ndcb2);
695 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
696 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
697 nand_writel(info, NDCB0, info->ndcb3);
700 /* clear NDSR to let the controller exit the IRQ */
701 nand_writel(info, NDSR, status);
703 complete(&info->cmd_complete);
705 complete(&info->dev_ready);
710 static inline int is_buf_blank(uint8_t *buf, size_t len)
712 for (; len > 0; len--)
718 static void set_command_address(struct pxa3xx_nand_info *info,
719 unsigned int page_size, uint16_t column, int page_addr)
721 /* small page addr setting */
722 if (page_size < PAGE_CHUNK_SIZE) {
723 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
728 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
731 if (page_addr & 0xFF0000)
732 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
738 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
740 struct pxa3xx_nand_host *host = info->host[info->cs];
741 struct mtd_info *mtd = host->mtd;
743 /* reset data and oob column point to handle data */
747 info->data_buff_pos = 0;
748 info->oob_buff_pos = 0;
751 info->retcode = ERR_NONE;
752 info->ecc_err_cnt = 0;
758 case NAND_CMD_PAGEPROG:
760 case NAND_CMD_READOOB:
761 pxa3xx_set_datasize(info, mtd);
773 * If we are about to issue a read command, or about to set
774 * the write address, then clean the data buffer.
776 if (command == NAND_CMD_READ0 ||
777 command == NAND_CMD_READOOB ||
778 command == NAND_CMD_SEQIN) {
780 info->buf_count = mtd->writesize + mtd->oobsize;
781 memset(info->data_buff, 0xFF, info->buf_count);
786 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
787 int ext_cmd_type, uint16_t column, int page_addr)
789 int addr_cycle, exec_cmd;
790 struct pxa3xx_nand_host *host;
791 struct mtd_info *mtd;
793 host = info->host[info->cs];
799 info->ndcb0 = NDCB0_CSEL;
803 if (command == NAND_CMD_SEQIN)
806 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
807 + host->col_addr_cycles);
810 case NAND_CMD_READOOB:
812 info->buf_start = column;
813 info->ndcb0 |= NDCB0_CMD_TYPE(0)
817 if (command == NAND_CMD_READOOB)
818 info->buf_start += mtd->writesize;
821 * Multiple page read needs an 'extended command type' field,
822 * which is either naked-read or last-read according to the
825 if (mtd->writesize == PAGE_CHUNK_SIZE) {
826 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
827 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
828 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
830 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
831 info->ndcb3 = info->chunk_size +
835 set_command_address(info, mtd->writesize, column, page_addr);
840 info->buf_start = column;
841 set_command_address(info, mtd->writesize, 0, page_addr);
844 * Multiple page programming needs to execute the initial
845 * SEQIN command that sets the page address.
847 if (mtd->writesize > PAGE_CHUNK_SIZE) {
848 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
849 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
852 /* No data transfer in this case */
858 case NAND_CMD_PAGEPROG:
859 if (is_buf_blank(info->data_buff,
860 (mtd->writesize + mtd->oobsize))) {
865 /* Second command setting for large pages */
866 if (mtd->writesize > PAGE_CHUNK_SIZE) {
868 * Multiple page write uses the 'extended command'
869 * field. This can be used to issue a command dispatch
870 * or a naked-write depending on the current stage.
872 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
874 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
875 info->ndcb3 = info->chunk_size +
879 * This is the command dispatch that completes a chunked
880 * page program operation.
882 if (info->data_size == 0) {
883 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
884 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
891 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
895 | (NAND_CMD_PAGEPROG << 8)
902 info->buf_count = 256;
903 info->ndcb0 |= NDCB0_CMD_TYPE(0)
907 info->ndcb1 = (column & 0xFF);
909 info->data_size = 256;
912 case NAND_CMD_READID:
913 info->buf_count = host->read_id_bytes;
914 info->ndcb0 |= NDCB0_CMD_TYPE(3)
917 info->ndcb1 = (column & 0xFF);
921 case NAND_CMD_STATUS:
923 info->ndcb0 |= NDCB0_CMD_TYPE(4)
930 case NAND_CMD_ERASE1:
931 info->ndcb0 |= NDCB0_CMD_TYPE(2)
935 | (NAND_CMD_ERASE2 << 8)
937 info->ndcb1 = page_addr;
942 info->ndcb0 |= NDCB0_CMD_TYPE(5)
947 case NAND_CMD_ERASE2:
953 dev_err(&info->pdev->dev, "non-supported command %x\n",
961 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
962 int column, int page_addr)
964 struct pxa3xx_nand_host *host = mtd->priv;
965 struct pxa3xx_nand_info *info = host->info_data;
969 * if this is a x16 device ,then convert the input
970 * "byte" address into a "word" address appropriate
971 * for indexing a word-oriented device
973 if (info->reg_ndcr & NDCR_DWIDTH_M)
977 * There may be different NAND chip hooked to
978 * different chip select, so check whether
979 * chip select has been changed, if yes, reset the timing
981 if (info->cs != host->cs) {
983 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
984 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
987 prepare_start_command(info, command);
989 info->state = STATE_PREPARED;
990 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
993 init_completion(&info->cmd_complete);
994 init_completion(&info->dev_ready);
996 pxa3xx_nand_start(info);
998 if (!wait_for_completion_timeout(&info->cmd_complete,
999 CHIP_DELAY_TIMEOUT)) {
1000 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1001 /* Stop State Machine for next command cycle */
1002 pxa3xx_nand_stop(info);
1005 info->state = STATE_IDLE;
1008 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1009 const unsigned command,
1010 int column, int page_addr)
1012 struct pxa3xx_nand_host *host = mtd->priv;
1013 struct pxa3xx_nand_info *info = host->info_data;
1014 int exec_cmd, ext_cmd_type;
1017 * if this is a x16 device then convert the input
1018 * "byte" address into a "word" address appropriate
1019 * for indexing a word-oriented device
1021 if (info->reg_ndcr & NDCR_DWIDTH_M)
1025 * There may be different NAND chip hooked to
1026 * different chip select, so check whether
1027 * chip select has been changed, if yes, reset the timing
1029 if (info->cs != host->cs) {
1030 info->cs = host->cs;
1031 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1032 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1035 /* Select the extended command for the first command */
1037 case NAND_CMD_READ0:
1038 case NAND_CMD_READOOB:
1039 ext_cmd_type = EXT_CMD_TYPE_MONO;
1041 case NAND_CMD_SEQIN:
1042 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1044 case NAND_CMD_PAGEPROG:
1045 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1052 prepare_start_command(info, command);
1055 * Prepare the "is ready" completion before starting a command
1056 * transaction sequence. If the command is not executed the
1057 * completion will be completed, see below.
1059 * We can do that inside the loop because the command variable
1060 * is invariant and thus so is the exec_cmd.
1062 info->need_wait = 1;
1063 init_completion(&info->dev_ready);
1065 info->state = STATE_PREPARED;
1066 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1069 info->need_wait = 0;
1070 complete(&info->dev_ready);
1074 init_completion(&info->cmd_complete);
1075 pxa3xx_nand_start(info);
1077 if (!wait_for_completion_timeout(&info->cmd_complete,
1078 CHIP_DELAY_TIMEOUT)) {
1079 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1080 /* Stop State Machine for next command cycle */
1081 pxa3xx_nand_stop(info);
1085 /* Check if the sequence is complete */
1086 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1090 * After a splitted program command sequence has issued
1091 * the command dispatch, the command sequence is complete.
1093 if (info->data_size == 0 &&
1094 command == NAND_CMD_PAGEPROG &&
1095 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1098 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1099 /* Last read: issue a 'last naked read' */
1100 if (info->data_size == info->chunk_size)
1101 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1103 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1106 * If a splitted program command has no more data to transfer,
1107 * the command dispatch must be issued to complete.
1109 } else if (command == NAND_CMD_PAGEPROG &&
1110 info->data_size == 0) {
1111 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1115 info->state = STATE_IDLE;
1118 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1119 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1121 chip->write_buf(mtd, buf, mtd->writesize);
1122 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1127 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1128 struct nand_chip *chip, uint8_t *buf, int oob_required,
1131 struct pxa3xx_nand_host *host = mtd->priv;
1132 struct pxa3xx_nand_info *info = host->info_data;
1134 chip->read_buf(mtd, buf, mtd->writesize);
1135 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1137 if (info->retcode == ERR_CORERR && info->use_ecc) {
1138 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1140 } else if (info->retcode == ERR_UNCORERR) {
1142 * for blank page (all 0xff), HW will calculate its ECC as
1143 * 0, which is different from the ECC information within
1144 * OOB, ignore such uncorrectable errors
1146 if (is_buf_blank(buf, mtd->writesize))
1147 info->retcode = ERR_NONE;
1149 mtd->ecc_stats.failed++;
1152 return info->max_bitflips;
1155 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1157 struct pxa3xx_nand_host *host = mtd->priv;
1158 struct pxa3xx_nand_info *info = host->info_data;
1161 if (info->buf_start < info->buf_count)
1162 /* Has just send a new command? */
1163 retval = info->data_buff[info->buf_start++];
1168 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1170 struct pxa3xx_nand_host *host = mtd->priv;
1171 struct pxa3xx_nand_info *info = host->info_data;
1172 u16 retval = 0xFFFF;
1174 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1175 retval = *((u16 *)(info->data_buff+info->buf_start));
1176 info->buf_start += 2;
1181 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1183 struct pxa3xx_nand_host *host = mtd->priv;
1184 struct pxa3xx_nand_info *info = host->info_data;
1185 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1187 memcpy(buf, info->data_buff + info->buf_start, real_len);
1188 info->buf_start += real_len;
1191 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1192 const uint8_t *buf, int len)
1194 struct pxa3xx_nand_host *host = mtd->priv;
1195 struct pxa3xx_nand_info *info = host->info_data;
1196 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1198 memcpy(info->data_buff + info->buf_start, buf, real_len);
1199 info->buf_start += real_len;
/* Chip selection is handled per command via info->cs; nothing to do here. */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1207 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1209 struct pxa3xx_nand_host *host = mtd->priv;
1210 struct pxa3xx_nand_info *info = host->info_data;
1212 if (info->need_wait) {
1213 info->need_wait = 0;
1214 if (!wait_for_completion_timeout(&info->dev_ready,
1215 CHIP_DELAY_TIMEOUT)) {
1216 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1217 return NAND_STATUS_FAIL;
1221 /* pxa3xx_nand_send_command has waited for command complete */
1222 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1223 if (info->retcode == ERR_NONE)
1226 return NAND_STATUS_FAIL;
1229 return NAND_STATUS_READY;
1232 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1233 const struct pxa3xx_nand_flash *f)
1235 struct platform_device *pdev = info->pdev;
1236 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1237 struct pxa3xx_nand_host *host = info->host[info->cs];
1238 uint32_t ndcr = 0x0; /* enable all interrupts */
1240 if (f->page_size != 2048 && f->page_size != 512) {
1241 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1245 if (f->flash_width != 16 && f->flash_width != 8) {
1246 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1250 /* calculate flash information */
1251 host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
1253 /* calculate addressing information */
1254 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
1256 if (f->num_blocks * f->page_per_block > 65536)
1257 host->row_addr_cycles = 3;
1259 host->row_addr_cycles = 2;
1261 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1262 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1263 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1264 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1265 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1266 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1268 ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
1269 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1271 info->reg_ndcr = ndcr;
1273 pxa3xx_nand_set_timing(host, f->timing);
1277 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1280 * We set 0 by hard coding here, for we don't support keep_config
1281 * when there is more than one chip attached to the controller
1283 struct pxa3xx_nand_host *host = info->host[0];
1284 uint32_t ndcr = nand_readl(info, NDCR);
1286 if (ndcr & NDCR_PAGE_SZ) {
1287 /* Controller's FIFO size */
1288 info->chunk_size = 2048;
1289 host->read_id_bytes = 4;
1291 info->chunk_size = 512;
1292 host->read_id_bytes = 2;
1295 /* Set an initial chunk size */
1296 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1297 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1298 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1303 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1305 struct platform_device *pdev = info->pdev;
1306 int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
1309 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1310 if (info->data_buff == NULL)
1315 info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
1316 &info->data_buff_phys, GFP_KERNEL);
1317 if (info->data_buff == NULL) {
1318 dev_err(&pdev->dev, "failed to allocate dma buffer\n");
1322 info->data_desc = (void *)info->data_buff + data_desc_offset;
1323 info->data_desc_addr = info->data_buff_phys + data_desc_offset;
1325 info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
1326 pxa3xx_nand_data_dma_irq, info);
1327 if (info->data_dma_ch < 0) {
1328 dev_err(&pdev->dev, "failed to request data dma\n");
1329 dma_free_coherent(&pdev->dev, info->buf_size,
1330 info->data_buff, info->data_buff_phys);
1331 return info->data_dma_ch;
1335 * Now that DMA buffers are allocated we turn on
1336 * DMA proper for I/O operations.
/*
 * pxa3xx_nand_free_buff - DMA-capable buffer teardown (ARCH_HAS_DMA variant).
 *
 * Releases the DMA channel and coherent buffer when DMA was in use;
 * otherwise just frees the kmalloc'ed buffer (the else is elided here).
 */
1342 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1344	struct platform_device *pdev = info->pdev;
1345	if (info->use_dma) {
1346		pxa_free_dma(info->data_dma_ch);
1347		dma_free_coherent(&pdev->dev, info->buf_size,
1348				  info->data_buff, info->data_buff_phys);
/* non-DMA path: plain kfree of the kmalloc'ed buffer */
1350		kfree(info->data_buff);
/*
 * pxa3xx_nand_init_buff - fallback variant for platforms without DMA
 * support: just kmalloc the data buffer (error return line elided).
 */
1354 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1356	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1357	if (info->data_buff == NULL)
/* Fallback (no-DMA) teardown: release the kmalloc'ed data buffer. */
1362 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1364	kfree(info->data_buff);
/*
 * pxa3xx_nand_sensing - probe whether a NAND chip responds on the current
 * chip select.
 *
 * Programs the controller with the first built-in (default) flash timing,
 * then issues a RESET command and checks the resulting status.  Used during
 * scan before the real chip type is known.
 */
1368 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1370	struct mtd_info *mtd;
1371	struct nand_chip *chip;
1374	mtd = info->host[info->cs]->mtd;
1377	/* use the common timing to make a try */
1378	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
/* RESET the chip and let waitfunc report the status register */
1382	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1383	ret = chip->waitfunc(mtd, chip);
1384	if (ret & NAND_STATUS_FAIL)
/*
 * pxa_ecc_init - select an ECC scheme matching the requested strength,
 * step size and page size.
 *
 * Fills in the controller-side geometry (chunk_size / spare_size / ecc_size)
 * and the nand_ecc_ctrl fields for each supported combination; unsupported
 * combinations produce an error message (return value line elided).
 * Some branch bodies (ecc->size / ecc->layout / ecc->strength assignments)
 * are elided in this extract.
 */
1390 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1391			struct nand_ecc_ctrl *ecc,
1392			int strength, int ecc_stepsize, int page_size)
/* 1-bit/512B on 2KiB pages: hardware ECC over the whole page */
1394	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1395		info->chunk_size = 2048;
1396		info->spare_size = 40;
1397		info->ecc_size = 24;
1398		ecc->mode = NAND_ECC_HW;
/* 1-bit/512B on 512B (small-page) devices */
1402	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1403		info->chunk_size = 512;
1404		info->spare_size = 8;
1406		ecc->mode = NAND_ECC_HW;
1411	 * Required ECC: 4-bit correction per 512 bytes
1412	 * Select: 16-bit correction per 2048 bytes
/* BCH-4 on 2KiB pages */
1414	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1416		info->chunk_size = 2048;
1417		info->spare_size = 32;
1418		info->ecc_size = 32;
1419		ecc->mode = NAND_ECC_HW;
1420		ecc->size = info->chunk_size;
1421		ecc->layout = &ecc_layout_2KB_bch4bit;
/* BCH-4 on 4KiB pages: two 2KiB chunks per page */
1424	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1426		info->chunk_size = 2048;
1427		info->spare_size = 32;
1428		info->ecc_size = 32;
1429		ecc->mode = NAND_ECC_HW;
1430		ecc->size = info->chunk_size;
1431		ecc->layout = &ecc_layout_4KB_bch4bit;
1435	 * Required ECC: 8-bit correction per 512 bytes
1436	 * Select: 16-bit correction per 1024 bytes
/* BCH-8 on 4KiB pages: 1KiB chunks, no spare area available to the user */
1438	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1440		info->chunk_size = 1024;
1441		info->spare_size = 0;
1442		info->ecc_size = 32;
1443		ecc->mode = NAND_ECC_HW;
1444		ecc->size = info->chunk_size;
1445		ecc->layout = &ecc_layout_4KB_bch8bit;
/* No matching scheme: report and fail */
1448		dev_err(&info->pdev->dev,
1449			"ECC strength %d at page size %d is not supported\n",
1450			strength, page_size);
1454	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1455		 ecc->strength, ecc->size);
/*
 * pxa3xx_nand_scan - full device detection and MTD setup for one chip select.
 *
 * Flow: optionally adopt the bootloader config (keep_config), sense the chip
 * with a RESET, read the 2-byte ID, match it against platform-data and
 * built-in flash tables, program the controller, run nand_scan_ident() with
 * the synthesized flash-ID entry, configure BBT/extended-command/ECC
 * options, size the real data+OOB buffer, and finish with nand_scan_tail().
 * Several error-path lines (gotos/returns) are elided in this extract.
 */
1459 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1461	struct pxa3xx_nand_host *host = mtd->priv;
1462	struct pxa3xx_nand_info *info = host->info_data;
1463	struct platform_device *pdev = info->pdev;
1464	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1465	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1466	const struct pxa3xx_nand_flash *f = NULL;
1467	struct nand_chip *chip = mtd->priv;
1471	uint16_t ecc_strength, ecc_step;
/* keep_config: trust the bootloader-programmed controller state */
1473	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1476	ret = pxa3xx_nand_sensing(info);
1478		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
/* Read the 2-byte manufacturer/device ID into the init buffer */
1484	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1485	id = *((uint16_t *)(info->data_buff));
1487		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
1489		dev_warn(&info->pdev->dev,
1490			 "Read out ID 0, potential timing set wrong!!\n");
/*
 * Search platform-data flash entries first, then the built-in table
 * (skipping builtin_flash_types[0], which is the generic sensing entry).
 */
1495	num = ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1;
1496	for (i = 0; i < num; i++) {
1497		if (i < pdata->num_flash)
1498			f = pdata->flash + i;
1500			f = &builtin_flash_types[i - pdata->num_flash + 1];
1502		/* find the chip in default list */
1503		if (f->chip_id == id)
1507	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
1508		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
1513	ret = pxa3xx_nand_config_flash(info, f);
1515		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
/* Synthesize a one-entry nand_flash_dev table for nand_scan_ident() */
1519	memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1521	pxa3xx_flash_ids[0].name = f->name;
1522	pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1523	pxa3xx_flash_ids[0].pagesize = f->page_size;
1524	chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1525	pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1526	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1527	if (f->flash_width == 16)
1528		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1529	pxa3xx_flash_ids[1].name = NULL;
1530	def = pxa3xx_flash_ids;
1532	if (info->reg_ndcr & NDCR_DWIDTH_M)
1533		chip->options |= NAND_BUSWIDTH_16;
1535	/* Device detection must be done with ECC disabled */
1536	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1537		nand_writel(info, NDECCCTRL, 0x0);
1539	if (nand_scan_ident(mtd, 1, def))
1542	if (pdata->flash_bbt) {
1544		 * We'll use a bad block table stored in-flash and don't
1545		 * allow writing the bad block marker to the flash.
1547		chip->bbt_options |= NAND_BBT_USE_FLASH |
1548				     NAND_BBT_NO_OOB_BBM;
1549		chip->bbt_td = &bbt_main_descr;
1550		chip->bbt_md = &bbt_mirror_descr;
1554	 * If the page size is bigger than the FIFO size, let's check
1555	 * we are given the right variant and then switch to the extended
1556	 * (aka splitted) command handling,
1558	if (mtd->writesize > PAGE_CHUNK_SIZE) {
1559		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1560			chip->cmdfunc = nand_cmdfunc_extended;
1562			dev_err(&info->pdev->dev,
1563				"unsupported page size on this variant\n");
/* Prefer platform-data ECC settings; fall back to the chip's ONFI values */
1568	if (pdata->ecc_strength && pdata->ecc_step_size) {
1569		ecc_strength = pdata->ecc_strength;
1570		ecc_step = pdata->ecc_step_size;
1572		ecc_strength = chip->ecc_strength_ds;
1573		ecc_step = chip->ecc_step_ds;
1576	/* Set default ECC strength requirements on non-ONFI devices */
1577	if (ecc_strength < 1 && ecc_step < 1) {
1582	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1583			   ecc_step, mtd->writesize);
1587	/* calculate addressing information */
1588	if (mtd->writesize >= 2048)
1589		host->col_addr_cycles = 2;
1591		host->col_addr_cycles = 1;
1593	/* release the initial buffer */
1594	kfree(info->data_buff);
1596	/* allocate the real data + oob buffer */
1597	info->buf_size = mtd->writesize + mtd->oobsize;
1598	ret = pxa3xx_nand_init_buff(info);
1601	info->oob_buff = info->data_buff + mtd->writesize;
/* Chips larger than 64K pages need a third row-address cycle */
1603	if ((mtd->size >> chip->page_shift) > 65536)
1604		host->row_addr_cycles = 3;
1606		host->row_addr_cycles = 2;
1607	return nand_scan_tail(mtd);
/*
 * alloc_nand_resource - allocate and wire up the driver state for all chip
 * selects: info + per-cs (mtd, nand_chip/host) in one devm allocation, clock,
 * DMA request numbers, IRQ, MMIO region and the initial detection buffer.
 *
 * On success, stores info as the platform drvdata.  Failure paths unwind via
 * labels (fail_disable_clk etc.; some label lines are elided in this extract).
 */
1610 static int alloc_nand_resource(struct platform_device *pdev)
1612	struct pxa3xx_nand_platform_data *pdata;
1613	struct pxa3xx_nand_info *info;
1614	struct pxa3xx_nand_host *host;
1615	struct nand_chip *chip = NULL;
1616	struct mtd_info *mtd;
1620	pdata = dev_get_platdata(&pdev->dev);
1621	if (pdata->num_cs <= 0)
/* One contiguous allocation: info followed by num_cs (mtd + host) pairs */
1623	info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1624			    sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1629	info->variant = pxa3xx_nand_get_variant(pdev);
1630	for (cs = 0; cs < pdata->num_cs; cs++) {
/* Per-cs layout: mtd immediately followed by nand_chip; host embeds chip */
1631		mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1632		chip = (struct nand_chip *)(&mtd[1]);
1633		host = (struct pxa3xx_nand_host *)chip;
1634		info->host[cs] = host;
1637		host->info_data = info;
1639		mtd->owner = THIS_MODULE;
/* Hook all nand_chip ops to this driver's implementations */
1641		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
1642		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
1643		chip->controller        = &info->controller;
1644		chip->waitfunc		= pxa3xx_nand_waitfunc;
1645		chip->select_chip	= pxa3xx_nand_select_chip;
1646		chip->read_word		= pxa3xx_nand_read_word;
1647		chip->read_byte		= pxa3xx_nand_read_byte;
1648		chip->read_buf		= pxa3xx_nand_read_buf;
1649		chip->write_buf		= pxa3xx_nand_write_buf;
1650		chip->options		|= NAND_NO_SUBPAGE_WRITE;
1651		chip->cmdfunc		= nand_cmdfunc;
1654	spin_lock_init(&chip->controller->lock);
1655	init_waitqueue_head(&chip->controller->wq);
1656	info->clk = devm_clk_get(&pdev->dev, NULL);
1657	if (IS_ERR(info->clk)) {
1658		dev_err(&pdev->dev, "failed to get nand clock\n");
1659		return PTR_ERR(info->clk);
1661	ret = clk_prepare_enable(info->clk);
1667	 * This is a dirty hack to make this driver work from
1668	 * devicetree bindings. It can be removed once we have
1669	 * a prober DMA controller framework for DT.
/* DT on pxa3xx: hard-code the known DRCMR numbers for data/cmd DMA */
1671	if (pdev->dev.of_node &&
1672	    of_machine_is_compatible("marvell,pxa3xx")) {
1673		info->drcmr_dat = 97;
1674		info->drcmr_cmd = 99;
/* Non-DT: fetch DMA request numbers from platform resources */
1676		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1679			"no resource defined for data DMA\n");
1681			goto fail_disable_clk;
1683		info->drcmr_dat = r->start;
1685		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1688			"no resource defined for cmd DMA\n");
1690			goto fail_disable_clk;
1692		info->drcmr_cmd = r->start;
1696	irq = platform_get_irq(pdev, 0);
1698		dev_err(&pdev->dev, "no IRQ resource defined\n");
1700		goto fail_disable_clk;
1703	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1704	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1705	if (IS_ERR(info->mmio_base)) {
1706		ret = PTR_ERR(info->mmio_base);
1707		goto fail_disable_clk;
1709	info->mmio_phys = r->start;
1711	/* Allocate a buffer to allow flash detection */
1712	info->buf_size = INIT_BUFFER_SIZE;
1713	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1714	if (info->data_buff == NULL) {
1716		goto fail_disable_clk;
1719	/* initialize all interrupts to be disabled */
1720	disable_int(info, NDSR_MASK);
1722	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1723				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1726		dev_err(&pdev->dev, "failed to request IRQ\n");
1730	platform_set_drvdata(pdev, info);
/* Error unwind: release IRQ, detection buffer, then the clock */
1735	free_irq(irq, info);
1736	kfree(info->data_buff);
1738	clk_disable_unprepare(info->clk);
/*
 * pxa3xx_nand_remove - undo alloc_nand_resource()/scan: free the IRQ and
 * buffers, stop the clock, and release every registered MTD device.
 */
1742 static int pxa3xx_nand_remove(struct platform_device *pdev)
1744	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1745	struct pxa3xx_nand_platform_data *pdata;
1751	pdata = dev_get_platdata(&pdev->dev);
1753	irq = platform_get_irq(pdev, 0);
1755		free_irq(irq, info);
1756	pxa3xx_nand_free_buff(info);
1758	clk_disable_unprepare(info->clk);
1760	for (cs = 0; cs < pdata->num_cs; cs++)
1761		nand_release(info->host[cs]->mtd);
/*
 * pxa3xx_nand_probe_dt - build platform data from devicetree properties.
 *
 * Allocates a pdata structure and fills it from the "marvell,nand-*",
 * "num-cs" and generic NAND DT properties, then installs it as the device's
 * platform data.  Negative results from the optional ECC properties are
 * clamped to 0 ("unspecified").
 */
1765 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1767	struct pxa3xx_nand_platform_data *pdata;
1768	struct device_node *np = pdev->dev.of_node;
1769	const struct of_device_id *of_id =
1770			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1775	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1779	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1780		pdata->enable_arbiter = 1;
1781	if (of_get_property(np, "marvell,nand-keep-config", NULL))
1782		pdata->keep_config = 1;
1783	of_property_read_u32(np, "num-cs", &pdata->num_cs);
1784	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1786	pdata->ecc_strength = of_get_nand_ecc_strength(np);
1787	if (pdata->ecc_strength < 0)
1788		pdata->ecc_strength = 0;
1790	pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1791	if (pdata->ecc_step_size < 0)
1792		pdata->ecc_step_size = 0;
1794	pdev->dev.platform_data = pdata;
/*
 * pxa3xx_nand_probe - platform driver probe entry point.
 *
 * Resolves platform data (from DT if present), allocates the controller
 * resources, then scans and registers an MTD device for each chip select.
 * If no chip select probes successfully, everything is torn down again via
 * pxa3xx_nand_remove() (probe_success bookkeeping lines are elided here).
 */
1799 static int pxa3xx_nand_probe(struct platform_device *pdev)
1801	struct pxa3xx_nand_platform_data *pdata;
1802	struct mtd_part_parser_data ppdata = {};
1803	struct pxa3xx_nand_info *info;
1804	int ret, cs, probe_success;
/* Warn when built for a platform without the PXA DMA support this uses */
1806 #ifndef ARCH_HAS_DMA
1809		dev_warn(&pdev->dev,
1810			 "This platform can't do DMA on this device\n");
1813	ret = pxa3xx_nand_probe_dt(pdev);
1817	pdata = dev_get_platdata(&pdev->dev);
1819		dev_err(&pdev->dev, "no platform data defined\n");
1823	ret = alloc_nand_resource(pdev);
1825		dev_err(&pdev->dev, "alloc nand resource failed\n");
1829	info = platform_get_drvdata(pdev);
1831	for (cs = 0; cs < pdata->num_cs; cs++) {
1832		struct mtd_info *mtd = info->host[cs]->mtd;
1835		 * The mtd name matches the one used in 'mtdparts' kernel
1836		 * parameter. This name cannot be changed or otherwise
1837		 * user's mtd partitions configuration would get broken.
1839		mtd->name = "pxa3xx_nand-0";
1841		ret = pxa3xx_nand_scan(mtd);
1843			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
/* Register partitions from cmdline/DT/platform data */
1848		ppdata.of_node = pdev->dev.of_node;
1849		ret = mtd_device_parse_register(mtd, NULL,
1850						&ppdata, pdata->parts[cs],
1851						pdata->nr_parts[cs]);
1856	if (!probe_success) {
1857		pxa3xx_nand_remove(pdev);
/*
 * pxa3xx_nand_suspend - legacy platform PM suspend hook.
 *
 * Refuses to suspend while a command is in flight (info->state busy check,
 * partially elided), then iterates the per-cs MTD devices (the per-mtd
 * action lines are elided in this extract).
 */
1865 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1867	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1868	struct pxa3xx_nand_platform_data *pdata;
1869	struct mtd_info *mtd;
1872	pdata = dev_get_platdata(&pdev->dev);
1874		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1878	for (cs = 0; cs < pdata->num_cs; cs++) {
1879		mtd = info->host[cs]->mtd;
/*
 * pxa3xx_nand_resume - legacy platform PM resume hook.
 *
 * Masks all controller interrupts, clears stale NDSR status bits that the
 * clock gate toggling can leave behind, and walks the per-cs MTD devices
 * (the chip-select invalidation and per-mtd action lines are elided here).
 */
1886 static int pxa3xx_nand_resume(struct platform_device *pdev)
1888	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1889	struct pxa3xx_nand_platform_data *pdata;
1890	struct mtd_info *mtd;
1893	pdata = dev_get_platdata(&pdev->dev);
1894	/* We don't want to handle interrupt without calling mtd routine */
1895	disable_int(info, NDCR_INT_MASK);
1898	 * Directly set the chip select to a invalid value,
1899	 * then the driver would reset the timing according
1900	 * to current chip select at the beginning of cmdfunc
1905	 * As the spec says, the NDSR would be updated to 0x1800 when
1906	 * doing the nand_clk disable/enable.
1907	 * To prevent it damaging state machine of the driver, clear
1908	 * all status before resume
1910	nand_writel(info, NDSR, NDSR_MASK);
1911	for (cs = 0; cs < pdata->num_cs; cs++) {
1912		mtd = info->host[cs]->mtd;
1919 #define pxa3xx_nand_suspend NULL
1920 #define pxa3xx_nand_resume NULL
/*
 * Platform driver glue: binds by name ("pxa3xx-nand") or by the DT match
 * table, with legacy suspend/resume hooks (stubbed to NULL when CONFIG_PM
 * is off — see the #define fallbacks above).
 */
1923 static struct platform_driver pxa3xx_nand_driver = {
1925		.name	= "pxa3xx-nand",
1926		.of_match_table = pxa3xx_nand_dt_ids,
1928	.probe		= pxa3xx_nand_probe,
1929	.remove		= pxa3xx_nand_remove,
1930	.suspend	= pxa3xx_nand_suspend,
1931	.resume		= pxa3xx_nand_resume,
1934 module_platform_driver(pxa3xx_nand_driver);
1936 MODULE_LICENSE("GPL");
1937 MODULE_DESCRIPTION("PXA3xx NAND controller driver");