2 * drivers/mtd/nand/pxa3xx_nand.c
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
34 #if defined(CONFIG_ARM) && (defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP))
38 #include <linux/platform_data/mtd-nand-pxa3xx.h>
40 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
41 #define NAND_STOP_DELAY msecs_to_jiffies(40)
42 #define PAGE_CHUNK_SIZE (2048)
45 * Define a buffer size for the initial command that detects the flash device:
46 * STATUS, READID and PARAM.
47 * ONFI param page is 256 bytes, and there are three redundant copies
48 * to be read. JEDEC param page is 512 bytes, and there are also three
49 * redundant copies to be read.
50 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
52 #define INIT_BUFFER_SIZE 2048
54 /* registers and bit definitions */
55 #define NDCR (0x00) /* Control register */
56 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
57 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
58 #define NDSR (0x14) /* Status Register */
59 #define NDPCR (0x18) /* Page Count Register */
60 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
61 #define NDBDR1 (0x20) /* Bad Block Register 1 */
62 #define NDECCCTRL (0x28) /* ECC control */
63 #define NDDB (0x40) /* Data Buffer */
64 #define NDCB0 (0x48) /* Command Buffer0 */
65 #define NDCB1 (0x4C) /* Command Buffer1 */
66 #define NDCB2 (0x50) /* Command Buffer2 */
68 #define NDCR_SPARE_EN (0x1 << 31)
69 #define NDCR_ECC_EN (0x1 << 30)
70 #define NDCR_DMA_EN (0x1 << 29)
71 #define NDCR_ND_RUN (0x1 << 28)
72 #define NDCR_DWIDTH_C (0x1 << 27)
73 #define NDCR_DWIDTH_M (0x1 << 26)
74 #define NDCR_PAGE_SZ (0x1 << 24)
75 #define NDCR_NCSX (0x1 << 23)
76 #define NDCR_ND_MODE (0x3 << 21)
77 #define NDCR_NAND_MODE (0x0)
78 #define NDCR_CLR_PG_CNT (0x1 << 20)
79 #define NDCR_STOP_ON_UNCOR (0x1 << 19)
80 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
81 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
83 #define NDCR_RA_START (0x1 << 15)
84 #define NDCR_PG_PER_BLK (0x1 << 14)
85 #define NDCR_ND_ARB_EN (0x1 << 12)
86 #define NDCR_INT_MASK (0xFFF)
88 #define NDSR_MASK (0xfff)
89 #define NDSR_ERR_CNT_OFF (16)
90 #define NDSR_ERR_CNT_MASK (0x1f)
91 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
92 #define NDSR_RDY (0x1 << 12)
93 #define NDSR_FLASH_RDY (0x1 << 11)
94 #define NDSR_CS0_PAGED (0x1 << 10)
95 #define NDSR_CS1_PAGED (0x1 << 9)
96 #define NDSR_CS0_CMDD (0x1 << 8)
97 #define NDSR_CS1_CMDD (0x1 << 7)
98 #define NDSR_CS0_BBD (0x1 << 6)
99 #define NDSR_CS1_BBD (0x1 << 5)
100 #define NDSR_UNCORERR (0x1 << 4)
101 #define NDSR_CORERR (0x1 << 3)
102 #define NDSR_WRDREQ (0x1 << 2)
103 #define NDSR_RDDREQ (0x1 << 1)
104 #define NDSR_WRCMDREQ (0x1)
106 #define NDCB0_LEN_OVRD (0x1 << 28)
107 #define NDCB0_ST_ROW_EN (0x1 << 26)
108 #define NDCB0_AUTO_RS (0x1 << 25)
109 #define NDCB0_CSEL (0x1 << 24)
110 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
111 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
112 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
113 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
114 #define NDCB0_NC (0x1 << 20)
115 #define NDCB0_DBC (0x1 << 19)
116 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
117 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
118 #define NDCB0_CMD2_MASK (0xff << 8)
119 #define NDCB0_CMD1_MASK (0xff)
120 #define NDCB0_ADDR_CYC_SHIFT (16)
122 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
123 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
124 #define EXT_CMD_TYPE_READ 4 /* Read */
125 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
126 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
127 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
128 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
131 * This should be large enough to read 'ONFI' and 'JEDEC'.
132 * Let's use 7 bytes, which is the maximum ID count supported
133 * by the controller (see NDCR_RD_ID_CNT_MASK).
135 #define READ_ID_BYTES 7
137 /* macros for registers read/write */
138 #define nand_writel(info, off, val) \
139 writel_relaxed((val), (info)->mmio_base + (off))
141 #define nand_readl(info, off) \
142 readl_relaxed((info)->mmio_base + (off))
144 /* error code and state */
167 enum pxa3xx_nand_variant {
168 PXA3XX_NAND_VARIANT_PXA,
169 PXA3XX_NAND_VARIANT_ARMADA370,
172 struct pxa3xx_nand_host {
173 struct nand_chip chip;
174 struct mtd_info *mtd;
177 /* page size of attached chip */
181 /* calculated from pxa3xx_nand_flash data */
182 unsigned int col_addr_cycles;
183 unsigned int row_addr_cycles;
186 struct pxa3xx_nand_info {
187 struct nand_hw_control controller;
188 struct platform_device *pdev;
191 void __iomem *mmio_base;
192 unsigned long mmio_phys;
193 struct completion cmd_complete, dev_ready;
195 unsigned int buf_start;
196 unsigned int buf_count;
197 unsigned int buf_size;
198 unsigned int data_buff_pos;
199 unsigned int oob_buff_pos;
201 /* DMA information */
202 struct scatterlist sg;
203 enum dma_data_direction dma_dir;
204 struct dma_chan *dma_chan;
205 dma_cookie_t dma_cookie;
209 unsigned char *data_buff;
210 unsigned char *oob_buff;
211 dma_addr_t data_buff_phys;
214 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
218 * This driver supports NFCv1 (as found in PXA SoC)
219 * and NFCv2 (as found in Armada 370/XP SoC).
221 enum pxa3xx_nand_variant variant;
224 int use_ecc; /* use HW ECC ? */
225 int ecc_bch; /* using BCH ECC? */
226 int use_dma; /* use DMA ? */
227 int use_spare; /* use spare ? */
230 unsigned int data_size; /* data to be read from FIFO */
231 unsigned int chunk_size; /* split commands chunk size */
232 unsigned int oob_size;
233 unsigned int spare_size;
234 unsigned int ecc_size;
235 unsigned int ecc_err_cnt;
236 unsigned int max_bitflips;
239 /* cached register value */
244 /* generated NDCBx register values */
251 static bool use_dma = 1;
252 module_param(use_dma, bool, 0444);
253 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
255 struct pxa3xx_nand_timing {
256 unsigned int tCH; /* Enable signal hold time */
257 unsigned int tCS; /* Enable signal setup time */
258 unsigned int tWH; /* ND_nWE high duration */
259 unsigned int tWP; /* ND_nWE pulse time */
260 unsigned int tRH; /* ND_nRE high duration */
261 unsigned int tRP; /* ND_nRE pulse width */
262 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
263 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
264 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
267 struct pxa3xx_nand_flash {
270 unsigned int page_per_block; /* Pages per block (PG_PER_BLK) */
271 unsigned int page_size; /* Page size in bytes (PAGE_SZ) */
272 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
273 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
274 unsigned int num_blocks; /* Number of physical blocks in Flash */
276 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
279 static struct pxa3xx_nand_timing timing[] = {
280 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
281 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
282 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
283 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
286 static struct pxa3xx_nand_flash builtin_flash_types[] = {
287 { "DEFAULT FLASH", 0, 0, 2048, 8, 8, 0, &timing[0] },
288 { "64MiB 16-bit", 0x46ec, 32, 512, 16, 16, 4096, &timing[1] },
289 { "256MiB 8-bit", 0xdaec, 64, 2048, 8, 8, 2048, &timing[1] },
290 { "4GiB 8-bit", 0xd7ec, 128, 4096, 8, 8, 8192, &timing[1] },
291 { "128MiB 8-bit", 0xa12c, 64, 2048, 8, 8, 1024, &timing[2] },
292 { "128MiB 16-bit", 0xb12c, 64, 2048, 16, 16, 1024, &timing[2] },
293 { "512MiB 8-bit", 0xdc2c, 64, 2048, 8, 8, 4096, &timing[2] },
294 { "512MiB 16-bit", 0xcc2c, 64, 2048, 16, 16, 4096, &timing[2] },
295 { "256MiB 16-bit", 0xba20, 64, 2048, 16, 16, 2048, &timing[3] },
298 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
299 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
301 static struct nand_bbt_descr bbt_main_descr = {
302 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
303 | NAND_BBT_2BIT | NAND_BBT_VERSION,
307 .maxblocks = 8, /* Last 8 blocks in each chip */
308 .pattern = bbt_pattern
311 static struct nand_bbt_descr bbt_mirror_descr = {
312 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
313 | NAND_BBT_2BIT | NAND_BBT_VERSION,
317 .maxblocks = 8, /* Last 8 blocks in each chip */
318 .pattern = bbt_mirror_pattern
321 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
324 32, 33, 34, 35, 36, 37, 38, 39,
325 40, 41, 42, 43, 44, 45, 46, 47,
326 48, 49, 50, 51, 52, 53, 54, 55,
327 56, 57, 58, 59, 60, 61, 62, 63},
328 .oobfree = { {2, 30} }
331 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
334 32, 33, 34, 35, 36, 37, 38, 39,
335 40, 41, 42, 43, 44, 45, 46, 47,
336 48, 49, 50, 51, 52, 53, 54, 55,
337 56, 57, 58, 59, 60, 61, 62, 63,
338 96, 97, 98, 99, 100, 101, 102, 103,
339 104, 105, 106, 107, 108, 109, 110, 111,
340 112, 113, 114, 115, 116, 117, 118, 119,
341 120, 121, 122, 123, 124, 125, 126, 127},
342 /* Bootrom looks in bytes 0 & 5 for bad blocks */
343 .oobfree = { {6, 26}, { 64, 32} }
346 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
349 32, 33, 34, 35, 36, 37, 38, 39,
350 40, 41, 42, 43, 44, 45, 46, 47,
351 48, 49, 50, 51, 52, 53, 54, 55,
352 56, 57, 58, 59, 60, 61, 62, 63},
356 /* Define a default flash type setting serve as flash detecting only */
357 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
359 #define NDTR0_tCH(c) (min((c), 7) << 19)
360 #define NDTR0_tCS(c) (min((c), 7) << 16)
361 #define NDTR0_tWH(c) (min((c), 7) << 11)
362 #define NDTR0_tWP(c) (min((c), 7) << 8)
363 #define NDTR0_tRH(c) (min((c), 7) << 3)
364 #define NDTR0_tRP(c) (min((c), 7) << 0)
366 #define NDTR1_tR(c) (min((c), 65535) << 16)
367 #define NDTR1_tWHR(c) (min((c), 15) << 4)
368 #define NDTR1_tAR(c) (min((c), 15) << 0)
370 /* convert nano-seconds to nand flash controller clock cycles */
371 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
373 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
375 .compatible = "marvell,pxa3xx-nand",
376 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
379 .compatible = "marvell,armada370-nand",
380 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
384 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
386 static enum pxa3xx_nand_variant
387 pxa3xx_nand_get_variant(struct platform_device *pdev)
389 const struct of_device_id *of_id =
390 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
392 return PXA3XX_NAND_VARIANT_PXA;
393 return (enum pxa3xx_nand_variant)of_id->data;
/*
 * Program the CS0 timing registers (NDTR0CS0/NDTR1CS0) from a per-flash
 * timing table, converting each nanosecond value to controller clock
 * cycles via ns2cycle().  The computed values are also cached in
 * info->ndtr0cs0/ndtr1cs0 so nand_cmdfunc() can replay them after a
 * chip-select change.
 * NOTE(review): this extract is missing the original brace/blank lines.
 */
396 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
397 const struct pxa3xx_nand_timing *t)
399 struct pxa3xx_nand_info *info = host->info_data;
400 unsigned long nand_clk = clk_get_rate(info->clk);
401 uint32_t ndtr0, ndtr1;
403 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
404 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
405 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
406 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
407 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
408 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
410 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
411 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
412 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
/* Cache for later chip-select switches, then program the registers. */
414 info->ndtr0cs0 = ndtr0;
415 info->ndtr1cs0 = ndtr1;
416 nand_writel(info, NDTR0CS0, ndtr0);
417 nand_writel(info, NDTR1CS0, ndtr1);
421 * Set the data and OOB size, depending on the selected
422 * spare and ECC configuration.
423 * Only applicable to READ0, READOOB and PAGEPROG commands.
/*
 * Derive the FIFO transfer sizes for the next READ0/READOOB/PAGEPROG
 * command: data_size from the page size, oob_size from the spare/ECC
 * configuration when NDCR_SPARE_EN is set in the cached NDCR value.
 * NOTE(review): the branches selecting when oob_size is set are partly
 * elided in this extract — confirm against the full source.
 */
425 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
426 struct mtd_info *mtd)
428 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
430 info->data_size = mtd->writesize;
434 info->oob_size = info->spare_size;
436 info->oob_size += info->ecc_size;
440 * NOTE: it is a must to set ND_RUN firstly, then write
441 * command buffer, otherwise, it does not work.
442 * We enable all the interrupt at the same time, and
443 * let pxa3xx_nand_irq to handle all logic.
/*
 * Kick off a command cycle: build NDCR from the cached value plus the
 * current use_ecc/use_dma/use_spare flags, clear stale status bits, and
 * set ND_RUN.  Per the note above, ND_RUN must be set before the command
 * buffer is written; the interrupt handler drives the rest.
 */
445 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
449 ndcr = info->reg_ndcr;
/* NDECCCTRL selects the BCH engine (NFCv2); 1 = BCH, 0 = Hamming. */
454 nand_writel(info, NDECCCTRL, 0x1);
456 ndcr &= ~NDCR_ECC_EN;
458 nand_writel(info, NDECCCTRL, 0x0);
464 ndcr &= ~NDCR_DMA_EN;
467 ndcr |= NDCR_SPARE_EN;
469 ndcr &= ~NDCR_SPARE_EN;
473 /* clear status bits and run */
474 nand_writel(info, NDSR, NDSR_MASK);
475 nand_writel(info, NDCR, 0);
476 nand_writel(info, NDCR, ndcr);
/*
 * Stop the controller state machine: poll for NDCR_ND_RUN to clear
 * (bounded by NAND_STOP_DELAY), force it clear if the timeout expires,
 * cancel any in-flight DMA and clear all status bits so the next
 * command starts from a clean state.
 */
479 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
482 int timeout = NAND_STOP_DELAY;
484 /* wait RUN bit in NDCR become 0 */
485 ndcr = nand_readl(info, NDCR);
486 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
487 ndcr = nand_readl(info, NDCR);
/* Timed out (or raced): force ND_RUN off ourselves. */
492 ndcr &= ~NDCR_ND_RUN;
493 nand_writel(info, NDCR, ndcr);
494 dmaengine_terminate_all was guarded by a dma_chan check in the full source
496 dmaengine_terminate_all(info->dma_chan);
498 /* clear status bits */
499 nand_writel(info, NDSR, NDSR_MASK);
/*
 * Enable the interrupts named in int_mask.  NDCR interrupt bits are
 * mask bits, so enabling means *clearing* them in NDCR.
 */
502 static void __maybe_unused
503 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
507 ndcr = nand_readl(info, NDCR);
508 nand_writel(info, NDCR, ndcr & ~int_mask);
/*
 * Disable the interrupts named in int_mask by *setting* the
 * corresponding NDCR mask bits (inverse of enable_int()).
 */
511 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
515 ndcr = nand_readl(info, NDCR);
516 nand_writel(info, NDCR, ndcr | int_mask);
/*
 * Read 'len' 32-bit words from the NDDB data FIFO into 'data'.  With
 * BCH enabled the datasheet requires re-checking NDSR.RDDREQ after
 * every 32 bytes, hence the chunked 8-word reads with a polled wait in
 * between; without that constraint the final readsl() drains in one go.
 * NOTE(review): the loop structure around the chunked path is partly
 * elided in this extract.
 */
519 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
526 * According to the datasheet, when reading from NDDB
527 * with BCH enabled, after each 32 bytes reads, we
528 * have to make sure that the NDSR.RDDREQ bit is set.
530 * Drain the FIFO 8 32 bits reads at a time, and skip
531 * the polling on the last read.
534 readsl(info->mmio_base + NDDB, data, 8);
536 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
537 val & NDSR_RDDREQ, 1000, 5000);
539 dev_err(&info->pdev->dev,
540 "Timeout on RDDREQ while draining the FIFO\n");
549 readsl(info->mmio_base + NDDB, data, len);
/*
 * PIO transfer of one chunk (at most chunk_size bytes) between the
 * data/oob buffers and the NDDB FIFO, direction chosen by info->state
 * (STATE_PIO_WRITING/READING).  Buffer positions and remaining
 * data_size are advanced afterwards so multi-chunk pages continue from
 * where this call left off.  Sizes are rounded up to whole 32-bit words
 * since NDDB is word-wide.
 */
552 static void handle_data_pio(struct pxa3xx_nand_info *info)
554 unsigned int do_bytes = min(info->data_size, info->chunk_size);
556 switch (info->state) {
557 case STATE_PIO_WRITING:
558 writesl(info->mmio_base + NDDB,
559 info->data_buff + info->data_buff_pos,
560 DIV_ROUND_UP(do_bytes, 4));
562 if (info->oob_size > 0)
563 writesl(info->mmio_base + NDDB,
564 info->oob_buff + info->oob_buff_pos,
565 DIV_ROUND_UP(info->oob_size, 4));
567 case STATE_PIO_READING:
/* Reads go through drain_fifo() in the full source (lines elided). */
569 info->data_buff + info->data_buff_pos,
570 DIV_ROUND_UP(do_bytes, 4));
572 if (info->oob_size > 0)
574 info->oob_buff + info->oob_buff_pos,
575 DIV_ROUND_UP(info->oob_size, 4));
578 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
583 /* Update buffer pointers for multi-page read/write */
584 info->data_buff_pos += do_bytes;
585 info->oob_buff_pos += info->oob_size;
586 info->data_size -= do_bytes;
/*
 * dmaengine completion callback for the data channel.  Checks the
 * cookie's status: on DMA_COMPLETE move to STATE_DMA_DONE, otherwise
 * record ERR_DMABUSERR.  Either way the scatterlist is unmapped, the
 * RD/WR data-request status bits are acked and interrupts re-enabled so
 * the main IRQ handler can finish the command.
 */
589 static void pxa3xx_nand_data_dma_irq(void *data)
591 struct pxa3xx_nand_info *info = data;
592 struct dma_tx_state state;
593 enum dma_status status;
595 status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
596 if (likely(status == DMA_COMPLETE)) {
597 info->state = STATE_DMA_DONE;
599 dev_err(&info->pdev->dev, "DMA error on data channel\n");
600 info->retcode = ERR_DMABUSERR;
602 dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
604 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
605 enable_int(info, NDCR_INT_MASK);
/*
 * Prepare and submit a slave-DMA transfer for the current command.
 * Direction follows info->state (STATE_DMA_WRITING -> mem-to-dev,
 * STATE_DMA_READING -> dev-to-mem).  The single scatterlist entry
 * covers data plus, when oob_size is non-zero, the spare+ECC area.
 * Completion is signalled through pxa3xx_nand_data_dma_irq().
 */
608 static void start_data_dma(struct pxa3xx_nand_info *info)
610 enum dma_transfer_direction direction;
611 struct dma_async_tx_descriptor *tx;
613 switch (info->state) {
614 case STATE_DMA_WRITING:
615 info->dma_dir = DMA_TO_DEVICE;
616 direction = DMA_MEM_TO_DEV;
618 case STATE_DMA_READING:
619 info->dma_dir = DMA_FROM_DEVICE;
620 direction = DMA_DEV_TO_MEM;
623 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
627 info->sg.length = info->data_size +
628 (info->oob_size ? info->spare_size + info->ecc_size : 0);
629 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
631 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
/* On prep failure we only log; lines handling it are elided here. */
634 dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
637 tx->callback = pxa3xx_nand_data_dma_irq;
638 tx->callback_param = info;
639 info->dma_cookie = dmaengine_submit(tx);
640 dma_async_issue_pending(info->dma_chan);
641 dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
642 __func__, direction, info->dma_cookie, info->sg.length);
/*
 * Threaded half of the IRQ handler: performs the (potentially slow)
 * PIO transfer out of interrupt-hard context, then marks the command
 * done and acks the data-request status bits.
 */
645 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
647 struct pxa3xx_nand_info *info = data;
649 handle_data_pio(info);
651 info->state = STATE_CMD_DONE;
652 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
/*
 * Main (hard) interrupt handler.  Decodes NDSR:
 *  - ECC outcome: ERR_UNCORERR / ERR_CORERR, with a per-chunk corrected
 *    bit count on Armada 370 (NDSR_ERR_CNT) and an implied count of 1 on
 *    PXA; max_bitflips accumulates the worst chunk for ecc.read_page().
 *  - Data requests (RDDREQ/WRDREQ): either start DMA directly or wake
 *    the threaded handler (IRQ_WAKE_THREAD) for PIO.
 *  - Command-done / flash-ready: advance the state machine and complete
 *    the corresponding completion.
 *  - WRCMDREQ: load the prepared NDCB0..3 words — all written through
 *    NDCB0 as the hardware requires (NDCB3 only on NFCv2/Armada 370).
 * The ready/cmd_done bits checked depend on the active chip select
 * (CS0 vs CS1 variants).
 */
657 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
659 struct pxa3xx_nand_info *info = devid;
660 unsigned int status, is_completed = 0, is_ready = 0;
661 unsigned int ready, cmd_done;
662 irqreturn_t ret = IRQ_HANDLED;
665 ready = NDSR_FLASH_RDY;
666 cmd_done = NDSR_CS0_CMDD;
669 cmd_done = NDSR_CS1_CMDD;
672 status = nand_readl(info, NDSR);
674 if (status & NDSR_UNCORERR)
675 info->retcode = ERR_UNCORERR;
676 if (status & NDSR_CORERR) {
677 info->retcode = ERR_CORERR;
678 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
680 info->ecc_err_cnt = NDSR_ERR_CNT(status);
682 info->ecc_err_cnt = 1;
685 * Each chunk composing a page is corrected independently,
686 * and we need to store maximum number of corrected bitflips
687 * to return it to the MTD layer in ecc.read_page().
689 info->max_bitflips = max_t(unsigned int,
693 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
694 /* whether use dma to transfer data */
696 disable_int(info, NDCR_INT_MASK);
697 info->state = (status & NDSR_RDDREQ) ?
698 STATE_DMA_READING : STATE_DMA_WRITING;
699 start_data_dma(info);
700 goto NORMAL_IRQ_EXIT;
702 info->state = (status & NDSR_RDDREQ) ?
703 STATE_PIO_READING : STATE_PIO_WRITING;
704 ret = IRQ_WAKE_THREAD;
705 goto NORMAL_IRQ_EXIT;
708 if (status & cmd_done) {
709 info->state = STATE_CMD_DONE;
712 if (status & ready) {
713 info->state = STATE_READY;
718 * Clear all status bit before issuing the next command, which
719 * can and will alter the status bits and will deserve a new
720 * interrupt on its own. This lets the controller exit the IRQ
722 nand_writel(info, NDSR, status);
724 if (status & NDSR_WRCMDREQ) {
725 status &= ~NDSR_WRCMDREQ;
726 info->state = STATE_CMD_HANDLE;
729 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
730 * must be loaded by writing directly either 12 or 16
731 * bytes directly to NDCB0, four bytes at a time.
733 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
734 * but each NDCBx register can be read.
736 nand_writel(info, NDCB0, info->ndcb0);
737 nand_writel(info, NDCB0, info->ndcb1);
738 nand_writel(info, NDCB0, info->ndcb2);
740 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
741 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
742 nand_writel(info, NDCB0, info->ndcb3);
/* Signal waiters: cmd_complete on command-done, dev_ready on ready. */
746 complete(&info->cmd_complete);
748 complete(&info->dev_ready);
/*
 * Return non-zero when the first 'len' bytes of 'buf' are all 0xFF
 * (i.e. an erased/blank NAND page).  NOTE(review): the loop body and
 * return statements are elided in this extract — confirm against the
 * full source.
 */
753 static inline int is_buf_blank(uint8_t *buf, size_t len)
755 for (; len > 0; len--)
/*
 * Encode column/page address into the NDCB1/NDCB2 command-buffer words.
 * Small pages (< PAGE_CHUNK_SIZE) pack a 24-bit page address shifted by
 * the column byte; large pages use a 16-bit column plus 16-bit page in
 * NDCB1 and overflow page bits [23:16] into NDCB2.
 */
761 static void set_command_address(struct pxa3xx_nand_info *info,
762 unsigned int page_size, uint16_t column, int page_addr)
764 /* small page addr setting */
765 if (page_size < PAGE_CHUNK_SIZE) {
766 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
771 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
774 if (page_addr & 0xFF0000)
775 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
/*
 * Reset per-command bookkeeping before a new command cycle: buffer
 * positions, retcode and ECC error count.  For data commands
 * (PAGEPROG/READ0/READOOB) recompute the transfer sizes via
 * pxa3xx_set_datasize(); for reads and SEQIN, pre-fill the data buffer
 * with 0xFF so untouched bytes look erased.
 */
781 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
783 struct pxa3xx_nand_host *host = info->host[info->cs];
784 struct mtd_info *mtd = host->mtd;
786 /* reset data and oob column point to handle data */
790 info->data_buff_pos = 0;
791 info->oob_buff_pos = 0;
794 info->retcode = ERR_NONE;
795 info->ecc_err_cnt = 0;
801 case NAND_CMD_PAGEPROG:
803 case NAND_CMD_READOOB:
804 pxa3xx_set_datasize(info, mtd);
816 * If we are about to issue a read command, or about to set
817 * the write address, then clean the data buffer.
819 if (command == NAND_CMD_READ0 ||
820 command == NAND_CMD_READOOB ||
821 command == NAND_CMD_SEQIN) {
823 info->buf_count = mtd->writesize + mtd->oobsize;
824 memset(info->data_buff, 0xFF, info->buf_count);
/*
 * Translate a NAND command into NDCB0..NDCB3 command-buffer values.
 * Returns exec_cmd: non-zero when a controller transaction must
 * actually be started, zero for commands that are handled in software
 * (e.g. ERASE2, or a PAGEPROG on a blank buffer).  For pages larger
 * than PAGE_CHUNK_SIZE the command is split into chunked ("extended")
 * operations selected by ext_cmd_type, and NDCB3 carries the chunk
 * length (NDCB0_LEN_OVRD).
 * NOTE(review): several case labels and fall-through lines are elided
 * in this extract; the visible structure follows the upstream driver.
 */
829 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
830 int ext_cmd_type, uint16_t column, int page_addr)
832 int addr_cycle, exec_cmd;
833 struct pxa3xx_nand_host *host;
834 struct mtd_info *mtd;
836 host = info->host[info->cs];
/* Chip select 1 uses the NDCB0_CSEL flag. */
842 info->ndcb0 = NDCB0_CSEL;
846 if (command == NAND_CMD_SEQIN)
849 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
850 + host->col_addr_cycles);
853 case NAND_CMD_READOOB:
855 info->buf_start = column;
856 info->ndcb0 |= NDCB0_CMD_TYPE(0)
/* READOOB is emulated: read the full page, point past writesize. */
860 if (command == NAND_CMD_READOOB)
861 info->buf_start += mtd->writesize;
864 * Multiple page read needs an 'extended command type' field,
865 * which is either naked-read or last-read according to the
868 if (mtd->writesize == PAGE_CHUNK_SIZE) {
869 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
870 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
871 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
873 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
874 info->ndcb3 = info->chunk_size +
878 set_command_address(info, mtd->writesize, column, page_addr);
883 info->buf_start = column;
884 set_command_address(info, mtd->writesize, 0, page_addr);
887 * Multiple page programming needs to execute the initial
888 * SEQIN command that sets the page address.
890 if (mtd->writesize > PAGE_CHUNK_SIZE) {
891 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
892 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
895 /* No data transfer in this case */
901 case NAND_CMD_PAGEPROG:
/* Nothing to program if the whole buffer is still erased (0xFF). */
902 if (is_buf_blank(info->data_buff,
903 (mtd->writesize + mtd->oobsize))) {
908 /* Second command setting for large pages */
909 if (mtd->writesize > PAGE_CHUNK_SIZE) {
911 * Multiple page write uses the 'extended command'
912 * field. This can be used to issue a command dispatch
913 * or a naked-write depending on the current stage.
915 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
917 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
918 info->ndcb3 = info->chunk_size +
922 * This is the command dispatch that completes a chunked
923 * page program operation.
925 if (info->data_size == 0) {
926 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
927 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
/* Small-page path: classic two-cycle program with PAGEPROG. */
934 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
938 | (NAND_CMD_PAGEPROG << 8)
/* PARAM read: length-overridden transfer into the init buffer. */
945 info->buf_count = INIT_BUFFER_SIZE;
946 info->ndcb0 |= NDCB0_CMD_TYPE(0)
950 info->ndcb1 = (column & 0xFF);
951 info->ndcb3 = INIT_BUFFER_SIZE;
952 info->data_size = INIT_BUFFER_SIZE;
955 case NAND_CMD_READID:
956 info->buf_count = READ_ID_BYTES;
957 info->ndcb0 |= NDCB0_CMD_TYPE(3)
960 info->ndcb1 = (column & 0xFF);
964 case NAND_CMD_STATUS:
966 info->ndcb0 |= NDCB0_CMD_TYPE(4)
973 case NAND_CMD_ERASE1:
974 info->ndcb0 |= NDCB0_CMD_TYPE(2)
978 | (NAND_CMD_ERASE2 << 8)
980 info->ndcb1 = page_addr;
/* RESET uses command type 5. */
985 info->ndcb0 |= NDCB0_CMD_TYPE(5)
990 case NAND_CMD_ERASE2:
996 dev_err(&info->pdev->dev, "non-supported command %x\n",
/*
 * mtd cmdfunc for small-page chips (monolithic, non-chunked commands).
 * Adjusts the column for x16 devices, replays cached timings when the
 * chip select changed, prepares NDCBx via prepare_set_command() and, if
 * the command needs the controller, starts it and waits (bounded by
 * CHIP_DELAY_TIMEOUT) for completion, stopping the state machine on
 * timeout.
 */
1004 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1005 int column, int page_addr)
1007 struct pxa3xx_nand_host *host = mtd->priv;
1008 struct pxa3xx_nand_info *info = host->info_data;
1012 * if this is a x16 device ,then convert the input
1013 * "byte" address into a "word" address appropriate
1014 * for indexing a word-oriented device
1016 if (info->reg_ndcr & NDCR_DWIDTH_M)
1020 * There may be different NAND chip hooked to
1021 * different chip select, so check whether
1022 * chip select has been changed, if yes, reset the timing
1024 if (info->cs != host->cs) {
1025 info->cs = host->cs;
1026 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1027 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1030 prepare_start_command(info, command);
1032 info->state = STATE_PREPARED;
1033 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1036 init_completion(&info->cmd_complete);
1037 init_completion(&info->dev_ready);
1038 info->need_wait = 1;
1039 pxa3xx_nand_start(info);
1041 if (!wait_for_completion_timeout(&info->cmd_complete,
1042 CHIP_DELAY_TIMEOUT)) {
1043 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1044 /* Stop State Machine for next command cycle */
1045 pxa3xx_nand_stop(info);
1048 info->state = STATE_IDLE;
/*
 * mtd cmdfunc for large pages split into chunks ("extended" commands,
 * NFCv2).  Each iteration issues one chunked sub-command and waits for
 * it; ext_cmd_type is advanced between iterations: reads switch to
 * naked-read then last-naked-read for the final chunk, and a chunked
 * program finishes with a command dispatch once data_size reaches 0.
 * The dev_ready completion is initialised once before the loop (the
 * command, and hence exec_cmd, is invariant across iterations) and is
 * completed immediately when no transaction is needed.
 */
1051 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1052 const unsigned command,
1053 int column, int page_addr)
1055 struct pxa3xx_nand_host *host = mtd->priv;
1056 struct pxa3xx_nand_info *info = host->info_data;
1057 int exec_cmd, ext_cmd_type;
1060 * if this is a x16 device then convert the input
1061 * "byte" address into a "word" address appropriate
1062 * for indexing a word-oriented device
1064 if (info->reg_ndcr & NDCR_DWIDTH_M)
1068 * There may be different NAND chip hooked to
1069 * different chip select, so check whether
1070 * chip select has been changed, if yes, reset the timing
1072 if (info->cs != host->cs) {
1073 info->cs = host->cs;
1074 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1075 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1078 /* Select the extended command for the first command */
1080 case NAND_CMD_READ0:
1081 case NAND_CMD_READOOB:
1082 ext_cmd_type = EXT_CMD_TYPE_MONO;
1084 case NAND_CMD_SEQIN:
1085 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1087 case NAND_CMD_PAGEPROG:
1088 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1095 prepare_start_command(info, command);
1098 * Prepare the "is ready" completion before starting a command
1099 * transaction sequence. If the command is not executed the
1100 * completion will be completed, see below.
1102 * We can do that inside the loop because the command variable
1103 * is invariant and thus so is the exec_cmd.
1105 info->need_wait = 1;
1106 init_completion(&info->dev_ready);
1108 info->state = STATE_PREPARED;
1109 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
/* No transaction needed: release any waiter immediately. */
1112 info->need_wait = 0;
1113 complete(&info->dev_ready);
1117 init_completion(&info->cmd_complete);
1118 pxa3xx_nand_start(info);
1120 if (!wait_for_completion_timeout(&info->cmd_complete,
1121 CHIP_DELAY_TIMEOUT)) {
1122 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1123 /* Stop State Machine for next command cycle */
1124 pxa3xx_nand_stop(info);
1128 /* Check if the sequence is complete */
1129 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1133 * After a splitted program command sequence has issued
1134 * the command dispatch, the command sequence is complete.
1136 if (info->data_size == 0 &&
1137 command == NAND_CMD_PAGEPROG &&
1138 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1141 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1142 /* Last read: issue a 'last naked read' */
1143 if (info->data_size == info->chunk_size)
1144 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1146 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1149 * If a splitted program command has no more data to transfer,
1150 * the command dispatch must be issued to complete.
1152 } else if (command == NAND_CMD_PAGEPROG &&
1153 info->data_size == 0) {
1154 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1158 info->state = STATE_IDLE;
/*
 * ecc.write_page hook: hardware computes ECC, so simply stage the page
 * data and OOB into the driver buffer via chip->write_buf; the actual
 * program happens on the subsequent PAGEPROG command.
 */
1161 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1162 struct nand_chip *chip, const uint8_t *buf, int oob_required)
1164 chip->write_buf(mtd, buf, mtd->writesize);
1165 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
/*
 * ecc.read_page hook: copy data and OOB from the driver buffer, then
 * fold the controller's ECC verdict into mtd->ecc_stats.  A page that
 * reads back all-0xFF with an "uncorrectable" flag is treated as blank
 * (the HW computes ECC 0 for erased pages, which mismatches the OOB).
 * Returns max_bitflips as required by the MTD API.
 */
1170 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1171 struct nand_chip *chip, uint8_t *buf, int oob_required,
1174 struct pxa3xx_nand_host *host = mtd->priv;
1175 struct pxa3xx_nand_info *info = host->info_data;
1177 chip->read_buf(mtd, buf, mtd->writesize);
1178 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1180 if (info->retcode == ERR_CORERR && info->use_ecc) {
1181 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1183 } else if (info->retcode == ERR_UNCORERR) {
1185 * for blank page (all 0xff), HW will calculate its ECC as
1186 * 0, which is different from the ECC information within
1187 * OOB, ignore such uncorrectable errors
1189 if (is_buf_blank(buf, mtd->writesize))
1190 info->retcode = ERR_NONE;
1192 mtd->ecc_stats.failed++;
1195 return info->max_bitflips;
/*
 * read_byte hook: return the next byte from the staged data buffer,
 * advancing buf_start; out-of-range reads fall back to the default
 * retval (0xFF in the full source — line elided here).
 */
1198 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1200 struct pxa3xx_nand_host *host = mtd->priv;
1201 struct pxa3xx_nand_info *info = host->info_data;
1204 if (info->buf_start < info->buf_count)
1205 /* Has just send a new command? */
1206 retval = info->data_buff[info->buf_start++];
/*
 * read_word hook for x16 devices: return the next aligned 16-bit word
 * from the staged buffer, or 0xFFFF when unaligned or exhausted.
 */
1211 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1213 struct pxa3xx_nand_host *host = mtd->priv;
1214 struct pxa3xx_nand_info *info = host->info_data;
1215 u16 retval = 0xFFFF;
1217 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1218 retval = *((u16 *)(info->data_buff+info->buf_start));
1219 info->buf_start += 2;
/*
 * read_buf hook: copy up to 'len' bytes out of the staged data buffer,
 * clamped to what remains (buf_count - buf_start), and advance the
 * read position.
 */
1224 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1226 struct pxa3xx_nand_host *host = mtd->priv;
1227 struct pxa3xx_nand_info *info = host->info_data;
1228 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1230 memcpy(buf, info->data_buff + info->buf_start, real_len);
1231 info->buf_start += real_len;
/*
 * write_buf hook: mirror of pxa3xx_nand_read_buf() — copy up to 'len'
 * bytes into the staged data buffer, clamped to the remaining space,
 * and advance the write position.
 */
1234 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1235 const uint8_t *buf, int len)
1237 struct pxa3xx_nand_host *host = mtd->priv;
1238 struct pxa3xx_nand_info *info = host->info_data;
1239 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1241 memcpy(info->data_buff + info->buf_start, buf, real_len);
1242 info->buf_start += real_len;
1245 static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
/*
 * waitfunc hook: if a command is outstanding (need_wait), block on the
 * dev_ready completion (bounded by CHIP_DELAY_TIMEOUT).  For write and
 * erase the controller has already waited for command-done, so just map
 * info->retcode onto the NAND status; otherwise report ready.
 */
1250 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1252 struct pxa3xx_nand_host *host = mtd->priv;
1253 struct pxa3xx_nand_info *info = host->info_data;
1255 if (info->need_wait) {
1256 info->need_wait = 0;
1257 if (!wait_for_completion_timeout(&info->dev_ready,
1258 CHIP_DELAY_TIMEOUT)) {
1259 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1260 return NAND_STATUS_FAIL;
1264 /* pxa3xx_nand_send_command has waited for command complete */
1265 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1266 if (info->retcode == ERR_NONE)
1269 return NAND_STATUS_FAIL;
1272 return NAND_STATUS_READY;
/*
 * Configure the controller for a given flash description: validate
 * page size (512/2048 only) and bus width (8/16 only), derive the
 * column/row address cycle counts, build the NDCR base value (cached
 * in info->reg_ndcr, interrupts unmasked, spare enabled by default)
 * and program the timings.  Returns 0 on success, negative errno on
 * unsupported geometry (error-return lines elided in this extract).
 */
1275 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
1276 const struct pxa3xx_nand_flash *f)
1278 struct platform_device *pdev = info->pdev;
1279 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1280 struct pxa3xx_nand_host *host = info->host[info->cs];
1281 uint32_t ndcr = 0x0; /* enable all interrupts */
1283 if (f->page_size != 2048 && f->page_size != 512) {
1284 dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
1288 if (f->flash_width != 16 && f->flash_width != 8) {
1289 dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
1293 /* calculate addressing information */
1294 host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
/* > 64K pages needs a third row-address cycle. */
1296 if (f->num_blocks * f->page_per_block > 65536)
1297 host->row_addr_cycles = 3;
1299 host->row_addr_cycles = 2;
1301 ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1302 ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1303 ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
1304 ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
1305 ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
1306 ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
1308 ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1309 ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1311 info->reg_ndcr = ndcr;
1313 pxa3xx_nand_set_timing(host, f->timing);
/*
 * "keep config" path: instead of programming the controller, read back the
 * configuration left by the bootloader (NDCR and the CS0 timing registers)
 * and cache it in @info.  Interrupt-enable bits are masked out of the
 * cached NDCR so the driver controls interrupts itself.
 */
1317 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1319 uint32_t ndcr = nand_readl(info, NDCR);
1321 /* Set an initial chunk size */
1322 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1323 info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
1324 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1325 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
/*
 * Allocate the data/OOB bounce buffer and, when DMA is in use, request and
 * configure the pxad slave channel for the controller's data FIFO.
 * NOTE(review): the early return for the non-DMA case and the error
 * returns are on lines elided from this extract.
 */
1329 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1331 struct platform_device *pdev = info->pdev;
1332 struct dma_slave_config config;
1333 dma_cap_mask_t mask;
1334 struct pxad_param param;
1337 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1338 if (info->data_buff == NULL)
/* The controller DMA engine can only address 32 bits. */
1343 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1347 sg_init_one(&info->sg, info->data_buff, info->buf_size);
1349 dma_cap_set(DMA_SLAVE, mask);
1350 param.prio = PXAD_PRIO_LOWEST;
1351 param.drcmr = info->drcmr_dat;
1352 info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1355 if (!info->dma_chan) {
1356 dev_err(&pdev->dev, "unable to request data dma channel\n");
/*
 * Both directions target the same NDDB data-FIFO register; the actual
 * direction is chosen per transfer when the descriptor is prepared.
 */
1360 memset(&config, 0, sizeof(config));
1361 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1362 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1363 config.src_addr = info->mmio_phys + NDDB;
1364 config.dst_addr = info->mmio_phys + NDDB;
1365 config.src_maxburst = 32;
1366 config.dst_maxburst = 32;
1367 ret = dmaengine_slave_config(info->dma_chan, &config);
1369 dev_err(&info->pdev->dev,
1370 "dma channel configuration failed: %d\n",
1376 * Now that DMA buffers are allocated we turn on
1377 * DMA proper for I/O operations.
/*
 * Release the resources taken by pxa3xx_nand_init_buff(): stop and free
 * the DMA channel (if DMA was enabled) and free the bounce buffer.
 */
1383 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1385 if (info->use_dma) {
1386 dmaengine_terminate_all(info->dma_chan);
1387 dma_release_channel(info->dma_chan);
1389 kfree(info->data_buff);
/*
 * Probe for chip presence on the current chip select: program the
 * controller with the conservative default timings from
 * builtin_flash_types[0], then issue a RESET and check that the chip
 * reports ready without failure.
 */
1392 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
1394 struct mtd_info *mtd;
1395 struct nand_chip *chip;
1398 mtd = info->host[info->cs]->mtd;
1401 /* use the common timing to make a try */
1402 ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
1406 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1407 ret = chip->waitfunc(mtd, chip);
1408 if (ret & NAND_STATUS_FAIL)
/*
 * Select the controller ECC geometry (chunk/spare/ECC byte sizes and OOB
 * layout) that satisfies the requested @strength per @ecc_stepsize for the
 * given @page_size.  Unsupported combinations are rejected with -ENODEV
 * (error return on an elided line).
 */
1414 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1415 struct nand_ecc_ctrl *ecc,
1416 int strength, int ecc_stepsize, int page_size)
/* 1-bit Hamming per 512B, applied over a whole 2KB page. */
1418 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1419 info->chunk_size = 2048;
1420 info->spare_size = 40;
1421 info->ecc_size = 24;
1422 ecc->mode = NAND_ECC_HW;
/* 1-bit Hamming on small-page (512B) devices. */
1426 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1427 info->chunk_size = 512;
1428 info->spare_size = 8;
1430 ecc->mode = NAND_ECC_HW;
1435 * Required ECC: 4-bit correction per 512 bytes
1436 * Select: 16-bit correction per 2048 bytes
1438 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1440 info->chunk_size = 2048;
1441 info->spare_size = 32;
1442 info->ecc_size = 32;
1443 ecc->mode = NAND_ECC_HW;
1444 ecc->size = info->chunk_size;
1445 ecc->layout = &ecc_layout_2KB_bch4bit;
/* Same BCH4 geometry, 4KB pages split into two 2KB chunks. */
1448 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1450 info->chunk_size = 2048;
1451 info->spare_size = 32;
1452 info->ecc_size = 32;
1453 ecc->mode = NAND_ECC_HW;
1454 ecc->size = info->chunk_size;
1455 ecc->layout = &ecc_layout_4KB_bch4bit;
1459 * Required ECC: 8-bit correction per 512 bytes
1460 * Select: 16-bit correction per 1024 bytes
1462 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1464 info->chunk_size = 1024;
1465 info->spare_size = 0;
1466 info->ecc_size = 32;
1467 ecc->mode = NAND_ECC_HW;
1468 ecc->size = info->chunk_size;
1469 ecc->layout = &ecc_layout_4KB_bch8bit;
1472 dev_err(&info->pdev->dev,
1473 "ECC strength %d at page size %d is not supported\n",
1474 strength, page_size);
1478 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1479 ecc->strength, ecc->size);
/*
 * Full device identification and setup for one chip select: sense the
 * chip, read its ID, match it against the builtin flash table, program
 * the controller, run the generic nand_scan_ident()/nand_scan_tail()
 * sequence, pick ECC parameters and (re)allocate the data buffer sized
 * for the detected page + OOB.  (Several return/else lines are elided
 * from this extract.)
 */
1483 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1485 struct pxa3xx_nand_host *host = mtd->priv;
1486 struct pxa3xx_nand_info *info = host->info_data;
1487 struct platform_device *pdev = info->pdev;
1488 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1489 struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
1490 const struct pxa3xx_nand_flash *f = NULL;
1491 struct nand_chip *chip = mtd->priv;
1495 uint16_t ecc_strength, ecc_step;
/* Honour a bootloader-provided configuration when asked to. */
1497 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
1500 /* Set a default chunk size */
1501 info->chunk_size = 512;
1503 ret = pxa3xx_nand_sensing(info);
1505 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
/* Read the 2-byte manufacturer/device ID out of the data buffer. */
1511 chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
1512 id = *((uint16_t *)(info->data_buff));
1514 dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
1516 dev_warn(&info->pdev->dev,
1517 "Read out ID 0, potential timing set wrong!!\n");
/* Entry 0 is the default timing entry, so matching starts at [1]. */
1522 num = ARRAY_SIZE(builtin_flash_types) - 1;
1523 for (i = 0; i < num; i++) {
1524 f = &builtin_flash_types[i + 1];
1526 /* find the chip in default list */
1527 if (f->chip_id == id)
1531 if (i >= (ARRAY_SIZE(builtin_flash_types) - 1)) {
1532 dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
1537 ret = pxa3xx_nand_config_flash(info, f);
1539 dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
/* Build a one-entry nand_flash_dev table for nand_scan_ident(). */
1543 memset(pxa3xx_flash_ids, 0, sizeof(pxa3xx_flash_ids));
1545 pxa3xx_flash_ids[0].name = f->name;
1546 pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff;
1547 pxa3xx_flash_ids[0].pagesize = f->page_size;
1548 chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size;
1549 pxa3xx_flash_ids[0].chipsize = chipsize >> 20;
1550 pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
1551 if (f->flash_width == 16)
1552 pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
1553 pxa3xx_flash_ids[1].name = NULL;
1554 def = pxa3xx_flash_ids;
1556 if (info->reg_ndcr & NDCR_DWIDTH_M)
1557 chip->options |= NAND_BUSWIDTH_16;
1559 /* Device detection must be done with ECC disabled */
1560 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1561 nand_writel(info, NDECCCTRL, 0x0);
1563 if (nand_scan_ident(mtd, 1, def))
1566 if (pdata->flash_bbt) {
1568 * We'll use a bad block table stored in-flash and don't
1569 * allow writing the bad block marker to the flash.
1571 chip->bbt_options |= NAND_BBT_USE_FLASH |
1572 NAND_BBT_NO_OOB_BBM;
1573 chip->bbt_td = &bbt_main_descr;
1574 chip->bbt_md = &bbt_mirror_descr;
1578 * If the page size is bigger than the FIFO size, let's check
1579 * we are given the right variant and then switch to the extended
1580 * (aka splitted) command handling,
1582 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1583 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1584 chip->cmdfunc = nand_cmdfunc_extended;
1586 dev_err(&info->pdev->dev,
1587 "unsupported page size on this variant\n");
/* Platform data overrides the chip's ONFI-advertised ECC needs. */
1592 if (pdata->ecc_strength && pdata->ecc_step_size) {
1593 ecc_strength = pdata->ecc_strength;
1594 ecc_step = pdata->ecc_step_size;
1596 ecc_strength = chip->ecc_strength_ds;
1597 ecc_step = chip->ecc_step_ds;
1600 /* Set default ECC strength requirements on non-ONFI devices */
1601 if (ecc_strength < 1 && ecc_step < 1) {
1606 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1607 ecc_step, mtd->writesize);
1611 /* calculate addressing information */
1612 if (mtd->writesize >= 2048)
1613 host->col_addr_cycles = 2;
1615 host->col_addr_cycles = 1;
1617 /* release the initial buffer */
1618 kfree(info->data_buff);
1620 /* allocate the real data + oob buffer */
1621 info->buf_size = mtd->writesize + mtd->oobsize;
1622 ret = pxa3xx_nand_init_buff(info);
1625 info->oob_buff = info->data_buff + mtd->writesize;
/* More than 64K pages needs a third row-address cycle. */
1627 if ((mtd->size >> chip->page_shift) > 65536)
1628 host->row_addr_cycles = 3;
1630 host->row_addr_cycles = 2;
1631 return nand_scan_tail(mtd);
/*
 * Allocate and wire up the driver state for @pdev: one pxa3xx_nand_info
 * followed in the same allocation by an (mtd_info + nand_chip/host) pair
 * per chip select; then grab the clock, the DMA request lines, the IRQ
 * and the MMIO region, allocate the initial detection buffer and install
 * the interrupt handler.  Uses goto-style cleanup (labels on elided
 * lines) to unwind on failure.
 */
1634 static int alloc_nand_resource(struct platform_device *pdev)
1636 struct pxa3xx_nand_platform_data *pdata;
1637 struct pxa3xx_nand_info *info;
1638 struct pxa3xx_nand_host *host;
1639 struct nand_chip *chip = NULL;
1640 struct mtd_info *mtd;
1644 pdata = dev_get_platdata(&pdev->dev);
1645 if (pdata->num_cs <= 0)
/* Single allocation: info struct + num_cs * (mtd + host). */
1647 info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
1648 sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
1653 info->variant = pxa3xx_nand_get_variant(pdev);
1654 for (cs = 0; cs < pdata->num_cs; cs++) {
/*
 * Carve the per-cs mtd/chip/host out of the tail of the
 * allocation; host is overlaid at the start of the nand_chip
 * (host embeds the chip as its first member).
 */
1655 mtd = (void *)&info[1] + (sizeof(*mtd) + sizeof(*host)) * cs;
1656 chip = (struct nand_chip *)(&mtd[1]);
1657 host = (struct pxa3xx_nand_host *)chip;
1658 info->host[cs] = host;
1661 host->info_data = info;
1663 mtd->owner = THIS_MODULE;
/* Install the driver's nand_chip method hooks. */
1665 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1666 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1667 chip->controller = &info->controller;
1668 chip->waitfunc = pxa3xx_nand_waitfunc;
1669 chip->select_chip = pxa3xx_nand_select_chip;
1670 chip->read_word = pxa3xx_nand_read_word;
1671 chip->read_byte = pxa3xx_nand_read_byte;
1672 chip->read_buf = pxa3xx_nand_read_buf;
1673 chip->write_buf = pxa3xx_nand_write_buf;
1674 chip->options |= NAND_NO_SUBPAGE_WRITE;
1675 chip->cmdfunc = nand_cmdfunc;
1678 spin_lock_init(&chip->controller->lock);
1679 init_waitqueue_head(&chip->controller->wq);
1680 info->clk = devm_clk_get(&pdev->dev, NULL);
1681 if (IS_ERR(info->clk)) {
1682 dev_err(&pdev->dev, "failed to get nand clock\n");
1683 return PTR_ERR(info->clk);
1685 ret = clk_prepare_enable(info->clk);
/* DMA request lines for data and command channels. */
1690 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1693 "no resource defined for data DMA\n");
1695 goto fail_disable_clk;
1697 info->drcmr_dat = r->start;
1699 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1702 "no resource defined for cmd DMA\n");
1704 goto fail_disable_clk;
1706 info->drcmr_cmd = r->start;
1709 irq = platform_get_irq(pdev, 0);
1711 dev_err(&pdev->dev, "no IRQ resource defined\n");
1713 goto fail_disable_clk;
1716 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1717 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1718 if (IS_ERR(info->mmio_base)) {
1719 ret = PTR_ERR(info->mmio_base);
1720 goto fail_disable_clk;
1722 info->mmio_phys = r->start;
1724 /* Allocate a buffer to allow flash detection */
1725 info->buf_size = INIT_BUFFER_SIZE;
1726 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1727 if (info->data_buff == NULL) {
1729 goto fail_disable_clk;
1732 /* initialize all interrupts to be disabled */
1733 disable_int(info, NDSR_MASK);
1735 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1736 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1739 dev_err(&pdev->dev, "failed to request IRQ\n");
1743 platform_set_drvdata(pdev, info);
/* Error unwind: labels for these cleanups are on elided lines. */
1748 free_irq(irq, info);
1749 kfree(info->data_buff);
1751 clk_disable_unprepare(info->clk);
/*
 * Teardown on device removal: release the IRQ, DMA/buffer resources and
 * clock, then unregister the MTD device of every chip select.
 */
1755 static int pxa3xx_nand_remove(struct platform_device *pdev)
1757 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1758 struct pxa3xx_nand_platform_data *pdata;
1764 pdata = dev_get_platdata(&pdev->dev);
1766 irq = platform_get_irq(pdev, 0);
1768 free_irq(irq, info);
1769 pxa3xx_nand_free_buff(info);
1771 clk_disable_unprepare(info->clk);
1773 for (cs = 0; cs < pdata->num_cs; cs++)
1774 nand_release(info->host[cs]->mtd);
/*
 * Build platform data from the device-tree node: arbiter/keep-config
 * flags, number of chip selects, on-flash BBT and optional ECC strength /
 * step size (negative "not specified" results are normalized to 0 so the
 * scan path falls back to the chip's own requirements).
 */
1778 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1780 struct pxa3xx_nand_platform_data *pdata;
1781 struct device_node *np = pdev->dev.of_node;
1782 const struct of_device_id *of_id =
1783 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1788 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1792 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1793 pdata->enable_arbiter = 1;
1794 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1795 pdata->keep_config = 1;
1796 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1797 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1799 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1800 if (pdata->ecc_strength < 0)
1801 pdata->ecc_strength = 0;
1803 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1804 if (pdata->ecc_step_size < 0)
1805 pdata->ecc_step_size = 0;
1807 pdev->dev.platform_data = pdata;
/*
 * Driver probe: parse DT into platform data, allocate controller
 * resources, then scan and register an MTD device per chip select.  If no
 * chip select probes successfully the whole device is torn down.
 * (Some return/assignment lines are elided from this extract.)
 */
1812 static int pxa3xx_nand_probe(struct platform_device *pdev)
1814 struct pxa3xx_nand_platform_data *pdata;
1815 struct mtd_part_parser_data ppdata = {};
1816 struct pxa3xx_nand_info *info;
1817 int ret, cs, probe_success, dma_available;
/* DMA is only usable on PXA/MMP ARM platforms. */
1819 dma_available = IS_ENABLED(CONFIG_ARM) &&
1820 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1821 if (use_dma && !dma_available) {
1823 dev_warn(&pdev->dev,
1824 "This platform can't do DMA on this device\n");
1827 ret = pxa3xx_nand_probe_dt(pdev);
1831 pdata = dev_get_platdata(&pdev->dev);
1833 dev_err(&pdev->dev, "no platform data defined\n");
1837 ret = alloc_nand_resource(pdev);
1839 dev_err(&pdev->dev, "alloc nand resource failed\n");
1843 info = platform_get_drvdata(pdev);
1845 for (cs = 0; cs < pdata->num_cs; cs++) {
1846 struct mtd_info *mtd = info->host[cs]->mtd;
1849 * The mtd name matches the one used in 'mtdparts' kernel
1850 * parameter. This name cannot be changed or otherwise
1851 * user's mtd partitions configuration would get broken.
1853 mtd->name = "pxa3xx_nand-0";
1855 ret = pxa3xx_nand_scan(mtd);
1857 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1862 ppdata.of_node = pdev->dev.of_node;
1863 ret = mtd_device_parse_register(mtd, NULL,
1864 &ppdata, pdata->parts[cs],
1865 pdata->nr_parts[cs]);
/* No chip select came up: undo everything alloc_nand_resource did. */
1870 if (!probe_success) {
1871 pxa3xx_nand_remove(pdev);
/*
 * PM suspend hook: refuse to suspend while a NAND operation is in flight
 * (info->state busy), otherwise iterate the chip selects.  NOTE(review):
 * the per-cs action inside the loop is on elided lines — presumably an
 * mtd suspend call; confirm against the full source.
 */
1879 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
1881 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1882 struct pxa3xx_nand_platform_data *pdata;
1883 struct mtd_info *mtd;
1886 pdata = dev_get_platdata(&pdev->dev);
1888 dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
1892 for (cs = 0; cs < pdata->num_cs; cs++) {
1893 mtd = info->host[cs]->mtd;
/*
 * PM resume hook: mask interrupts, invalidate the cached chip select so
 * timings are reprogrammed on the next command, clear stale NDSR status,
 * then iterate the chip selects.  NOTE(review): the per-cs action inside
 * the loop is on elided lines — presumably an mtd resume call; confirm
 * against the full source.
 */
1900 static int pxa3xx_nand_resume(struct platform_device *pdev)
1902 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1903 struct pxa3xx_nand_platform_data *pdata;
1904 struct mtd_info *mtd;
1907 pdata = dev_get_platdata(&pdev->dev);
1908 /* We don't want to handle interrupt without calling mtd routine */
1909 disable_int(info, NDCR_INT_MASK);
1912 * Directly set the chip select to a invalid value,
1913 * then the driver would reset the timing according
1914 * to current chip select at the beginning of cmdfunc
1919 * As the spec says, the NDSR would be updated to 0x1800 when
1920 * doing the nand_clk disable/enable.
1921 * To prevent it damaging state machine of the driver, clear
1922 * all status before resume
1924 nand_writel(info, NDSR, NDSR_MASK);
1925 for (cs = 0; cs < pdata->num_cs; cs++) {
1926 mtd = info->host[cs]->mtd;
/* Without CONFIG_PM the suspend/resume callbacks are simply absent. */
1933 #define pxa3xx_nand_suspend NULL
1934 #define pxa3xx_nand_resume NULL
/* Platform driver registration; DT matching via pxa3xx_nand_dt_ids. */
1937 static struct platform_driver pxa3xx_nand_driver = {
1939 .name = "pxa3xx-nand",
1940 .of_match_table = pxa3xx_nand_dt_ids,
1942 .probe = pxa3xx_nand_probe,
1943 .remove = pxa3xx_nand_remove,
1944 .suspend = pxa3xx_nand_suspend,
1945 .resume = pxa3xx_nand_resume,
1948 module_platform_driver(pxa3xx_nand_driver);
1950 MODULE_LICENSE("GPL");
1951 MODULE_DESCRIPTION("PXA3xx NAND controller driver");