/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
 */
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/platform_device.h>
18 #include <linux/dmaengine.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/dma/pxa-dma.h>
21 #include <linux/delay.h>
22 #include <linux/clk.h>
23 #include <linux/mtd/mtd.h>
24 #include <linux/mtd/nand.h>
25 #include <linux/mtd/partitions.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/slab.h>
31 #include <linux/of_device.h>
32 #include <linux/of_mtd.h>
33 #include <linux/platform_data/mtd-nand-pxa3xx.h>
35 #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200)
36 #define NAND_STOP_DELAY msecs_to_jiffies(40)
37 #define PAGE_CHUNK_SIZE (2048)
40 * Define a buffer size for the initial command that detects the flash device:
41 * STATUS, READID and PARAM.
42 * ONFI param page is 256 bytes, and there are three redundant copies
43 * to be read. JEDEC param page is 512 bytes, and there are also three
44 * redundant copies to be read.
45 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
47 #define INIT_BUFFER_SIZE 2048
49 /* registers and bit definitions */
50 #define NDCR (0x00) /* Control register */
51 #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
52 #define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
53 #define NDSR (0x14) /* Status Register */
54 #define NDPCR (0x18) /* Page Count Register */
55 #define NDBDR0 (0x1C) /* Bad Block Register 0 */
56 #define NDBDR1 (0x20) /* Bad Block Register 1 */
57 #define NDECCCTRL (0x28) /* ECC control */
58 #define NDDB (0x40) /* Data Buffer */
59 #define NDCB0 (0x48) /* Command Buffer0 */
60 #define NDCB1 (0x4C) /* Command Buffer1 */
61 #define NDCB2 (0x50) /* Command Buffer2 */
63 #define NDCR_SPARE_EN (0x1 << 31)
64 #define NDCR_ECC_EN (0x1 << 30)
65 #define NDCR_DMA_EN (0x1 << 29)
66 #define NDCR_ND_RUN (0x1 << 28)
67 #define NDCR_DWIDTH_C (0x1 << 27)
68 #define NDCR_DWIDTH_M (0x1 << 26)
69 #define NDCR_PAGE_SZ (0x1 << 24)
70 #define NDCR_NCSX (0x1 << 23)
71 #define NDCR_ND_MODE (0x3 << 21)
72 #define NDCR_NAND_MODE (0x0)
73 #define NDCR_CLR_PG_CNT (0x1 << 20)
74 #define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
75 #define NFCV2_NDCR_STOP_ON_UNCOR (0x1 << 19)
76 #define NDCR_RD_ID_CNT_MASK (0x7 << 16)
77 #define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
79 #define NDCR_RA_START (0x1 << 15)
80 #define NDCR_PG_PER_BLK (0x1 << 14)
81 #define NDCR_ND_ARB_EN (0x1 << 12)
82 #define NDCR_INT_MASK (0xFFF)
84 #define NDSR_MASK (0xfff)
85 #define NDSR_ERR_CNT_OFF (16)
86 #define NDSR_ERR_CNT_MASK (0x1f)
87 #define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
88 #define NDSR_RDY (0x1 << 12)
89 #define NDSR_FLASH_RDY (0x1 << 11)
90 #define NDSR_CS0_PAGED (0x1 << 10)
91 #define NDSR_CS1_PAGED (0x1 << 9)
92 #define NDSR_CS0_CMDD (0x1 << 8)
93 #define NDSR_CS1_CMDD (0x1 << 7)
94 #define NDSR_CS0_BBD (0x1 << 6)
95 #define NDSR_CS1_BBD (0x1 << 5)
96 #define NDSR_UNCORERR (0x1 << 4)
97 #define NDSR_CORERR (0x1 << 3)
98 #define NDSR_WRDREQ (0x1 << 2)
99 #define NDSR_RDDREQ (0x1 << 1)
100 #define NDSR_WRCMDREQ (0x1)
102 #define NDCB0_LEN_OVRD (0x1 << 28)
103 #define NDCB0_ST_ROW_EN (0x1 << 26)
104 #define NDCB0_AUTO_RS (0x1 << 25)
105 #define NDCB0_CSEL (0x1 << 24)
106 #define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
107 #define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
108 #define NDCB0_CMD_TYPE_MASK (0x7 << 21)
109 #define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
110 #define NDCB0_NC (0x1 << 20)
111 #define NDCB0_DBC (0x1 << 19)
112 #define NDCB0_ADDR_CYC_MASK (0x7 << 16)
113 #define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
114 #define NDCB0_CMD2_MASK (0xff << 8)
115 #define NDCB0_CMD1_MASK (0xff)
116 #define NDCB0_ADDR_CYC_SHIFT (16)
118 #define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
119 #define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
120 #define EXT_CMD_TYPE_READ 4 /* Read */
121 #define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
122 #define EXT_CMD_TYPE_FINAL 3 /* Final command */
123 #define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
124 #define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
127 * This should be large enough to read 'ONFI' and 'JEDEC'.
128 * Let's use 7 bytes, which is the maximum ID count supported
129 * by the controller (see NDCR_RD_ID_CNT_MASK).
131 #define READ_ID_BYTES 7
/* macros for registers read/write */

/*
 * Write @val to controller register @off, tracing the access at the
 * verbose-debug level. Multi-statement macro wrapped in do/while(0)
 * so it behaves as a single statement.
 */
#define nand_writel(info, off, val)					\
	do {								\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_writel(0x%x, 0x%04x)\n",		\
			 __func__, __LINE__, (val), (off));		\
		writel_relaxed((val), (info)->mmio_base + (off));	\
	} while (0)

/*
 * Read controller register @off, tracing the value read. Uses a GCC
 * statement expression so the macro yields the read value.
 */
#define nand_readl(info, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((info)->mmio_base + (off));		\
		dev_vdbg(&info->pdev->dev,				\
			 "%s():%d nand_readl(0x%04x) = 0x%x\n",		\
			 __func__, __LINE__, (off), _v);		\
		_v;							\
	})
152 /* error code and state */
175 enum pxa3xx_nand_variant {
176 PXA3XX_NAND_VARIANT_PXA,
177 PXA3XX_NAND_VARIANT_ARMADA370,
180 struct pxa3xx_nand_host {
181 struct nand_chip chip;
184 /* page size of attached chip */
188 /* calculated from pxa3xx_nand_flash data */
189 unsigned int col_addr_cycles;
190 unsigned int row_addr_cycles;
193 struct pxa3xx_nand_info {
194 struct nand_hw_control controller;
195 struct platform_device *pdev;
198 void __iomem *mmio_base;
199 unsigned long mmio_phys;
200 struct completion cmd_complete, dev_ready;
202 unsigned int buf_start;
203 unsigned int buf_count;
204 unsigned int buf_size;
205 unsigned int data_buff_pos;
206 unsigned int oob_buff_pos;
208 /* DMA information */
209 struct scatterlist sg;
210 enum dma_data_direction dma_dir;
211 struct dma_chan *dma_chan;
212 dma_cookie_t dma_cookie;
216 unsigned char *data_buff;
217 unsigned char *oob_buff;
218 dma_addr_t data_buff_phys;
221 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
225 * This driver supports NFCv1 (as found in PXA SoC)
226 * and NFCv2 (as found in Armada 370/XP SoC).
228 enum pxa3xx_nand_variant variant;
231 int use_ecc; /* use HW ECC ? */
232 int ecc_bch; /* using BCH ECC? */
233 int use_dma; /* use DMA ? */
234 int use_spare; /* use spare ? */
237 unsigned int data_size; /* data to be read from FIFO */
238 unsigned int chunk_size; /* split commands chunk size */
239 unsigned int oob_size;
240 unsigned int spare_size;
241 unsigned int ecc_size;
242 unsigned int ecc_err_cnt;
243 unsigned int max_bitflips;
246 /* cached register value */
251 /* generated NDCBx register values */
258 static bool use_dma = 1;
259 module_param(use_dma, bool, 0444);
260 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
262 struct pxa3xx_nand_timing {
263 unsigned int tCH; /* Enable signal hold time */
264 unsigned int tCS; /* Enable signal setup time */
265 unsigned int tWH; /* ND_nWE high duration */
266 unsigned int tWP; /* ND_nWE pulse time */
267 unsigned int tRH; /* ND_nRE high duration */
268 unsigned int tRP; /* ND_nRE pulse width */
269 unsigned int tR; /* ND_nWE high to ND_nRE low for read */
270 unsigned int tWHR; /* ND_nWE high to ND_nRE low for status read */
271 unsigned int tAR; /* ND_ALE low to ND_nRE low delay */
274 struct pxa3xx_nand_flash {
276 unsigned int flash_width; /* Width of Flash memory (DWIDTH_M) */
277 unsigned int dfc_width; /* Width of flash controller(DWIDTH_C) */
278 struct pxa3xx_nand_timing *timing; /* NAND Flash timing */
281 static struct pxa3xx_nand_timing timing[] = {
282 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
283 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
284 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
285 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
288 static struct pxa3xx_nand_flash builtin_flash_types[] = {
289 { 0x46ec, 16, 16, &timing[1] },
290 { 0xdaec, 8, 8, &timing[1] },
291 { 0xd7ec, 8, 8, &timing[1] },
292 { 0xa12c, 8, 8, &timing[2] },
293 { 0xb12c, 16, 16, &timing[2] },
294 { 0xdc2c, 8, 8, &timing[2] },
295 { 0xcc2c, 16, 16, &timing[2] },
296 { 0xba20, 16, 16, &timing[3] },
299 static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
300 static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
302 static struct nand_bbt_descr bbt_main_descr = {
303 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
304 | NAND_BBT_2BIT | NAND_BBT_VERSION,
308 .maxblocks = 8, /* Last 8 blocks in each chip */
309 .pattern = bbt_pattern
312 static struct nand_bbt_descr bbt_mirror_descr = {
313 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
314 | NAND_BBT_2BIT | NAND_BBT_VERSION,
318 .maxblocks = 8, /* Last 8 blocks in each chip */
319 .pattern = bbt_mirror_pattern
322 static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
325 32, 33, 34, 35, 36, 37, 38, 39,
326 40, 41, 42, 43, 44, 45, 46, 47,
327 48, 49, 50, 51, 52, 53, 54, 55,
328 56, 57, 58, 59, 60, 61, 62, 63},
329 .oobfree = { {2, 30} }
332 static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
335 32, 33, 34, 35, 36, 37, 38, 39,
336 40, 41, 42, 43, 44, 45, 46, 47,
337 48, 49, 50, 51, 52, 53, 54, 55,
338 56, 57, 58, 59, 60, 61, 62, 63,
339 96, 97, 98, 99, 100, 101, 102, 103,
340 104, 105, 106, 107, 108, 109, 110, 111,
341 112, 113, 114, 115, 116, 117, 118, 119,
342 120, 121, 122, 123, 124, 125, 126, 127},
343 /* Bootrom looks in bytes 0 & 5 for bad blocks */
344 .oobfree = { {6, 26}, { 64, 32} }
347 static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
350 32, 33, 34, 35, 36, 37, 38, 39,
351 40, 41, 42, 43, 44, 45, 46, 47,
352 48, 49, 50, 51, 52, 53, 54, 55,
353 56, 57, 58, 59, 60, 61, 62, 63},
357 #define NDTR0_tCH(c) (min((c), 7) << 19)
358 #define NDTR0_tCS(c) (min((c), 7) << 16)
359 #define NDTR0_tWH(c) (min((c), 7) << 11)
360 #define NDTR0_tWP(c) (min((c), 7) << 8)
361 #define NDTR0_tRH(c) (min((c), 7) << 3)
362 #define NDTR0_tRP(c) (min((c), 7) << 0)
364 #define NDTR1_tR(c) (min((c), 65535) << 16)
365 #define NDTR1_tWHR(c) (min((c), 15) << 4)
366 #define NDTR1_tAR(c) (min((c), 15) << 0)
368 /* convert nano-seconds to nand flash controller clock cycles */
369 #define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
371 static const struct of_device_id pxa3xx_nand_dt_ids[] = {
373 .compatible = "marvell,pxa3xx-nand",
374 .data = (void *)PXA3XX_NAND_VARIANT_PXA,
377 .compatible = "marvell,armada370-nand",
378 .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
382 MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
384 static enum pxa3xx_nand_variant
385 pxa3xx_nand_get_variant(struct platform_device *pdev)
387 const struct of_device_id *of_id =
388 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
390 return PXA3XX_NAND_VARIANT_PXA;
391 return (enum pxa3xx_nand_variant)of_id->data;
394 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
395 const struct pxa3xx_nand_timing *t)
397 struct pxa3xx_nand_info *info = host->info_data;
398 unsigned long nand_clk = clk_get_rate(info->clk);
399 uint32_t ndtr0, ndtr1;
401 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
402 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
403 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
404 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
405 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
406 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
408 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
409 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
410 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
412 info->ndtr0cs0 = ndtr0;
413 info->ndtr1cs0 = ndtr1;
414 nand_writel(info, NDTR0CS0, ndtr0);
415 nand_writel(info, NDTR1CS0, ndtr1);
418 static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
419 const struct nand_sdr_timings *t)
421 struct pxa3xx_nand_info *info = host->info_data;
422 struct nand_chip *chip = &host->chip;
423 unsigned long nand_clk = clk_get_rate(info->clk);
424 uint32_t ndtr0, ndtr1;
426 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
427 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
428 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
429 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
430 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
431 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
432 u32 tR = chip->chip_delay * 1000;
433 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
434 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
436 /* fallback to a default value if tR = 0 */
440 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
441 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
442 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
443 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
444 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
445 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
447 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
448 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
449 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
451 info->ndtr0cs0 = ndtr0;
452 info->ndtr1cs0 = ndtr1;
453 nand_writel(info, NDTR0CS0, ndtr0);
454 nand_writel(info, NDTR1CS0, ndtr1);
457 static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
458 unsigned int *flash_width,
459 unsigned int *dfc_width)
461 struct nand_chip *chip = &host->chip;
462 struct pxa3xx_nand_info *info = host->info_data;
463 const struct pxa3xx_nand_flash *f = NULL;
464 struct mtd_info *mtd = nand_to_mtd(&host->chip);
467 ntypes = ARRAY_SIZE(builtin_flash_types);
469 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
471 id = chip->read_byte(mtd);
472 id |= chip->read_byte(mtd) << 0x8;
474 for (i = 0; i < ntypes; i++) {
475 f = &builtin_flash_types[i];
477 if (f->chip_id == id)
482 dev_err(&info->pdev->dev, "Error: timings not found\n");
486 pxa3xx_nand_set_timing(host, f->timing);
488 *flash_width = f->flash_width;
489 *dfc_width = f->dfc_width;
/*
 * ONFI timing setup: pick the highest async timing mode advertised
 * in @mode (a bitmask), translate it to SDR timings and program the
 * controller. Returns 0 on success or a PTR_ERR code on failure.
 */
static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
					 int mode)
{
	const struct nand_sdr_timings *timings;

	/* @mode is a bitmask of supported modes; keep the fastest one */
	mode = fls(mode) - 1;
	if (mode < 0)
		mode = 0;

	timings = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	return 0;
}
512 static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
514 struct nand_chip *chip = &host->chip;
515 struct pxa3xx_nand_info *info = host->info_data;
516 unsigned int flash_width = 0, dfc_width = 0;
519 mode = onfi_get_async_timing_mode(chip);
520 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
521 err = pxa3xx_nand_init_timings_compat(host, &flash_width,
526 if (flash_width == 16) {
527 info->reg_ndcr |= NDCR_DWIDTH_M;
528 chip->options |= NAND_BUSWIDTH_16;
531 info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
533 err = pxa3xx_nand_init_timings_onfi(host, mode);
542 * Set the data and OOB size, depending on the selected
543 * spare and ECC configuration.
544 * Only applicable to READ0, READOOB and PAGEPROG commands.
546 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
547 struct mtd_info *mtd)
549 int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
551 info->data_size = mtd->writesize;
555 info->oob_size = info->spare_size;
557 info->oob_size += info->ecc_size;
561 * NOTE: it is a must to set ND_RUN firstly, then write
562 * command buffer, otherwise, it does not work.
563 * We enable all the interrupt at the same time, and
564 * let pxa3xx_nand_irq to handle all logic.
566 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
570 ndcr = info->reg_ndcr;
575 nand_writel(info, NDECCCTRL, 0x1);
577 ndcr &= ~NDCR_ECC_EN;
579 nand_writel(info, NDECCCTRL, 0x0);
585 ndcr &= ~NDCR_DMA_EN;
588 ndcr |= NDCR_SPARE_EN;
590 ndcr &= ~NDCR_SPARE_EN;
594 /* clear status bits and run */
595 nand_writel(info, NDSR, NDSR_MASK);
596 nand_writel(info, NDCR, 0);
597 nand_writel(info, NDCR, ndcr);
600 static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
603 int timeout = NAND_STOP_DELAY;
605 /* wait RUN bit in NDCR become 0 */
606 ndcr = nand_readl(info, NDCR);
607 while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
608 ndcr = nand_readl(info, NDCR);
613 ndcr &= ~NDCR_ND_RUN;
614 nand_writel(info, NDCR, ndcr);
617 dmaengine_terminate_all(info->dma_chan);
619 /* clear status bits */
620 nand_writel(info, NDSR, NDSR_MASK);
623 static void __maybe_unused
624 enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
628 ndcr = nand_readl(info, NDCR);
629 nand_writel(info, NDCR, ndcr & ~int_mask);
632 static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
636 ndcr = nand_readl(info, NDCR);
637 nand_writel(info, NDCR, ndcr | int_mask);
640 static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
647 * According to the datasheet, when reading from NDDB
648 * with BCH enabled, after each 32 bytes reads, we
649 * have to make sure that the NDSR.RDDREQ bit is set.
651 * Drain the FIFO 8 32 bits reads at a time, and skip
652 * the polling on the last read.
655 ioread32_rep(info->mmio_base + NDDB, data, 8);
657 ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
658 val & NDSR_RDDREQ, 1000, 5000);
660 dev_err(&info->pdev->dev,
661 "Timeout on RDDREQ while draining the FIFO\n");
670 ioread32_rep(info->mmio_base + NDDB, data, len);
673 static void handle_data_pio(struct pxa3xx_nand_info *info)
675 unsigned int do_bytes = min(info->data_size, info->chunk_size);
677 switch (info->state) {
678 case STATE_PIO_WRITING:
679 writesl(info->mmio_base + NDDB,
680 info->data_buff + info->data_buff_pos,
681 DIV_ROUND_UP(do_bytes, 4));
683 if (info->oob_size > 0)
684 writesl(info->mmio_base + NDDB,
685 info->oob_buff + info->oob_buff_pos,
686 DIV_ROUND_UP(info->oob_size, 4));
688 case STATE_PIO_READING:
690 info->data_buff + info->data_buff_pos,
691 DIV_ROUND_UP(do_bytes, 4));
693 if (info->oob_size > 0)
695 info->oob_buff + info->oob_buff_pos,
696 DIV_ROUND_UP(info->oob_size, 4));
699 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
704 /* Update buffer pointers for multi-page read/write */
705 info->data_buff_pos += do_bytes;
706 info->oob_buff_pos += info->oob_size;
707 info->data_size -= do_bytes;
710 static void pxa3xx_nand_data_dma_irq(void *data)
712 struct pxa3xx_nand_info *info = data;
713 struct dma_tx_state state;
714 enum dma_status status;
716 status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
717 if (likely(status == DMA_COMPLETE)) {
718 info->state = STATE_DMA_DONE;
720 dev_err(&info->pdev->dev, "DMA error on data channel\n");
721 info->retcode = ERR_DMABUSERR;
723 dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
725 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
726 enable_int(info, NDCR_INT_MASK);
729 static void start_data_dma(struct pxa3xx_nand_info *info)
731 enum dma_transfer_direction direction;
732 struct dma_async_tx_descriptor *tx;
734 switch (info->state) {
735 case STATE_DMA_WRITING:
736 info->dma_dir = DMA_TO_DEVICE;
737 direction = DMA_MEM_TO_DEV;
739 case STATE_DMA_READING:
740 info->dma_dir = DMA_FROM_DEVICE;
741 direction = DMA_DEV_TO_MEM;
744 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
748 info->sg.length = info->data_size +
749 (info->oob_size ? info->spare_size + info->ecc_size : 0);
750 dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
752 tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
755 dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
758 tx->callback = pxa3xx_nand_data_dma_irq;
759 tx->callback_param = info;
760 info->dma_cookie = dmaengine_submit(tx);
761 dma_async_issue_pending(info->dma_chan);
762 dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
763 __func__, direction, info->dma_cookie, info->sg.length);
766 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
768 struct pxa3xx_nand_info *info = data;
770 handle_data_pio(info);
772 info->state = STATE_CMD_DONE;
773 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
778 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
780 struct pxa3xx_nand_info *info = devid;
781 unsigned int status, is_completed = 0, is_ready = 0;
782 unsigned int ready, cmd_done;
783 irqreturn_t ret = IRQ_HANDLED;
786 ready = NDSR_FLASH_RDY;
787 cmd_done = NDSR_CS0_CMDD;
790 cmd_done = NDSR_CS1_CMDD;
793 status = nand_readl(info, NDSR);
795 if (status & NDSR_UNCORERR)
796 info->retcode = ERR_UNCORERR;
797 if (status & NDSR_CORERR) {
798 info->retcode = ERR_CORERR;
799 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
801 info->ecc_err_cnt = NDSR_ERR_CNT(status);
803 info->ecc_err_cnt = 1;
806 * Each chunk composing a page is corrected independently,
807 * and we need to store maximum number of corrected bitflips
808 * to return it to the MTD layer in ecc.read_page().
810 info->max_bitflips = max_t(unsigned int,
814 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
815 /* whether use dma to transfer data */
817 disable_int(info, NDCR_INT_MASK);
818 info->state = (status & NDSR_RDDREQ) ?
819 STATE_DMA_READING : STATE_DMA_WRITING;
820 start_data_dma(info);
821 goto NORMAL_IRQ_EXIT;
823 info->state = (status & NDSR_RDDREQ) ?
824 STATE_PIO_READING : STATE_PIO_WRITING;
825 ret = IRQ_WAKE_THREAD;
826 goto NORMAL_IRQ_EXIT;
829 if (status & cmd_done) {
830 info->state = STATE_CMD_DONE;
833 if (status & ready) {
834 info->state = STATE_READY;
839 * Clear all status bit before issuing the next command, which
840 * can and will alter the status bits and will deserve a new
841 * interrupt on its own. This lets the controller exit the IRQ
843 nand_writel(info, NDSR, status);
845 if (status & NDSR_WRCMDREQ) {
846 status &= ~NDSR_WRCMDREQ;
847 info->state = STATE_CMD_HANDLE;
850 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
851 * must be loaded by writing directly either 12 or 16
852 * bytes directly to NDCB0, four bytes at a time.
854 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
855 * but each NDCBx register can be read.
857 nand_writel(info, NDCB0, info->ndcb0);
858 nand_writel(info, NDCB0, info->ndcb1);
859 nand_writel(info, NDCB0, info->ndcb2);
861 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
862 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
863 nand_writel(info, NDCB0, info->ndcb3);
867 complete(&info->cmd_complete);
869 complete(&info->dev_ready);
/* Return 1 if all @len bytes of @buf are 0xff (erased flash), else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
882 static void set_command_address(struct pxa3xx_nand_info *info,
883 unsigned int page_size, uint16_t column, int page_addr)
885 /* small page addr setting */
886 if (page_size < PAGE_CHUNK_SIZE) {
887 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
892 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
895 if (page_addr & 0xFF0000)
896 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
902 static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
904 struct pxa3xx_nand_host *host = info->host[info->cs];
905 struct mtd_info *mtd = nand_to_mtd(&host->chip);
907 /* reset data and oob column point to handle data */
911 info->data_buff_pos = 0;
912 info->oob_buff_pos = 0;
915 info->retcode = ERR_NONE;
916 info->ecc_err_cnt = 0;
922 case NAND_CMD_PAGEPROG:
924 case NAND_CMD_READOOB:
925 pxa3xx_set_datasize(info, mtd);
937 * If we are about to issue a read command, or about to set
938 * the write address, then clean the data buffer.
940 if (command == NAND_CMD_READ0 ||
941 command == NAND_CMD_READOOB ||
942 command == NAND_CMD_SEQIN) {
944 info->buf_count = mtd->writesize + mtd->oobsize;
945 memset(info->data_buff, 0xFF, info->buf_count);
950 static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
951 int ext_cmd_type, uint16_t column, int page_addr)
953 int addr_cycle, exec_cmd;
954 struct pxa3xx_nand_host *host;
955 struct mtd_info *mtd;
957 host = info->host[info->cs];
958 mtd = nand_to_mtd(&host->chip);
963 info->ndcb0 = NDCB0_CSEL;
967 if (command == NAND_CMD_SEQIN)
970 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
971 + host->col_addr_cycles);
974 case NAND_CMD_READOOB:
976 info->buf_start = column;
977 info->ndcb0 |= NDCB0_CMD_TYPE(0)
981 if (command == NAND_CMD_READOOB)
982 info->buf_start += mtd->writesize;
985 * Multiple page read needs an 'extended command type' field,
986 * which is either naked-read or last-read according to the
989 if (mtd->writesize == PAGE_CHUNK_SIZE) {
990 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
991 } else if (mtd->writesize > PAGE_CHUNK_SIZE) {
992 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
994 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
995 info->ndcb3 = info->chunk_size +
999 set_command_address(info, mtd->writesize, column, page_addr);
1002 case NAND_CMD_SEQIN:
1004 info->buf_start = column;
1005 set_command_address(info, mtd->writesize, 0, page_addr);
1008 * Multiple page programming needs to execute the initial
1009 * SEQIN command that sets the page address.
1011 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1012 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1013 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1016 /* No data transfer in this case */
1017 info->data_size = 0;
1022 case NAND_CMD_PAGEPROG:
1023 if (is_buf_blank(info->data_buff,
1024 (mtd->writesize + mtd->oobsize))) {
1029 /* Second command setting for large pages */
1030 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1032 * Multiple page write uses the 'extended command'
1033 * field. This can be used to issue a command dispatch
1034 * or a naked-write depending on the current stage.
1036 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1038 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
1039 info->ndcb3 = info->chunk_size +
1043 * This is the command dispatch that completes a chunked
1044 * page program operation.
1046 if (info->data_size == 0) {
1047 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
1048 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
1055 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
1059 | (NAND_CMD_PAGEPROG << 8)
1065 case NAND_CMD_PARAM:
1066 info->buf_count = INIT_BUFFER_SIZE;
1067 info->ndcb0 |= NDCB0_CMD_TYPE(0)
1071 info->ndcb1 = (column & 0xFF);
1072 info->ndcb3 = INIT_BUFFER_SIZE;
1073 info->data_size = INIT_BUFFER_SIZE;
1076 case NAND_CMD_READID:
1077 info->buf_count = READ_ID_BYTES;
1078 info->ndcb0 |= NDCB0_CMD_TYPE(3)
1081 info->ndcb1 = (column & 0xFF);
1083 info->data_size = 8;
1085 case NAND_CMD_STATUS:
1086 info->buf_count = 1;
1087 info->ndcb0 |= NDCB0_CMD_TYPE(4)
1091 info->data_size = 8;
1094 case NAND_CMD_ERASE1:
1095 info->ndcb0 |= NDCB0_CMD_TYPE(2)
1099 | (NAND_CMD_ERASE2 << 8)
1101 info->ndcb1 = page_addr;
1105 case NAND_CMD_RESET:
1106 info->ndcb0 |= NDCB0_CMD_TYPE(5)
1111 case NAND_CMD_ERASE2:
1117 dev_err(&info->pdev->dev, "non-supported command %x\n",
1125 static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1126 int column, int page_addr)
1128 struct nand_chip *chip = mtd_to_nand(mtd);
1129 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1130 struct pxa3xx_nand_info *info = host->info_data;
1134 * if this is a x16 device ,then convert the input
1135 * "byte" address into a "word" address appropriate
1136 * for indexing a word-oriented device
1138 if (info->reg_ndcr & NDCR_DWIDTH_M)
1142 * There may be different NAND chip hooked to
1143 * different chip select, so check whether
1144 * chip select has been changed, if yes, reset the timing
1146 if (info->cs != host->cs) {
1147 info->cs = host->cs;
1148 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1149 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1152 prepare_start_command(info, command);
1154 info->state = STATE_PREPARED;
1155 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1158 init_completion(&info->cmd_complete);
1159 init_completion(&info->dev_ready);
1160 info->need_wait = 1;
1161 pxa3xx_nand_start(info);
1163 if (!wait_for_completion_timeout(&info->cmd_complete,
1164 CHIP_DELAY_TIMEOUT)) {
1165 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1166 /* Stop State Machine for next command cycle */
1167 pxa3xx_nand_stop(info);
1170 info->state = STATE_IDLE;
1173 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1174 const unsigned command,
1175 int column, int page_addr)
1177 struct nand_chip *chip = mtd_to_nand(mtd);
1178 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1179 struct pxa3xx_nand_info *info = host->info_data;
1180 int exec_cmd, ext_cmd_type;
1183 * if this is a x16 device then convert the input
1184 * "byte" address into a "word" address appropriate
1185 * for indexing a word-oriented device
1187 if (info->reg_ndcr & NDCR_DWIDTH_M)
1191 * There may be different NAND chip hooked to
1192 * different chip select, so check whether
1193 * chip select has been changed, if yes, reset the timing
1195 if (info->cs != host->cs) {
1196 info->cs = host->cs;
1197 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1198 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1201 /* Select the extended command for the first command */
1203 case NAND_CMD_READ0:
1204 case NAND_CMD_READOOB:
1205 ext_cmd_type = EXT_CMD_TYPE_MONO;
1207 case NAND_CMD_SEQIN:
1208 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1210 case NAND_CMD_PAGEPROG:
1211 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1218 prepare_start_command(info, command);
1221 * Prepare the "is ready" completion before starting a command
1222 * transaction sequence. If the command is not executed the
1223 * completion will be completed, see below.
1225 * We can do that inside the loop because the command variable
1226 * is invariant and thus so is the exec_cmd.
1228 info->need_wait = 1;
1229 init_completion(&info->dev_ready);
1231 info->state = STATE_PREPARED;
1232 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1235 info->need_wait = 0;
1236 complete(&info->dev_ready);
1240 init_completion(&info->cmd_complete);
1241 pxa3xx_nand_start(info);
1243 if (!wait_for_completion_timeout(&info->cmd_complete,
1244 CHIP_DELAY_TIMEOUT)) {
1245 dev_err(&info->pdev->dev, "Wait time out!!!\n");
1246 /* Stop State Machine for next command cycle */
1247 pxa3xx_nand_stop(info);
1251 /* Check if the sequence is complete */
1252 if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
1256 * After a splitted program command sequence has issued
1257 * the command dispatch, the command sequence is complete.
1259 if (info->data_size == 0 &&
1260 command == NAND_CMD_PAGEPROG &&
1261 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1264 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1265 /* Last read: issue a 'last naked read' */
1266 if (info->data_size == info->chunk_size)
1267 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1269 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1272 * If a splitted program command has no more data to transfer,
1273 * the command dispatch must be issued to complete.
1275 } else if (command == NAND_CMD_PAGEPROG &&
1276 info->data_size == 0) {
1277 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1281 info->state = STATE_IDLE;
1284 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1285 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1288 chip->write_buf(mtd, buf, mtd->writesize);
1289 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1294 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1295 struct nand_chip *chip, uint8_t *buf, int oob_required,
1298 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1299 struct pxa3xx_nand_info *info = host->info_data;
1301 chip->read_buf(mtd, buf, mtd->writesize);
1302 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1304 if (info->retcode == ERR_CORERR && info->use_ecc) {
1305 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1307 } else if (info->retcode == ERR_UNCORERR) {
1309 * for blank page (all 0xff), HW will calculate its ECC as
1310 * 0, which is different from the ECC information within
1311 * OOB, ignore such uncorrectable errors
1313 if (is_buf_blank(buf, mtd->writesize))
1314 info->retcode = ERR_NONE;
1316 mtd->ecc_stats.failed++;
1319 return info->max_bitflips;
1322 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1324 struct nand_chip *chip = mtd_to_nand(mtd);
1325 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1326 struct pxa3xx_nand_info *info = host->info_data;
1329 if (info->buf_start < info->buf_count)
1330 /* Has just send a new command? */
1331 retval = info->data_buff[info->buf_start++];
1336 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1338 struct nand_chip *chip = mtd_to_nand(mtd);
1339 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1340 struct pxa3xx_nand_info *info = host->info_data;
1341 u16 retval = 0xFFFF;
1343 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1344 retval = *((u16 *)(info->data_buff+info->buf_start));
1345 info->buf_start += 2;
1350 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1352 struct nand_chip *chip = mtd_to_nand(mtd);
1353 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1354 struct pxa3xx_nand_info *info = host->info_data;
1355 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1357 memcpy(buf, info->data_buff + info->buf_start, real_len);
1358 info->buf_start += real_len;
1361 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1362 const uint8_t *buf, int len)
1364 struct nand_chip *chip = mtd_to_nand(mtd);
1365 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1366 struct pxa3xx_nand_info *info = host->info_data;
1367 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1369 memcpy(info->data_buff + info->buf_start, buf, real_len);
1370 info->buf_start += real_len;
/*
 * Intentionally empty: this driver performs chip selection as part of
 * command handling rather than through the select_chip callback.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}
1378 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1380 struct nand_chip *chip = mtd_to_nand(mtd);
1381 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1382 struct pxa3xx_nand_info *info = host->info_data;
1384 if (info->need_wait) {
1385 info->need_wait = 0;
1386 if (!wait_for_completion_timeout(&info->dev_ready,
1387 CHIP_DELAY_TIMEOUT)) {
1388 dev_err(&info->pdev->dev, "Ready time out!!!\n");
1389 return NAND_STATUS_FAIL;
1393 /* pxa3xx_nand_send_command has waited for command complete */
1394 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1395 if (info->retcode == ERR_NONE)
1398 return NAND_STATUS_FAIL;
1401 return NAND_STATUS_READY;
1404 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1406 struct pxa3xx_nand_host *host = info->host[info->cs];
1407 struct platform_device *pdev = info->pdev;
1408 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1409 const struct nand_sdr_timings *timings;
1411 /* Configure default flash values */
1412 info->chunk_size = PAGE_CHUNK_SIZE;
1413 info->reg_ndcr = 0x0; /* enable all interrupts */
1414 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1415 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1416 info->reg_ndcr |= NDCR_SPARE_EN;
1418 /* use the common timing to make a try */
1419 timings = onfi_async_timing_mode_to_sdr_timings(0);
1420 if (IS_ERR(timings))
1421 return PTR_ERR(timings);
1423 pxa3xx_nand_set_sdr_timing(host, timings);
1427 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1429 struct pxa3xx_nand_host *host = info->host[info->cs];
1430 struct nand_chip *chip = &host->chip;
1431 struct mtd_info *mtd = nand_to_mtd(chip);
1433 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1434 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1435 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1438 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1440 struct platform_device *pdev = info->pdev;
1441 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1442 uint32_t ndcr = nand_readl(info, NDCR);
1444 /* Set an initial chunk size */
1445 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1446 info->reg_ndcr = ndcr &
1447 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1448 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1449 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1450 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1453 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1455 struct platform_device *pdev = info->pdev;
1456 struct dma_slave_config config;
1457 dma_cap_mask_t mask;
1458 struct pxad_param param;
1461 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1462 if (info->data_buff == NULL)
1467 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1471 sg_init_one(&info->sg, info->data_buff, info->buf_size);
1473 dma_cap_set(DMA_SLAVE, mask);
1474 param.prio = PXAD_PRIO_LOWEST;
1475 param.drcmr = info->drcmr_dat;
1476 info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
1479 if (!info->dma_chan) {
1480 dev_err(&pdev->dev, "unable to request data dma channel\n");
1484 memset(&config, 0, sizeof(config));
1485 config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1486 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1487 config.src_addr = info->mmio_phys + NDDB;
1488 config.dst_addr = info->mmio_phys + NDDB;
1489 config.src_maxburst = 32;
1490 config.dst_maxburst = 32;
1491 ret = dmaengine_slave_config(info->dma_chan, &config);
1493 dev_err(&info->pdev->dev,
1494 "dma channel configuration failed: %d\n",
1500 * Now that DMA buffers are allocated we turn on
1501 * DMA proper for I/O operations.
1507 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
1509 if (info->use_dma) {
1510 dmaengine_terminate_all(info->dma_chan);
1511 dma_release_channel(info->dma_chan);
1513 kfree(info->data_buff);
1516 static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1517 struct nand_ecc_ctrl *ecc,
1518 int strength, int ecc_stepsize, int page_size)
1520 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
1521 info->chunk_size = 2048;
1522 info->spare_size = 40;
1523 info->ecc_size = 24;
1524 ecc->mode = NAND_ECC_HW;
1528 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
1529 info->chunk_size = 512;
1530 info->spare_size = 8;
1532 ecc->mode = NAND_ECC_HW;
1537 * Required ECC: 4-bit correction per 512 bytes
1538 * Select: 16-bit correction per 2048 bytes
1540 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1542 info->chunk_size = 2048;
1543 info->spare_size = 32;
1544 info->ecc_size = 32;
1545 ecc->mode = NAND_ECC_HW;
1546 ecc->size = info->chunk_size;
1547 ecc->layout = &ecc_layout_2KB_bch4bit;
1550 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1552 info->chunk_size = 2048;
1553 info->spare_size = 32;
1554 info->ecc_size = 32;
1555 ecc->mode = NAND_ECC_HW;
1556 ecc->size = info->chunk_size;
1557 ecc->layout = &ecc_layout_4KB_bch4bit;
1561 * Required ECC: 8-bit correction per 512 bytes
1562 * Select: 16-bit correction per 1024 bytes
1564 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1566 info->chunk_size = 1024;
1567 info->spare_size = 0;
1568 info->ecc_size = 32;
1569 ecc->mode = NAND_ECC_HW;
1570 ecc->size = info->chunk_size;
1571 ecc->layout = &ecc_layout_4KB_bch8bit;
1574 dev_err(&info->pdev->dev,
1575 "ECC strength %d at page size %d is not supported\n",
1576 strength, page_size);
1580 dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
1581 ecc->strength, ecc->size);
1585 static int pxa3xx_nand_scan(struct mtd_info *mtd)
1587 struct nand_chip *chip = mtd_to_nand(mtd);
1588 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1589 struct pxa3xx_nand_info *info = host->info_data;
1590 struct platform_device *pdev = info->pdev;
1591 struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
1593 uint16_t ecc_strength, ecc_step;
1595 if (pdata->keep_config) {
1596 pxa3xx_nand_detect_config(info);
1598 ret = pxa3xx_nand_config_ident(info);
1603 if (info->reg_ndcr & NDCR_DWIDTH_M)
1604 chip->options |= NAND_BUSWIDTH_16;
1606 /* Device detection must be done with ECC disabled */
1607 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1608 nand_writel(info, NDECCCTRL, 0x0);
1610 if (nand_scan_ident(mtd, 1, NULL))
1613 if (!pdata->keep_config) {
1614 ret = pxa3xx_nand_init(host);
1616 dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
1622 if (pdata->flash_bbt) {
1624 * We'll use a bad block table stored in-flash and don't
1625 * allow writing the bad block marker to the flash.
1627 chip->bbt_options |= NAND_BBT_USE_FLASH |
1628 NAND_BBT_NO_OOB_BBM;
1629 chip->bbt_td = &bbt_main_descr;
1630 chip->bbt_md = &bbt_mirror_descr;
1634 * If the page size is bigger than the FIFO size, let's check
1635 * we are given the right variant and then switch to the extended
1636 * (aka splitted) command handling,
1638 if (mtd->writesize > PAGE_CHUNK_SIZE) {
1639 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1640 chip->cmdfunc = nand_cmdfunc_extended;
1642 dev_err(&info->pdev->dev,
1643 "unsupported page size on this variant\n");
1648 if (pdata->ecc_strength && pdata->ecc_step_size) {
1649 ecc_strength = pdata->ecc_strength;
1650 ecc_step = pdata->ecc_step_size;
1652 ecc_strength = chip->ecc_strength_ds;
1653 ecc_step = chip->ecc_step_ds;
1656 /* Set default ECC strength requirements on non-ONFI devices */
1657 if (ecc_strength < 1 && ecc_step < 1) {
1662 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1663 ecc_step, mtd->writesize);
1667 /* calculate addressing information */
1668 if (mtd->writesize >= 2048)
1669 host->col_addr_cycles = 2;
1671 host->col_addr_cycles = 1;
1673 /* release the initial buffer */
1674 kfree(info->data_buff);
1676 /* allocate the real data + oob buffer */
1677 info->buf_size = mtd->writesize + mtd->oobsize;
1678 ret = pxa3xx_nand_init_buff(info);
1681 info->oob_buff = info->data_buff + mtd->writesize;
1683 if ((mtd->size >> chip->page_shift) > 65536)
1684 host->row_addr_cycles = 3;
1686 host->row_addr_cycles = 2;
1688 if (!pdata->keep_config)
1689 pxa3xx_nand_config_tail(info);
1691 return nand_scan_tail(mtd);
1694 static int alloc_nand_resource(struct platform_device *pdev)
1696 struct device_node *np = pdev->dev.of_node;
1697 struct pxa3xx_nand_platform_data *pdata;
1698 struct pxa3xx_nand_info *info;
1699 struct pxa3xx_nand_host *host;
1700 struct nand_chip *chip = NULL;
1701 struct mtd_info *mtd;
1705 pdata = dev_get_platdata(&pdev->dev);
1706 if (pdata->num_cs <= 0)
1708 info = devm_kzalloc(&pdev->dev,
1709 sizeof(*info) + sizeof(*host) * pdata->num_cs,
1715 info->variant = pxa3xx_nand_get_variant(pdev);
1716 for (cs = 0; cs < pdata->num_cs; cs++) {
1717 host = (void *)&info[1] + sizeof(*host) * cs;
1719 nand_set_controller_data(chip, host);
1720 mtd = nand_to_mtd(chip);
1721 info->host[cs] = host;
1723 host->info_data = info;
1724 mtd->dev.parent = &pdev->dev;
1725 /* FIXME: all chips use the same device tree partitions */
1726 nand_set_flash_node(chip, np);
1728 nand_set_controller_data(chip, host);
1729 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1730 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1731 chip->controller = &info->controller;
1732 chip->waitfunc = pxa3xx_nand_waitfunc;
1733 chip->select_chip = pxa3xx_nand_select_chip;
1734 chip->read_word = pxa3xx_nand_read_word;
1735 chip->read_byte = pxa3xx_nand_read_byte;
1736 chip->read_buf = pxa3xx_nand_read_buf;
1737 chip->write_buf = pxa3xx_nand_write_buf;
1738 chip->options |= NAND_NO_SUBPAGE_WRITE;
1739 chip->cmdfunc = nand_cmdfunc;
1742 spin_lock_init(&chip->controller->lock);
1743 init_waitqueue_head(&chip->controller->wq);
1744 info->clk = devm_clk_get(&pdev->dev, NULL);
1745 if (IS_ERR(info->clk)) {
1746 dev_err(&pdev->dev, "failed to get nand clock\n");
1747 return PTR_ERR(info->clk);
1749 ret = clk_prepare_enable(info->clk);
1754 r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1757 "no resource defined for data DMA\n");
1759 goto fail_disable_clk;
1761 info->drcmr_dat = r->start;
1763 r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
1766 "no resource defined for cmd DMA\n");
1768 goto fail_disable_clk;
1770 info->drcmr_cmd = r->start;
1773 irq = platform_get_irq(pdev, 0);
1775 dev_err(&pdev->dev, "no IRQ resource defined\n");
1777 goto fail_disable_clk;
1780 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1781 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
1782 if (IS_ERR(info->mmio_base)) {
1783 ret = PTR_ERR(info->mmio_base);
1784 goto fail_disable_clk;
1786 info->mmio_phys = r->start;
1788 /* Allocate a buffer to allow flash detection */
1789 info->buf_size = INIT_BUFFER_SIZE;
1790 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1791 if (info->data_buff == NULL) {
1793 goto fail_disable_clk;
1796 /* initialize all interrupts to be disabled */
1797 disable_int(info, NDSR_MASK);
1799 ret = request_threaded_irq(irq, pxa3xx_nand_irq,
1800 pxa3xx_nand_irq_thread, IRQF_ONESHOT,
1803 dev_err(&pdev->dev, "failed to request IRQ\n");
1807 platform_set_drvdata(pdev, info);
1812 free_irq(irq, info);
1813 kfree(info->data_buff);
1815 clk_disable_unprepare(info->clk);
1819 static int pxa3xx_nand_remove(struct platform_device *pdev)
1821 struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
1822 struct pxa3xx_nand_platform_data *pdata;
1828 pdata = dev_get_platdata(&pdev->dev);
1830 irq = platform_get_irq(pdev, 0);
1832 free_irq(irq, info);
1833 pxa3xx_nand_free_buff(info);
1836 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
1837 * In order to prevent a lockup of the system bus, the DFI bus
1838 * arbitration is granted to SMC upon driver removal. This is done by
1839 * setting the x_ARB_CNTL bit, which also prevents the NAND to have
1840 * access to the bus anymore.
1842 nand_writel(info, NDCR,
1843 (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
1844 NFCV1_NDCR_ARB_CNTL);
1845 clk_disable_unprepare(info->clk);
1847 for (cs = 0; cs < pdata->num_cs; cs++)
1848 nand_release(nand_to_mtd(&info->host[cs]->chip));
1852 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
1854 struct pxa3xx_nand_platform_data *pdata;
1855 struct device_node *np = pdev->dev.of_node;
1856 const struct of_device_id *of_id =
1857 of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
1862 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1866 if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
1867 pdata->enable_arbiter = 1;
1868 if (of_get_property(np, "marvell,nand-keep-config", NULL))
1869 pdata->keep_config = 1;
1870 of_property_read_u32(np, "num-cs", &pdata->num_cs);
1871 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1873 pdata->ecc_strength = of_get_nand_ecc_strength(np);
1874 if (pdata->ecc_strength < 0)
1875 pdata->ecc_strength = 0;
1877 pdata->ecc_step_size = of_get_nand_ecc_step_size(np);
1878 if (pdata->ecc_step_size < 0)
1879 pdata->ecc_step_size = 0;
1881 pdev->dev.platform_data = pdata;
1886 static int pxa3xx_nand_probe(struct platform_device *pdev)
1888 struct pxa3xx_nand_platform_data *pdata;
1889 struct pxa3xx_nand_info *info;
1890 int ret, cs, probe_success, dma_available;
1892 dma_available = IS_ENABLED(CONFIG_ARM) &&
1893 (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
1894 if (use_dma && !dma_available) {
1896 dev_warn(&pdev->dev,
1897 "This platform can't do DMA on this device\n");
1900 ret = pxa3xx_nand_probe_dt(pdev);
1904 pdata = dev_get_platdata(&pdev->dev);
1906 dev_err(&pdev->dev, "no platform data defined\n");
1910 ret = alloc_nand_resource(pdev);
1912 dev_err(&pdev->dev, "alloc nand resource failed\n");
1916 info = platform_get_drvdata(pdev);
1918 for (cs = 0; cs < pdata->num_cs; cs++) {
1919 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1922 * The mtd name matches the one used in 'mtdparts' kernel
1923 * parameter. This name cannot be changed or otherwise
1924 * user's mtd partitions configuration would get broken.
1926 mtd->name = "pxa3xx_nand-0";
1928 ret = pxa3xx_nand_scan(mtd);
1930 dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
1935 ret = mtd_device_register(mtd, pdata->parts[cs],
1936 pdata->nr_parts[cs]);
1941 if (!probe_success) {
1942 pxa3xx_nand_remove(pdev);
1950 static int pxa3xx_nand_suspend(struct device *dev)
1952 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
1955 dev_err(dev, "driver busy, state = %d\n", info->state);
1959 clk_disable(info->clk);
1963 static int pxa3xx_nand_resume(struct device *dev)
1965 struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
1968 ret = clk_enable(info->clk);
1972 /* We don't want to handle interrupt without calling mtd routine */
1973 disable_int(info, NDCR_INT_MASK);
1976 * Directly set the chip select to a invalid value,
1977 * then the driver would reset the timing according
1978 * to current chip select at the beginning of cmdfunc
1983 * As the spec says, the NDSR would be updated to 0x1800 when
1984 * doing the nand_clk disable/enable.
1985 * To prevent it damaging state machine of the driver, clear
1986 * all status before resume
1988 nand_writel(info, NDSR, NDSR_MASK);
1993 #define pxa3xx_nand_suspend NULL
1994 #define pxa3xx_nand_resume NULL
1997 static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
1998 .suspend = pxa3xx_nand_suspend,
1999 .resume = pxa3xx_nand_resume,
2002 static struct platform_driver pxa3xx_nand_driver = {
2004 .name = "pxa3xx-nand",
2005 .of_match_table = pxa3xx_nand_dt_ids,
2006 .pm = &pxa3xx_nand_pm_ops,
2008 .probe = pxa3xx_nand_probe,
2009 .remove = pxa3xx_nand_remove,
2012 module_platform_driver(pxa3xx_nand_driver);
2014 MODULE_LICENSE("GPL");
2015 MODULE_DESCRIPTION("PXA3xx NAND controller driver");