2 * Copyright © 2010-2015 Broadcom Corporation
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
14 #include <linux/clk.h>
15 #include <linux/version.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/platform_device.h>
21 #include <linux/err.h>
22 #include <linux/completion.h>
23 #include <linux/interrupt.h>
24 #include <linux/spinlock.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/ioport.h>
27 #include <linux/bug.h>
28 #include <linux/kernel.h>
29 #include <linux/bitops.h>
31 #include <linux/mtd/mtd.h>
32 #include <linux/mtd/nand.h>
33 #include <linux/mtd/partitions.h>
35 #include <linux/of_mtd.h>
36 #include <linux/of_platform.h>
37 #include <linux/slab.h>
38 #include <linux/list.h>
39 #include <linux/log2.h>
44 * This flag controls whether WP stays on between erase/write commands to
45 * mitigate flash corruption due to power glitches. Values:
46 * 0: NAND_WP is not used or not available
47 * 1: NAND_WP is set by default, cleared for erase/write operations
48 * 2: NAND_WP is always cleared
51 module_param(wp_on, int, 0444);
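/*
 * Usage sketch (an assumption about the usual setup, not stated in this file):
 * when the driver is built in, the parameter can be set on the kernel command
 * line as brcmnand.wp_on=<0|1|2>; when built as a module, pass wp_on=<0|1|2>
 * at load time.
 */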
53 /***********************************************************************
55 ***********************************************************************/
57 #define DRV_NAME "brcmnand"
60 #define CMD_PAGE_READ 0x01
61 #define CMD_SPARE_AREA_READ 0x02
62 #define CMD_STATUS_READ 0x03
63 #define CMD_PROGRAM_PAGE 0x04
64 #define CMD_PROGRAM_SPARE_AREA 0x05
65 #define CMD_COPY_BACK 0x06
66 #define CMD_DEVICE_ID_READ 0x07
67 #define CMD_BLOCK_ERASE 0x08
68 #define CMD_FLASH_RESET 0x09
69 #define CMD_BLOCKS_LOCK 0x0a
70 #define CMD_BLOCKS_LOCK_DOWN 0x0b
71 #define CMD_BLOCKS_UNLOCK 0x0c
72 #define CMD_READ_BLOCKS_LOCK_STATUS 0x0d
73 #define CMD_PARAMETER_READ 0x0e
74 #define CMD_PARAMETER_CHANGE_COL 0x0f
75 #define CMD_LOW_LEVEL_OP 0x10
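/*
 * These native opcodes are selected from the standard NAND_CMD_* codes in
 * brcmnand_cmdfunc() below; for example, NAND_CMD_RESET maps to
 * CMD_FLASH_RESET and NAND_CMD_ERASE1 maps to CMD_BLOCK_ERASE.
 */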
77 struct brcm_nand_dma_desc {
92 /* Bitfields for brcm_nand_dma_desc::status_valid */
93 #define FLASH_DMA_ECC_ERROR (1 << 8)
94 #define FLASH_DMA_CORR_ERROR (1 << 9)
96 /* 512B flash cache in the NAND controller HW */
99 #define FC_WORDS (FC_BYTES >> 2)
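/* With the 512B flash cache, FC_WORDS works out to 512 >> 2 = 128 32-bit words */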
101 #define BRCMNAND_MIN_PAGESIZE 512
102 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
103 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
105 /* Controller feature flags */
107 BRCMNAND_HAS_1K_SECTORS = BIT(0),
108 BRCMNAND_HAS_PREFETCH = BIT(1),
109 BRCMNAND_HAS_CACHE_MODE = BIT(2),
110 BRCMNAND_HAS_WP = BIT(3),
113 struct brcmnand_controller {
115 struct nand_hw_control controller;
116 void __iomem *nand_base;
117 void __iomem *nand_fc; /* flash cache */
118 void __iomem *flash_dma_base;
120 unsigned int dma_irq;
123 /* Some SoCs provide custom interrupt status register(s) */
124 struct brcmnand_soc *soc;
126 /* Some SoCs have a gateable clock for the controller */
131 struct completion done;
132 struct completion dma_done;
134 /* List of NAND hosts (one for each chip-select) */
135 struct list_head host_list;
137 struct brcm_nand_dma_desc *dma_desc;
140 /* in-memory cache of the FLASH_CACHE, used only for some commands */
141 u8 flash_cache[FC_BYTES];
143 /* Controller revision details */
144 const u16 *reg_offsets;
145 unsigned int reg_spacing; /* between CS1, CS2, ... regs */
146 const u8 *cs_offsets; /* within each chip-select */
147 const u8 *cs0_offsets; /* within CS0, if different */
148 unsigned int max_block_size;
149 const unsigned int *block_sizes;
150 unsigned int max_page_size;
151 const unsigned int *page_sizes;
152 unsigned int max_oob;
155 /* for low-power standby/resume only */
156 u32 nand_cs_nand_select;
157 u32 nand_cs_nand_xor;
158 u32 corr_stat_threshold;
162 struct brcmnand_cfg {
164 unsigned int block_size;
165 unsigned int page_size;
166 unsigned int spare_area_size;
167 unsigned int device_width;
168 unsigned int col_adr_bytes;
169 unsigned int blk_adr_bytes;
170 unsigned int ful_adr_bytes;
171 unsigned int sector_size_1k;
172 unsigned int ecc_level;
173 /* used for low-power standby/resume only */
181 struct brcmnand_host {
182 struct list_head node;
184 struct nand_chip chip;
186 struct platform_device *pdev;
189 unsigned int last_cmd;
190 unsigned int last_byte;
192 struct brcmnand_cfg hwcfg;
193 struct brcmnand_controller *ctrl;
197 BRCMNAND_CMD_START = 0,
198 BRCMNAND_CMD_EXT_ADDRESS,
199 BRCMNAND_CMD_ADDRESS,
200 BRCMNAND_INTFC_STATUS,
205 BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */
206 BRCMNAND_CORR_THRESHOLD,
207 BRCMNAND_CORR_THRESHOLD_EXT,
208 BRCMNAND_UNCORR_COUNT,
210 BRCMNAND_CORR_EXT_ADDR,
212 BRCMNAND_UNCORR_EXT_ADDR,
213 BRCMNAND_UNCORR_ADDR,
218 BRCMNAND_OOB_READ_BASE,
219 BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */
220 BRCMNAND_OOB_WRITE_BASE,
221 BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */
226 static const u16 brcmnand_regs_v40[] = {
227 [BRCMNAND_CMD_START] = 0x04,
228 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
229 [BRCMNAND_CMD_ADDRESS] = 0x0c,
230 [BRCMNAND_INTFC_STATUS] = 0x6c,
231 [BRCMNAND_CS_SELECT] = 0x14,
232 [BRCMNAND_CS_XOR] = 0x18,
233 [BRCMNAND_LL_OP] = 0x178,
234 [BRCMNAND_CS0_BASE] = 0x40,
235 [BRCMNAND_CS1_BASE] = 0xd0,
236 [BRCMNAND_CORR_THRESHOLD] = 0x84,
237 [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
238 [BRCMNAND_UNCORR_COUNT] = 0,
239 [BRCMNAND_CORR_COUNT] = 0,
240 [BRCMNAND_CORR_EXT_ADDR] = 0x70,
241 [BRCMNAND_CORR_ADDR] = 0x74,
242 [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
243 [BRCMNAND_UNCORR_ADDR] = 0x7c,
244 [BRCMNAND_SEMAPHORE] = 0x58,
245 [BRCMNAND_ID] = 0x60,
246 [BRCMNAND_ID_EXT] = 0x64,
247 [BRCMNAND_LL_RDATA] = 0x17c,
248 [BRCMNAND_OOB_READ_BASE] = 0x20,
249 [BRCMNAND_OOB_READ_10_BASE] = 0x130,
250 [BRCMNAND_OOB_WRITE_BASE] = 0x30,
251 [BRCMNAND_OOB_WRITE_10_BASE] = 0,
252 [BRCMNAND_FC_BASE] = 0x200,
256 static const u16 brcmnand_regs_v50[] = {
257 [BRCMNAND_CMD_START] = 0x04,
258 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
259 [BRCMNAND_CMD_ADDRESS] = 0x0c,
260 [BRCMNAND_INTFC_STATUS] = 0x6c,
261 [BRCMNAND_CS_SELECT] = 0x14,
262 [BRCMNAND_CS_XOR] = 0x18,
263 [BRCMNAND_LL_OP] = 0x178,
264 [BRCMNAND_CS0_BASE] = 0x40,
265 [BRCMNAND_CS1_BASE] = 0xd0,
266 [BRCMNAND_CORR_THRESHOLD] = 0x84,
267 [BRCMNAND_CORR_THRESHOLD_EXT] = 0,
268 [BRCMNAND_UNCORR_COUNT] = 0,
269 [BRCMNAND_CORR_COUNT] = 0,
270 [BRCMNAND_CORR_EXT_ADDR] = 0x70,
271 [BRCMNAND_CORR_ADDR] = 0x74,
272 [BRCMNAND_UNCORR_EXT_ADDR] = 0x78,
273 [BRCMNAND_UNCORR_ADDR] = 0x7c,
274 [BRCMNAND_SEMAPHORE] = 0x58,
275 [BRCMNAND_ID] = 0x60,
276 [BRCMNAND_ID_EXT] = 0x64,
277 [BRCMNAND_LL_RDATA] = 0x17c,
278 [BRCMNAND_OOB_READ_BASE] = 0x20,
279 [BRCMNAND_OOB_READ_10_BASE] = 0x130,
280 [BRCMNAND_OOB_WRITE_BASE] = 0x30,
281 [BRCMNAND_OOB_WRITE_10_BASE] = 0x140,
282 [BRCMNAND_FC_BASE] = 0x200,
285 /* BRCMNAND v6.0 - v7.1 */
286 static const u16 brcmnand_regs_v60[] = {
287 [BRCMNAND_CMD_START] = 0x04,
288 [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
289 [BRCMNAND_CMD_ADDRESS] = 0x0c,
290 [BRCMNAND_INTFC_STATUS] = 0x14,
291 [BRCMNAND_CS_SELECT] = 0x18,
292 [BRCMNAND_CS_XOR] = 0x1c,
293 [BRCMNAND_LL_OP] = 0x20,
294 [BRCMNAND_CS0_BASE] = 0x50,
295 [BRCMNAND_CS1_BASE] = 0,
296 [BRCMNAND_CORR_THRESHOLD] = 0xc0,
297 [BRCMNAND_CORR_THRESHOLD_EXT] = 0xc4,
298 [BRCMNAND_UNCORR_COUNT] = 0xfc,
299 [BRCMNAND_CORR_COUNT] = 0x100,
300 [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
301 [BRCMNAND_CORR_ADDR] = 0x110,
302 [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
303 [BRCMNAND_UNCORR_ADDR] = 0x118,
304 [BRCMNAND_SEMAPHORE] = 0x150,
305 [BRCMNAND_ID] = 0x194,
306 [BRCMNAND_ID_EXT] = 0x198,
307 [BRCMNAND_LL_RDATA] = 0x19c,
308 [BRCMNAND_OOB_READ_BASE] = 0x200,
309 [BRCMNAND_OOB_READ_10_BASE] = 0,
310 [BRCMNAND_OOB_WRITE_BASE] = 0x280,
311 [BRCMNAND_OOB_WRITE_10_BASE] = 0,
312 [BRCMNAND_FC_BASE] = 0x400,
315 enum brcmnand_cs_reg {
316 BRCMNAND_CS_CFG_EXT = 0,
318 BRCMNAND_CS_ACC_CONTROL,
323 /* Per chip-select offsets for v7.1 */
324 static const u8 brcmnand_cs_offsets_v71[] = {
325 [BRCMNAND_CS_ACC_CONTROL] = 0x00,
326 [BRCMNAND_CS_CFG_EXT] = 0x04,
327 [BRCMNAND_CS_CFG] = 0x08,
328 [BRCMNAND_CS_TIMING1] = 0x0c,
329 [BRCMNAND_CS_TIMING2] = 0x10,
332 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
333 static const u8 brcmnand_cs_offsets[] = {
334 [BRCMNAND_CS_ACC_CONTROL] = 0x00,
335 [BRCMNAND_CS_CFG_EXT] = 0x04,
336 [BRCMNAND_CS_CFG] = 0x04,
337 [BRCMNAND_CS_TIMING1] = 0x08,
338 [BRCMNAND_CS_TIMING2] = 0x0c,
341 /* Per chip-select offset for <= v5.0 on CS0 only */
342 static const u8 brcmnand_cs_offsets_cs0[] = {
343 [BRCMNAND_CS_ACC_CONTROL] = 0x00,
344 [BRCMNAND_CS_CFG_EXT] = 0x08,
345 [BRCMNAND_CS_CFG] = 0x08,
346 [BRCMNAND_CS_TIMING1] = 0x10,
347 [BRCMNAND_CS_TIMING2] = 0x14,
351 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
352 * one config register, but once the bitfields overflowed, newer controllers
353 * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
356 CFG_BLK_ADR_BYTES_SHIFT = 8,
357 CFG_COL_ADR_BYTES_SHIFT = 12,
358 CFG_FUL_ADR_BYTES_SHIFT = 16,
359 CFG_BUS_WIDTH_SHIFT = 23,
360 CFG_BUS_WIDTH = BIT(CFG_BUS_WIDTH_SHIFT),
361 CFG_DEVICE_SIZE_SHIFT = 24,
363 /* Only for pre-v7.1 (with no CFG_EXT register) */
364 CFG_PAGE_SIZE_SHIFT = 20,
365 CFG_BLK_SIZE_SHIFT = 28,
367 /* Only for v7.1+ (with CFG_EXT register) */
368 CFG_EXT_PAGE_SIZE_SHIFT = 0,
369 CFG_EXT_BLK_SIZE_SHIFT = 4,
372 /* BRCMNAND_INTFC_STATUS */
374 INTFC_FLASH_STATUS = GENMASK(7, 0),
376 INTFC_ERASED = BIT(27),
377 INTFC_OOB_VALID = BIT(28),
378 INTFC_CACHE_VALID = BIT(29),
379 INTFC_FLASH_READY = BIT(30),
380 INTFC_CTLR_READY = BIT(31),
383 static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
385 return brcmnand_readl(ctrl->nand_base + offs);
388 static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
391 brcmnand_writel(val, ctrl->nand_base + offs);
394 static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
396 static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
397 static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
398 static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };
400 ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
402 /* Only support v4.0+? */
403 if (ctrl->nand_version < 0x0400) {
404 dev_err(ctrl->dev, "version %#x not supported\n",
409 /* Register offsets */
410 if (ctrl->nand_version >= 0x0600)
411 ctrl->reg_offsets = brcmnand_regs_v60;
412 else if (ctrl->nand_version >= 0x0500)
413 ctrl->reg_offsets = brcmnand_regs_v50;
414 else if (ctrl->nand_version >= 0x0400)
415 ctrl->reg_offsets = brcmnand_regs_v40;
417 /* Chip-select stride */
418 if (ctrl->nand_version >= 0x0701)
419 ctrl->reg_spacing = 0x14;
421 ctrl->reg_spacing = 0x10;
423 /* Per chip-select registers */
424 if (ctrl->nand_version >= 0x0701) {
425 ctrl->cs_offsets = brcmnand_cs_offsets_v71;
427 ctrl->cs_offsets = brcmnand_cs_offsets;
429 /* v5.0 and earlier have a different CS0 offset layout */
430 if (ctrl->nand_version <= 0x0500)
431 ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
434 /* Page / block sizes */
435 if (ctrl->nand_version >= 0x0701) {
436 /* >= v7.1 use nice power-of-2 values! */
437 ctrl->max_page_size = 16 * 1024;
438 ctrl->max_block_size = 2 * 1024 * 1024;
440 ctrl->page_sizes = page_sizes;
441 if (ctrl->nand_version >= 0x0600)
442 ctrl->block_sizes = block_sizes_v6;
444 ctrl->block_sizes = block_sizes_v4;
446 if (ctrl->nand_version < 0x0400) {
447 ctrl->max_page_size = 4096;
448 ctrl->max_block_size = 512 * 1024;
452 /* Maximum spare area sector size (per 512B) */
453 if (ctrl->nand_version >= 0x0600)
455 else if (ctrl->nand_version >= 0x0500)
460 /* v6.0 and newer (except v6.1) have prefetch support */
461 if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
462 ctrl->features |= BRCMNAND_HAS_PREFETCH;
465 * v6.x has cache mode, but it's implemented differently. Ignore it for
468 if (ctrl->nand_version >= 0x0700)
469 ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
471 if (ctrl->nand_version >= 0x0500)
472 ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
474 if (ctrl->nand_version >= 0x0700)
475 ctrl->features |= BRCMNAND_HAS_WP;
476 else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
477 ctrl->features |= BRCMNAND_HAS_WP;
482 static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
483 enum brcmnand_reg reg)
485 u16 offs = ctrl->reg_offsets[reg];
488 return nand_readreg(ctrl, offs);
493 static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
494 enum brcmnand_reg reg, u32 val)
496 u16 offs = ctrl->reg_offsets[reg];
499 nand_writereg(ctrl, offs, val);
502 static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
503 enum brcmnand_reg reg, u32 mask, unsigned
506 u32 tmp = brcmnand_read_reg(ctrl, reg);
510 brcmnand_write_reg(ctrl, reg, tmp);
513 static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
515 return __raw_readl(ctrl->nand_fc + word * 4);
518 static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
521 __raw_writel(val, ctrl->nand_fc + word * 4);
524 static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
525 enum brcmnand_cs_reg reg)
527 u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
528 u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
531 if (cs == 0 && ctrl->cs0_offsets)
532 cs_offs = ctrl->cs0_offsets[reg];
534 cs_offs = ctrl->cs_offsets[reg];
537 return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
539 return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
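/*
 * Worked example for a v7.1 controller: CS0_BASE is 0x50, the per-CS register
 * spacing is 0x14 and ACC_CONTROL sits at +0x00, so CS2's ACC_CONTROL register
 * resolves to 0x50 + 2 * 0x14 + 0x00 = 0x78.
 */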
542 static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
544 if (ctrl->nand_version < 0x0600)
546 return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
549 static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
551 struct brcmnand_controller *ctrl = host->ctrl;
552 unsigned int shift = 0, bits;
553 enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
556 if (ctrl->nand_version >= 0x0600)
558 else if (ctrl->nand_version >= 0x0500)
563 if (ctrl->nand_version >= 0x0600) {
565 reg = BRCMNAND_CORR_THRESHOLD_EXT;
566 shift = (cs % 5) * bits;
568 brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
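/*
 * Sketch of the packing, assuming the field width chosen just above is 6 bits
 * on v6.0+: five thresholds fit per register, so chip-selects 5 and up would
 * spill into CORR_THRESHOLD_EXT, e.g. CS6 at bit offset (6 % 5) * 6 = 6.
 */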
571 static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
573 if (ctrl->nand_version < 0x0700)
578 /***********************************************************************
579 * NAND ACC CONTROL bitfield
581 * Some bits have remained constant across hardware revisions, while
582 * others have shifted around.
583 ***********************************************************************/
585 /* Constant for all versions (where supported) */
587 /* See BRCMNAND_HAS_CACHE_MODE */
588 ACC_CONTROL_CACHE_MODE = BIT(22),
590 /* See BRCMNAND_HAS_PREFETCH */
591 ACC_CONTROL_PREFETCH = BIT(23),
593 ACC_CONTROL_PAGE_HIT = BIT(24),
594 ACC_CONTROL_WR_PREEMPT = BIT(25),
595 ACC_CONTROL_PARTIAL_PAGE = BIT(26),
596 ACC_CONTROL_RD_ERASED = BIT(27),
597 ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
598 ACC_CONTROL_WR_ECC = BIT(30),
599 ACC_CONTROL_RD_ECC = BIT(31),
602 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
604 if (ctrl->nand_version >= 0x0600)
605 return GENMASK(6, 0);
607 return GENMASK(5, 0);
610 #define NAND_ACC_CONTROL_ECC_SHIFT 16
612 static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
614 u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
616 return mask << NAND_ACC_CONTROL_ECC_SHIFT;
619 static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
621 struct brcmnand_controller *ctrl = host->ctrl;
622 u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
623 u32 acc_control = nand_readreg(ctrl, offs);
624 u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;
627 acc_control |= ecc_flags; /* enable RD/WR ECC */
628 acc_control |= host->hwcfg.ecc_level
629 << NAND_ACC_CONTROL_ECC_SHIFT;
631 acc_control &= ~ecc_flags; /* disable RD/WR ECC */
632 acc_control &= ~brcmnand_ecc_level_mask(ctrl);
635 nand_writereg(ctrl, offs, acc_control);
638 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
640 if (ctrl->nand_version >= 0x0600)
642 else if (ctrl->nand_version >= 0x0500)
648 static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
650 struct brcmnand_controller *ctrl = host->ctrl;
651 int shift = brcmnand_sector_1k_shift(ctrl);
652 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
653 BRCMNAND_CS_ACC_CONTROL);
658 return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
661 static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
663 struct brcmnand_controller *ctrl = host->ctrl;
664 int shift = brcmnand_sector_1k_shift(ctrl);
665 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
666 BRCMNAND_CS_ACC_CONTROL);
672 tmp = nand_readreg(ctrl, acc_control_offs);
673 tmp &= ~(1 << shift);
674 tmp |= (!!val) << shift;
675 nand_writereg(ctrl, acc_control_offs, tmp);
678 /***********************************************************************
680 ***********************************************************************/
683 CS_SELECT_NAND_WP = BIT(29),
684 CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
687 static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
689 u32 val = en ? CS_SELECT_NAND_WP : 0;
691 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
694 /***********************************************************************
696 ***********************************************************************/
699 FLASH_DMA_REVISION = 0x00,
700 FLASH_DMA_FIRST_DESC = 0x04,
701 FLASH_DMA_FIRST_DESC_EXT = 0x08,
702 FLASH_DMA_CTRL = 0x0c,
703 FLASH_DMA_MODE = 0x10,
704 FLASH_DMA_STATUS = 0x14,
705 FLASH_DMA_INTERRUPT_DESC = 0x18,
706 FLASH_DMA_INTERRUPT_DESC_EXT = 0x1c,
707 FLASH_DMA_ERROR_STATUS = 0x20,
708 FLASH_DMA_CURRENT_DESC = 0x24,
709 FLASH_DMA_CURRENT_DESC_EXT = 0x28,
712 static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
714 return ctrl->flash_dma_base;
717 static inline bool flash_dma_buf_ok(const void *buf)
719 return buf && !is_vmalloc_addr(buf) &&
720 likely(IS_ALIGNED((uintptr_t)buf, 4));
723 static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
726 brcmnand_writel(val, ctrl->flash_dma_base + offs);
729 static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
731 return brcmnand_readl(ctrl->flash_dma_base + offs);
734 /* Low-level operation types: command, address, write, or read */
735 enum brcmnand_llop_type {
742 /***********************************************************************
743 * Internal support functions
744 ***********************************************************************/
746 static inline bool is_hamming_ecc(struct brcmnand_cfg *cfg)
748 return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
749 cfg->ecc_level == 15;
753 * Returns a nand_ecclayout structure for the given layout/configuration.
754 * Returns NULL on failure.
756 static struct nand_ecclayout *brcmnand_create_layout(int ecc_level,
757 struct brcmnand_host *host)
759 struct brcmnand_cfg *cfg = &host->hwcfg;
761 struct nand_ecclayout *layout;
767 layout = devm_kzalloc(&host->pdev->dev, sizeof(*layout), GFP_KERNEL);
771 sectors = cfg->page_size / (512 << cfg->sector_size_1k);
772 sas = cfg->spare_area_size << cfg->sector_size_1k;
775 if (is_hamming_ecc(cfg)) {
776 for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
777 /* First sector of each page may have BBI */
779 layout->oobfree[idx2].offset = i * sas + 1;
780 /* Small-page NAND uses byte 6 for BBI */
781 if (cfg->page_size == 512)
782 layout->oobfree[idx2].offset--;
783 layout->oobfree[idx2].length = 5;
785 layout->oobfree[idx2].offset = i * sas;
786 layout->oobfree[idx2].length = 6;
789 layout->eccpos[idx1++] = i * sas + 6;
790 layout->eccpos[idx1++] = i * sas + 7;
791 layout->eccpos[idx1++] = i * sas + 8;
792 layout->oobfree[idx2].offset = i * sas + 9;
793 layout->oobfree[idx2].length = 7;
795 /* Leave zero-terminated entry for OOBFREE */
796 if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
797 idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
804 * CONTROLLER_VERSION:
805 * < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
806 * >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
807 * But we will just be conservative and always use the larger 14/8 estimate.
809 req = DIV_ROUND_UP(ecc_level * 14, 8);
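/* e.g. a BCH-8 configuration needs DIV_ROUND_UP(8 * 14, 8) = 14 ECC bytes per sector */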
811 dev_err(&host->pdev->dev,
812 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
817 layout->eccbytes = req * sectors;
818 for (i = 0, idx1 = 0, idx2 = 0; i < sectors; i++) {
819 for (j = sas - req; j < sas && idx1 <
820 MTD_MAX_ECCPOS_ENTRIES_LARGE; j++, idx1++)
821 layout->eccpos[idx1] = i * sas + j;
823 /* First sector of each page may have BBI */
825 if (cfg->page_size == 512 && (sas - req >= 6)) {
826 /* Small-page NAND uses byte 6 for BBI */
827 layout->oobfree[idx2].offset = 0;
828 layout->oobfree[idx2].length = 5;
831 layout->oobfree[idx2].offset = 6;
832 layout->oobfree[idx2].length =
836 } else if (sas > req + 1) {
837 layout->oobfree[idx2].offset = i * sas + 1;
838 layout->oobfree[idx2].length = sas - req - 1;
841 } else if (sas > req) {
842 layout->oobfree[idx2].offset = i * sas;
843 layout->oobfree[idx2].length = sas - req;
846 /* Leave zero-terminated entry for OOBFREE */
847 if (idx1 >= MTD_MAX_ECCPOS_ENTRIES_LARGE ||
848 idx2 >= MTD_MAX_OOBFREE_ENTRIES_LARGE - 1)
852 /* Sum available OOB */
853 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES_LARGE; i++)
854 layout->oobavail += layout->oobfree[i].length;
858 static struct nand_ecclayout *brcmstb_choose_ecc_layout(
859 struct brcmnand_host *host)
861 struct nand_ecclayout *layout;
862 struct brcmnand_cfg *p = &host->hwcfg;
863 unsigned int ecc_level = p->ecc_level;
865 if (p->sector_size_1k)
868 layout = brcmnand_create_layout(ecc_level, host);
870 dev_err(&host->pdev->dev,
871 "no proper ecc_layout for this NAND cfg\n");
878 static void brcmnand_wp(struct mtd_info *mtd, int wp)
880 struct nand_chip *chip = mtd_to_nand(mtd);
881 struct brcmnand_host *host = chip->priv;
882 struct brcmnand_controller *ctrl = host->ctrl;
884 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
885 static int old_wp = -1;
888 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
891 brcmnand_set_wp(ctrl, wp);
895 /* Helper functions for reading and writing OOB registers */
896 static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
898 u16 offset0, offset10, reg_offs;
900 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
901 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
903 if (offs >= ctrl->max_oob)
906 if (offs >= 16 && offset10)
907 reg_offs = offset10 + ((offs - 0x10) & ~0x03);
909 reg_offs = offset0 + (offs & ~0x03);
911 return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
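/*
 * Worked example: OOB byte 5 is read from the word at offset0 + 4
 * (offs & ~0x03), shifted down by 24 - (1 << 3) = 16, i.e. from bits 23:16.
 */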
914 static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
917 u16 offset0, offset10, reg_offs;
919 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
920 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
922 if (offs >= ctrl->max_oob)
925 if (offs >= 16 && offset10)
926 reg_offs = offset10 + ((offs - 0x10) & ~0x03);
928 reg_offs = offset0 + (offs & ~0x03);
930 nand_writereg(ctrl, reg_offs, data);
934 * read_oob_from_regs - read data from OOB registers
935 * @ctrl: NAND controller
936 * @i: sub-page sector index
937 * @oob: buffer to read to
938 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
939 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
941 static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
942 int sas, int sector_1k)
944 int tbytes = sas << sector_1k;
947 /* Adjust OOB values for 1K sector size */
948 if (sector_1k && (i & 0x01))
949 tbytes = max(0, tbytes - (int)ctrl->max_oob);
950 tbytes = min_t(int, tbytes, ctrl->max_oob);
952 for (j = 0; j < tbytes; j++)
953 oob[j] = oob_reg_read(ctrl, j);
958 * write_oob_to_regs - write data to OOB registers
959 * @i: sub-page sector index
960 * @oob: buffer to write from
961 * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
962 * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
964 static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
965 const u8 *oob, int sas, int sector_1k)
967 int tbytes = sas << sector_1k;
970 /* Adjust OOB values for 1K sector size */
971 if (sector_1k && (i & 0x01))
972 tbytes = max(0, tbytes - (int)ctrl->max_oob);
973 tbytes = min_t(int, tbytes, ctrl->max_oob);
975 for (j = 0; j < tbytes; j += 4)
976 oob_reg_write(ctrl, j,
984 static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
986 struct brcmnand_controller *ctrl = data;
988 /* Discard all NAND_CTLRDY interrupts during DMA */
989 if (ctrl->dma_pending)
992 complete(&ctrl->done);
996 /* Handle SoC-specific interrupt hardware */
997 static irqreturn_t brcmnand_irq(int irq, void *data)
999 struct brcmnand_controller *ctrl = data;
1001 if (ctrl->soc->ctlrdy_ack(ctrl->soc))
1002 return brcmnand_ctlrdy_irq(irq, data);
1007 static irqreturn_t brcmnand_dma_irq(int irq, void *data)
1009 struct brcmnand_controller *ctrl = data;
1011 complete(&ctrl->dma_done);
1016 static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
1018 struct brcmnand_controller *ctrl = host->ctrl;
1021 dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
1022 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
1023 BUG_ON(ctrl->cmd_pending != 0);
1024 ctrl->cmd_pending = cmd;
1026 intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
1027 BUG_ON(!(intfc & INTFC_CTLR_READY));
1029 mb(); /* flush previous writes */
1030 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
1031 cmd << brcmnand_cmd_shift(ctrl));
1034 /***********************************************************************
1035 * NAND MTD API: read/program/erase
1036 ***********************************************************************/
1038 static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
1041 /* intentionally left blank */
1044 static int brcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1046 struct nand_chip *chip = mtd_to_nand(mtd);
1047 struct brcmnand_host *host = chip->priv;
1048 struct brcmnand_controller *ctrl = host->ctrl;
1049 unsigned long timeo = msecs_to_jiffies(100);
1051 dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
1052 if (ctrl->cmd_pending &&
1053 wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
1054 u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
1055 >> brcmnand_cmd_shift(ctrl);
1057 dev_err_ratelimited(ctrl->dev,
1058 "timeout waiting for command %#02x\n", cmd);
1059 dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
1060 brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
1062 ctrl->cmd_pending = 0;
1063 return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
1072 LLOP_RETURN_IDLE = BIT(31),
1074 LLOP_DATA_MASK = GENMASK(15, 0),
1077 static int brcmnand_low_level_op(struct brcmnand_host *host,
1078 enum brcmnand_llop_type type, u32 data,
1081 struct mtd_info *mtd = &host->mtd;
1082 struct nand_chip *chip = &host->chip;
1083 struct brcmnand_controller *ctrl = host->ctrl;
1086 tmp = data & LLOP_DATA_MASK;
1089 tmp |= LLOP_WE | LLOP_CLE;
1093 tmp |= LLOP_WE | LLOP_ALE;
1106 tmp |= LLOP_RETURN_IDLE;
1108 dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
1110 brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
1111 (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
1113 brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
1114 return brcmnand_waitfunc(mtd, chip);
1117 static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
1118 int column, int page_addr)
1120 struct nand_chip *chip = mtd_to_nand(mtd);
1121 struct brcmnand_host *host = chip->priv;
1122 struct brcmnand_controller *ctrl = host->ctrl;
1123 u64 addr = (u64)page_addr << chip->page_shift;
1126 if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
1127 command == NAND_CMD_RNDOUT)
1129 /* Avoid propagating a negative, don't-care address */
1130 else if (page_addr < 0)
1133 dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
1134 (unsigned long long)addr);
1136 host->last_cmd = command;
1137 host->last_byte = 0;
1138 host->last_addr = addr;
1141 case NAND_CMD_RESET:
1142 native_cmd = CMD_FLASH_RESET;
1144 case NAND_CMD_STATUS:
1145 native_cmd = CMD_STATUS_READ;
1147 case NAND_CMD_READID:
1148 native_cmd = CMD_DEVICE_ID_READ;
1150 case NAND_CMD_READOOB:
1151 native_cmd = CMD_SPARE_AREA_READ;
1153 case NAND_CMD_ERASE1:
1154 native_cmd = CMD_BLOCK_ERASE;
1155 brcmnand_wp(mtd, 0);
1157 case NAND_CMD_PARAM:
1158 native_cmd = CMD_PARAMETER_READ;
1160 case NAND_CMD_SET_FEATURES:
1161 case NAND_CMD_GET_FEATURES:
1162 brcmnand_low_level_op(host, LL_OP_CMD, command, false);
1163 brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
1165 case NAND_CMD_RNDOUT:
1166 native_cmd = CMD_PARAMETER_CHANGE_COL;
1167 addr &= ~((u64)(FC_BYTES - 1));
1169 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
1170 * NB: hwcfg.sector_size_1k may not be initialized yet
1172 if (brcmnand_get_sector_size_1k(host)) {
1173 host->hwcfg.sector_size_1k =
1174 brcmnand_get_sector_size_1k(host);
1175 brcmnand_set_sector_size_1k(host, 0);
1183 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
1184 (host->cs << 16) | ((addr >> 32) & 0xffff));
1185 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
1186 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
1187 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1189 brcmnand_send_cmd(host, native_cmd);
1190 brcmnand_waitfunc(mtd, chip);
1192 if (native_cmd == CMD_PARAMETER_READ ||
1193 native_cmd == CMD_PARAMETER_CHANGE_COL) {
1194 /* Copy flash cache word-wise */
1195 u32 *flash_cache = (u32 *)ctrl->flash_cache;
1198 brcmnand_soc_data_bus_prepare(ctrl->soc);
1201 * Must cache the FLASH_CACHE now, since changes in
1202 * SECTOR_SIZE_1K may invalidate it
1204 for (i = 0; i < FC_WORDS; i++)
1206 * Flash cache is big endian for parameter pages, at
1209 flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
1211 brcmnand_soc_data_bus_unprepare(ctrl->soc);
1213 /* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
1214 if (host->hwcfg.sector_size_1k)
1215 brcmnand_set_sector_size_1k(host,
1216 host->hwcfg.sector_size_1k);
1219 /* Re-enabling protection is necessary only after erase */
1220 if (command == NAND_CMD_ERASE1)
1221 brcmnand_wp(mtd, 1);
1224 static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
1226 struct nand_chip *chip = mtd_to_nand(mtd);
1227 struct brcmnand_host *host = chip->priv;
1228 struct brcmnand_controller *ctrl = host->ctrl;
1232 switch (host->last_cmd) {
1233 case NAND_CMD_READID:
1234 if (host->last_byte < 4)
1235 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
1236 (24 - (host->last_byte << 3));
1237 else if (host->last_byte < 8)
1238 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
1239 (56 - (host->last_byte << 3));
1242 case NAND_CMD_READOOB:
1243 ret = oob_reg_read(ctrl, host->last_byte);
1246 case NAND_CMD_STATUS:
1247 ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
1249 if (wp_on) /* hide WP status */
1250 ret |= NAND_STATUS_WP;
1253 case NAND_CMD_PARAM:
1254 case NAND_CMD_RNDOUT:
1255 addr = host->last_addr + host->last_byte;
1256 offs = addr & (FC_BYTES - 1);
1258 /* At FC_BYTES boundary, switch to next column */
1259 if (host->last_byte > 0 && offs == 0)
1260 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
1262 ret = ctrl->flash_cache[offs];
1264 case NAND_CMD_GET_FEATURES:
1265 if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
1268 bool last = host->last_byte ==
1269 ONFI_SUBFEATURE_PARAM_LEN - 1;
1270 brcmnand_low_level_op(host, LL_OP_RD, 0, last);
1271 ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
1275 dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
1281 static void brcmnand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1285 for (i = 0; i < len; i++, buf++)
1286 *buf = brcmnand_read_byte(mtd);
1289 static void brcmnand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
1293 struct nand_chip *chip = mtd_to_nand(mtd);
1294 struct brcmnand_host *host = chip->priv;
1296 switch (host->last_cmd) {
1297 case NAND_CMD_SET_FEATURES:
1298 for (i = 0; i < len; i++)
1299 brcmnand_low_level_op(host, LL_OP_WR, buf[i],
1309 * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
1310 * following ahead of time:
1311 * - Is this descriptor the beginning or end of a linked list?
1312 * - What is the (DMA) address of the next descriptor in the linked list?
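 *
 * A single-buffer transfer (as issued by brcmnand_dma_trans() below) simply
 * marks its one descriptor as both head and tail and passes 0 for next_desc.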
1314 static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
1315 struct brcm_nand_dma_desc *desc, u64 addr,
1316 dma_addr_t buf, u32 len, u8 dma_cmd,
1317 bool begin, bool end,
1318 dma_addr_t next_desc)
1320 memset(desc, 0, sizeof(*desc));
1321 /* Descriptors are written in native byte order (wordwise) */
1322 desc->next_desc = lower_32_bits(next_desc);
1323 desc->next_desc_ext = upper_32_bits(next_desc);
1324 desc->cmd_irq = (dma_cmd << 24) |
1325 (end ? (0x03 << 8) : 0) | /* IRQ | STOP */
1326 (!!begin) | ((!!end) << 1); /* head, tail */
1327 #ifdef CONFIG_CPU_BIG_ENDIAN
1328 desc->cmd_irq |= 0x01 << 12;
1330 desc->dram_addr = lower_32_bits(buf);
1331 desc->dram_addr_ext = upper_32_bits(buf);
1332 desc->tfr_len = len;
1333 desc->total_len = len;
1334 desc->flash_addr = lower_32_bits(addr);
1335 desc->flash_addr_ext = upper_32_bits(addr);
1336 desc->cs = host->cs;
1337 desc->status_valid = 0x01;
1342 * Kick the FLASH_DMA engine, with a given DMA descriptor
1344 static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
1346 struct brcmnand_controller *ctrl = host->ctrl;
1347 unsigned long timeo = msecs_to_jiffies(100);
1349 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
1350 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
1351 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
1352 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
1354 /* Start FLASH_DMA engine */
1355 ctrl->dma_pending = true;
1356 mb(); /* flush previous writes */
1357 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
1359 if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
1361 "timeout waiting for DMA; status %#x, error status %#x\n",
1362 flash_dma_readl(ctrl, FLASH_DMA_STATUS),
1363 flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
1365 ctrl->dma_pending = false;
1366 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
1369 static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
1370 u32 len, u8 dma_cmd)
1372 struct brcmnand_controller *ctrl = host->ctrl;
1374 int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1376 buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
1377 if (dma_mapping_error(ctrl->dev, buf_pa)) {
1378 dev_err(ctrl->dev, "unable to map buffer for DMA\n");
1382 brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
1383 dma_cmd, true, true, 0);
1385 brcmnand_dma_run(host, ctrl->dma_pa);
1387 dma_unmap_single(ctrl->dev, buf_pa, len, dir);
1389 if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
1391 else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
1398 * Assumes proper CS is already set
1400 static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
1401 u64 addr, unsigned int trans, u32 *buf,
1402 u8 *oob, u64 *err_addr)
1404 struct brcmnand_host *host = chip->priv;
1405 struct brcmnand_controller *ctrl = host->ctrl;
1408 /* Clear error addresses */
1409 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
1410 brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
1411 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
1412 brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
1414 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
1415 (host->cs << 16) | ((addr >> 32) & 0xffff));
1416 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
1418 for (i = 0; i < trans; i++, addr += FC_BYTES) {
1419 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
1420 lower_32_bits(addr));
1421 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1422 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
1423 brcmnand_send_cmd(host, CMD_PAGE_READ);
1424 brcmnand_waitfunc(mtd, chip);
1427 brcmnand_soc_data_bus_prepare(ctrl->soc);
1429 for (j = 0; j < FC_WORDS; j++, buf++)
1430 *buf = brcmnand_read_fc(ctrl, j);
1432 brcmnand_soc_data_bus_unprepare(ctrl->soc);
1436 oob += read_oob_from_regs(ctrl, i, oob,
1437 mtd->oobsize / trans,
1438 host->hwcfg.sector_size_1k);
1441 *err_addr = brcmnand_read_reg(ctrl,
1442 BRCMNAND_UNCORR_ADDR) |
1443 ((u64)(brcmnand_read_reg(ctrl,
1444 BRCMNAND_UNCORR_EXT_ADDR)
1451 *err_addr = brcmnand_read_reg(ctrl,
1452 BRCMNAND_CORR_ADDR) |
1453 ((u64)(brcmnand_read_reg(ctrl,
1454 BRCMNAND_CORR_EXT_ADDR)
1464 static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
1465 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
1467 struct brcmnand_host *host = chip->priv;
1468 struct brcmnand_controller *ctrl = host->ctrl;
1472 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
1474 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
1476 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
1477 err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
1480 if (mtd_is_bitflip_or_eccerr(err))
1487 memset(oob, 0x99, mtd->oobsize);
1489 err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
1493 if (mtd_is_eccerr(err)) {
1494 dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
1495 (unsigned long long)err_addr);
1496 mtd->ecc_stats.failed++;
1497 /* NAND layer expects zero on ECC errors */
1501 if (mtd_is_bitflip(err)) {
1502 unsigned int corrected = brcmnand_count_corrected(ctrl);
1504 dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
1505 (unsigned long long)err_addr);
1506 mtd->ecc_stats.corrected += corrected;
1507 /* Always exceed the software-imposed threshold */
1508 return max(mtd->bitflip_threshold, corrected);
1514 static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1515 uint8_t *buf, int oob_required, int page)
1517 struct brcmnand_host *host = chip->priv;
1518 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
1520 return brcmnand_read(mtd, chip, host->last_addr,
1521 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
1524 static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1525 uint8_t *buf, int oob_required, int page)
1527 struct brcmnand_host *host = chip->priv;
1528 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
1531 brcmnand_set_ecc_enabled(host, 0);
1532 ret = brcmnand_read(mtd, chip, host->last_addr,
1533 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
1534 brcmnand_set_ecc_enabled(host, 1);
1538 static int brcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1541 return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
1542 mtd->writesize >> FC_SHIFT,
1543 NULL, (u8 *)chip->oob_poi);
1546 static int brcmnand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1549 struct brcmnand_host *host = chip->priv;
1551 brcmnand_set_ecc_enabled(host, 0);
1552 brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
1553 mtd->writesize >> FC_SHIFT,
1554 NULL, (u8 *)chip->oob_poi);
1555 brcmnand_set_ecc_enabled(host, 1);
1559 static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
1560 u64 addr, const u32 *buf, u8 *oob)
1562 struct brcmnand_host *host = chip->priv;
1563 struct brcmnand_controller *ctrl = host->ctrl;
1564 unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
1565 int status, ret = 0;
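/*
 * trans is the number of 512B flash-cache transfers per page; assuming
 * FC_SHIFT is 9 for the 512B cache, a 2KiB page takes four transfers and a
 * 4KiB page eight.
 */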
1567 dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
1569 if (unlikely((unsigned long)buf & 0x03)) {
1570 dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
1571 buf = (u32 *)((unsigned long)buf & ~0x03);
1574 brcmnand_wp(mtd, 0);
1576 for (i = 0; i < ctrl->max_oob; i += 4)
1577 oob_reg_write(ctrl, i, 0xffffffff);
1579 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
1580 if (brcmnand_dma_trans(host, addr, (u32 *)buf,
1581 mtd->writesize, CMD_PROGRAM_PAGE))
1586 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
1587 (host->cs << 16) | ((addr >> 32) & 0xffff));
1588 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
1590 for (i = 0; i < trans; i++, addr += FC_BYTES) {
1591 /* full address MUST be set before populating FC */
1592 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
1593 lower_32_bits(addr));
1594 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
1597 brcmnand_soc_data_bus_prepare(ctrl->soc);
1599 for (j = 0; j < FC_WORDS; j++, buf++)
1600 brcmnand_write_fc(ctrl, j, *buf);
1602 brcmnand_soc_data_bus_unprepare(ctrl->soc);
1604 for (j = 0; j < FC_WORDS; j++)
1605 brcmnand_write_fc(ctrl, j, 0xffffffff);
1609 oob += write_oob_to_regs(ctrl, i, oob,
1610 mtd->oobsize / trans,
1611 host->hwcfg.sector_size_1k);
1614 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
1615 brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
1616 status = brcmnand_waitfunc(mtd, chip);
1618 if (status & NAND_STATUS_FAIL) {
1619 dev_info(ctrl->dev, "program failed at %llx\n",
1620 (unsigned long long)addr);
1626 brcmnand_wp(mtd, 1);
1630 static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1631 const uint8_t *buf, int oob_required, int page)
1633 struct brcmnand_host *host = chip->priv;
1634 void *oob = oob_required ? chip->oob_poi : NULL;
1636 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
1640 static int brcmnand_write_page_raw(struct mtd_info *mtd,
1641 struct nand_chip *chip, const uint8_t *buf,
1642 int oob_required, int page)
1644 struct brcmnand_host *host = chip->priv;
1645 void *oob = oob_required ? chip->oob_poi : NULL;
1647 brcmnand_set_ecc_enabled(host, 0);
1648 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
1649 brcmnand_set_ecc_enabled(host, 1);
1653 static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1656 return brcmnand_write(mtd, chip, (u64)page << chip->page_shift,
1657 NULL, chip->oob_poi);
1660 static int brcmnand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
1663 struct brcmnand_host *host = chip->priv;
1666 brcmnand_set_ecc_enabled(host, 0);
1667 ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
1668 (u8 *)chip->oob_poi);
1669 brcmnand_set_ecc_enabled(host, 1);
1674 /***********************************************************************
1675 * Per-CS setup (1 NAND device)
1676 ***********************************************************************/
1678 static int brcmnand_set_cfg(struct brcmnand_host *host,
1679 struct brcmnand_cfg *cfg)
1681 struct brcmnand_controller *ctrl = host->ctrl;
1682 struct nand_chip *chip = &host->chip;
1683 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
1684 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
1685 BRCMNAND_CS_CFG_EXT);
1686 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
1687 BRCMNAND_CS_ACC_CONTROL);
1688 u8 block_size = 0, page_size = 0, device_size = 0;
1691 if (ctrl->block_sizes) {
1694 for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
1695 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
1700 dev_warn(ctrl->dev, "invalid block size %u\n",
1705 block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
1708 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
1709 cfg->block_size > ctrl->max_block_size)) {
1710 dev_warn(ctrl->dev, "invalid block size %u\n",
1715 if (ctrl->page_sizes) {
1718 for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
1719 if (ctrl->page_sizes[i] == cfg->page_size) {
1724 dev_warn(ctrl->dev, "invalid page size %u\n",
1729 page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
1732 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
1733 cfg->page_size > ctrl->max_page_size)) {
1734 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
1738 if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
1739 dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
1740 (unsigned long long)cfg->device_size);
1743 device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
1745 tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
1746 (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
1747 (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
1748 (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
1749 (device_size << CFG_DEVICE_SIZE_SHIFT);
1750 if (cfg_offs == cfg_ext_offs) {
1751 tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
1752 (block_size << CFG_BLK_SIZE_SHIFT);
1753 nand_writereg(ctrl, cfg_offs, tmp);
1755 nand_writereg(ctrl, cfg_offs, tmp);
1756 tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
1757 (block_size << CFG_EXT_BLK_SIZE_SHIFT);
1758 nand_writereg(ctrl, cfg_ext_offs, tmp);
1761 tmp = nand_readreg(ctrl, acc_control_offs);
1762 tmp &= ~brcmnand_ecc_level_mask(ctrl);
1763 tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
1764 tmp &= ~brcmnand_spare_area_mask(ctrl);
1765 tmp |= cfg->spare_area_size;
1766 nand_writereg(ctrl, acc_control_offs, tmp);
1768 brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
1770 /* threshold = ceil(BCH-level * 0.75) */
1771 brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
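/* e.g. an 8-bit BCH strength yields a reporting threshold of DIV_ROUND_UP(8 * 3, 4) = 6 bitflips */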
1776 static void brcmnand_print_cfg(char *buf, struct brcmnand_cfg *cfg)
1779 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
1780 (unsigned long long)cfg->device_size >> 20,
1781 cfg->block_size >> 10,
1782 cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
1783 cfg->page_size >= 1024 ? "KiB" : "B",
1784 cfg->spare_area_size, cfg->device_width);
1786 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
1787 if (is_hamming_ecc(cfg))
1788 sprintf(buf, ", Hamming ECC");
1789 else if (cfg->sector_size_1k)
1790 sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
1792 sprintf(buf, ", BCH-%u", cfg->ecc_level);
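/*
 * Note the doubling above: an ecc_level of 8 with 1KiB sectors prints as
 * BCH-16, mirroring the halving brcmnand_setup_dev() applies when it programs
 * ecc_level for 1KiB sectors.
 */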
1796 * Minimum number of bytes to address a page. Calculated as:
1797 * roundup(log2(size / page-size) / 8)
1799 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
1800 * OK because many other things will break if 'size' is irregular...
1802 static inline int get_blk_adr_bytes(u64 size, u32 writesize)
1804 return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
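/*
 * Worked example: a 1GiB device with 2KiB pages needs ilog2(2^30) -
 * ilog2(2^11) = 19 page-address bits, so ALIGN(19, 8) >> 3 = 3 address bytes.
 */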
1807 static int brcmnand_setup_dev(struct brcmnand_host *host)
1809 struct mtd_info *mtd = &host->mtd;
1810 struct nand_chip *chip = &host->chip;
1811 struct brcmnand_controller *ctrl = host->ctrl;
1812 struct brcmnand_cfg *cfg = &host->hwcfg;
1814 u32 offs, tmp, oob_sector;
1817 memset(cfg, 0, sizeof(*cfg));
1819 ret = of_property_read_u32(nand_get_flash_node(chip),
1820 "brcm,nand-oob-sector-size",
1823 /* Use detected size */
1824 cfg->spare_area_size = mtd->oobsize /
1825 (mtd->writesize >> FC_SHIFT);
1827 cfg->spare_area_size = oob_sector;
1829 if (cfg->spare_area_size > ctrl->max_oob)
1830 cfg->spare_area_size = ctrl->max_oob;
1832 * Set oobsize to be consistent with the controller's spare_area_size, as
1833 * the rest is inaccessible.
1835 mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
1837 cfg->device_size = mtd->size;
1838 cfg->block_size = mtd->erasesize;
1839 cfg->page_size = mtd->writesize;
1840 cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
1841 cfg->col_adr_bytes = 2;
1842 cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
1844 switch (chip->ecc.size) {
1846 if (chip->ecc.strength == 1) /* Hamming */
1847 cfg->ecc_level = 15;
1849 cfg->ecc_level = chip->ecc.strength;
1850 cfg->sector_size_1k = 0;
1853 if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
1854 dev_err(ctrl->dev, "1KB sectors not supported\n");
1857 if (chip->ecc.strength & 0x1) {
1859 "odd ECC not supported with 1KB sectors\n");
1863 cfg->ecc_level = chip->ecc.strength >> 1;
1864 cfg->sector_size_1k = 1;
1867 dev_err(ctrl->dev, "unsupported ECC size: %d\n",
1872 cfg->ful_adr_bytes = cfg->blk_adr_bytes;
1873 if (mtd->writesize > 512)
1874 cfg->ful_adr_bytes += cfg->col_adr_bytes;
1876 cfg->ful_adr_bytes += 1;
1878 ret = brcmnand_set_cfg(host, cfg);
1882 brcmnand_set_ecc_enabled(host, 1);
1884 brcmnand_print_cfg(msg, cfg);
1885 dev_info(ctrl->dev, "detected %s\n", msg);
1887 /* Configure ACC_CONTROL */
1888 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
1889 tmp = nand_readreg(ctrl, offs);
1890 tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
1891 tmp &= ~ACC_CONTROL_RD_ERASED;
1892 tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
1893 if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
1895 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
1898 if (has_flash_dma(ctrl))
1899 tmp &= ~ACC_CONTROL_PREFETCH;
1901 tmp |= ACC_CONTROL_PREFETCH;
1903 nand_writereg(ctrl, offs, tmp);
1908 static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
1910 struct brcmnand_controller *ctrl = host->ctrl;
1911 struct platform_device *pdev = host->pdev;
1912 struct mtd_info *mtd;
1913 struct nand_chip *chip;
1917 ret = of_property_read_u32(dn, "reg", &host->cs);
1919 dev_err(&pdev->dev, "can't get chip-select\n");
1926 nand_set_flash_node(chip, dn);
1929 mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
1931 mtd->owner = THIS_MODULE;
1932 mtd->dev.parent = &pdev->dev;
1934 chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
1935 chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;
1937 chip->cmd_ctrl = brcmnand_cmd_ctrl;
1938 chip->cmdfunc = brcmnand_cmdfunc;
1939 chip->waitfunc = brcmnand_waitfunc;
1940 chip->read_byte = brcmnand_read_byte;
1941 chip->read_buf = brcmnand_read_buf;
1942 chip->write_buf = brcmnand_write_buf;
1944 chip->ecc.mode = NAND_ECC_HW;
1945 chip->ecc.read_page = brcmnand_read_page;
1946 chip->ecc.write_page = brcmnand_write_page;
1947 chip->ecc.read_page_raw = brcmnand_read_page_raw;
1948 chip->ecc.write_page_raw = brcmnand_write_page_raw;
1949 chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
1950 chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
1951 chip->ecc.read_oob = brcmnand_read_oob;
1952 chip->ecc.write_oob = brcmnand_write_oob;
1954 chip->controller = &ctrl->controller;
1957 * The bootloader might have configured 16-bit mode, but the
1958 * NAND READID command only works in 8-bit mode. We force
1959 * 8-bit mode here to ensure that the READID command works.
1961 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
1962 nand_writereg(ctrl, cfg_offs,
1963 nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
1965 if (nand_scan_ident(mtd, 1, NULL))
1968 chip->options |= NAND_NO_SUBPAGE_WRITE;
1970 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
1971 * to/from, and have nand_base pass us a bounce buffer instead, as
1974 chip->options |= NAND_USE_BOUNCE_BUFFER;
1976 if (of_get_nand_on_flash_bbt(dn))
1977 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
1979 if (brcmnand_setup_dev(host))
1982 chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
1983 /* only use our internal HW threshold */
1984 mtd->bitflip_threshold = 1;
1986 chip->ecc.layout = brcmstb_choose_ecc_layout(host);
1987 if (!chip->ecc.layout)
1990 if (nand_scan_tail(mtd))
1993 return mtd_device_register(mtd, NULL, 0);
1996 static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
1999 struct brcmnand_controller *ctrl = host->ctrl;
2000 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
2001 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
2002 BRCMNAND_CS_CFG_EXT);
2003 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
2004 BRCMNAND_CS_ACC_CONTROL);
2005 u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
2006 u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
2009 nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
2010 if (cfg_offs != cfg_ext_offs)
2011 nand_writereg(ctrl, cfg_ext_offs,
2012 host->hwcfg.config_ext);
2013 nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
2014 nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
2015 nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
2017 host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
2018 if (cfg_offs != cfg_ext_offs)
2019 host->hwcfg.config_ext =
2020 nand_readreg(ctrl, cfg_ext_offs);
2021 host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
2022 host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
2023 host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
2027 static int brcmnand_suspend(struct device *dev)
2029 struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2030 struct brcmnand_host *host;
2032 list_for_each_entry(host, &ctrl->host_list, node)
2033 brcmnand_save_restore_cs_config(host, 0);
2035 ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
2036 ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
2037 ctrl->corr_stat_threshold =
2038 brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
2040 if (has_flash_dma(ctrl))
2041 ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
2046 static int brcmnand_resume(struct device *dev)
2048 struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
2049 struct brcmnand_host *host;
2051 if (has_flash_dma(ctrl)) {
2052 flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
2053 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
2056 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
2057 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
2058 brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
2059 ctrl->corr_stat_threshold);
2061 /* Clear/re-enable interrupt */
2062 ctrl->soc->ctlrdy_ack(ctrl->soc);
2063 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
2066 list_for_each_entry(host, &ctrl->host_list, node) {
2067 struct mtd_info *mtd = &host->mtd;
2068 struct nand_chip *chip = mtd_to_nand(mtd);
2070 brcmnand_save_restore_cs_config(host, 1);
2072 /* Reset the chip, required by some chips after power-up */
2073 chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
2079 const struct dev_pm_ops brcmnand_pm_ops = {
2080 .suspend = brcmnand_suspend,
2081 .resume = brcmnand_resume,
2083 EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
2085 static const struct of_device_id brcmnand_of_match[] = {
2086 { .compatible = "brcm,brcmnand-v4.0" },
2087 { .compatible = "brcm,brcmnand-v5.0" },
2088 { .compatible = "brcm,brcmnand-v6.0" },
2089 { .compatible = "brcm,brcmnand-v6.1" },
2090 { .compatible = "brcm,brcmnand-v7.0" },
2091 { .compatible = "brcm,brcmnand-v7.1" },
2094 MODULE_DEVICE_TABLE(of, brcmnand_of_match);
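/*
 * Device-tree sketch of one chip-select child node, using only properties this
 * driver reads; the node name and values are illustrative, not from any real
 * board:
 *
 *	nand@1 {
 *		compatible = "brcm,nandcs";
 *		reg = <1>;
 *		nand-on-flash-bbt;
 *		brcm,nand-oob-sector-size = <27>;
 *	};
 */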
2096 /***********************************************************************
2097 * Platform driver setup (per controller)
2098 ***********************************************************************/
2100 int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
2102 struct device *dev = &pdev->dev;
2103 struct device_node *dn = dev->of_node, *child;
2104 struct brcmnand_controller *ctrl;
2105 struct resource *res;
2108 /* We only support device-tree instantiation */
2112 if (!of_match_node(brcmnand_of_match, dn))
2115 ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
2119 dev_set_drvdata(dev, ctrl);
2122 init_completion(&ctrl->done);
2123 init_completion(&ctrl->dma_done);
2124 spin_lock_init(&ctrl->controller.lock);
2125 init_waitqueue_head(&ctrl->controller.wq);
2126 INIT_LIST_HEAD(&ctrl->host_list);
2128 /* NAND register range */
2129 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2130 ctrl->nand_base = devm_ioremap_resource(dev, res);
2131 if (IS_ERR(ctrl->nand_base))
2132 return PTR_ERR(ctrl->nand_base);
2134 /* Enable clock before using NAND registers */
2135 ctrl->clk = devm_clk_get(dev, "nand");
2136 if (!IS_ERR(ctrl->clk)) {
2137 ret = clk_prepare_enable(ctrl->clk);
2141 ret = PTR_ERR(ctrl->clk);
2142 if (ret == -EPROBE_DEFER)
2148 /* Initialize NAND revision */
2149 ret = brcmnand_revision_init(ctrl);
2154 * Most chips have this cache at a fixed offset within the 'nand' register block.
2155 * Some must specify this region separately.
2157 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
2159 ctrl->nand_fc = devm_ioremap_resource(dev, res);
2160 if (IS_ERR(ctrl->nand_fc)) {
2161 ret = PTR_ERR(ctrl->nand_fc);
2165 ctrl->nand_fc = ctrl->nand_base +
2166 ctrl->reg_offsets[BRCMNAND_FC_BASE];
2170 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
2172 ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
2173 if (IS_ERR(ctrl->flash_dma_base)) {
2174 ret = PTR_ERR(ctrl->flash_dma_base);
2178 flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
2179 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
2181 /* Allocate descriptor(s) */
2182 ctrl->dma_desc = dmam_alloc_coherent(dev,
2183 sizeof(*ctrl->dma_desc),
2184 &ctrl->dma_pa, GFP_KERNEL);
2185 if (!ctrl->dma_desc) {
2190 ctrl->dma_irq = platform_get_irq(pdev, 1);
2191 if ((int)ctrl->dma_irq < 0) {
2192 dev_err(dev, "missing FLASH_DMA IRQ\n");
2197 ret = devm_request_irq(dev, ctrl->dma_irq,
2198 brcmnand_dma_irq, 0, DRV_NAME,
2201 dev_err(dev, "can't allocate IRQ %d: error %d\n",
2202 ctrl->dma_irq, ret);
2206 dev_info(dev, "enabling FLASH_DMA\n");
2209 /* Disable automatic device ID config, direct addressing */
2210 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
2211 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
2212 /* Disable XOR addressing */
2213 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
2215 if (ctrl->features & BRCMNAND_HAS_WP) {
2216 /* Permanently disable write protection */
2218 brcmnand_set_wp(ctrl, false);
2224 ctrl->irq = platform_get_irq(pdev, 0);
2225 if ((int)ctrl->irq < 0) {
2226 dev_err(dev, "no IRQ defined\n");
2232 * Some SoCs integrate this controller (e.g., its interrupt bits) in
2238 ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
2241 /* Enable interrupt */
2242 ctrl->soc->ctlrdy_ack(ctrl->soc);
2243 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
2245 /* Use standard interrupt infrastructure */
2246 ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
2250 dev_err(dev, "can't allocate IRQ %d: error %d\n",
2255 for_each_available_child_of_node(dn, child) {
2256 if (of_device_is_compatible(child, "brcm,nandcs")) {
2257 struct brcmnand_host *host;
2259 host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
2268 ret = brcmnand_init_cs(host, child);
2270 devm_kfree(dev, host);
2271 continue; /* Try all chip-selects */
2274 list_add_tail(&host->node, &ctrl->host_list);
2278 /* No chip-selects could initialize properly */
2279 if (list_empty(&ctrl->host_list)) {
2287 clk_disable_unprepare(ctrl->clk);
2291 EXPORT_SYMBOL_GPL(brcmnand_probe);
2293 int brcmnand_remove(struct platform_device *pdev)
2295 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
2296 struct brcmnand_host *host;
2298 list_for_each_entry(host, &ctrl->host_list, node)
2299 nand_release(&host->mtd);
2301 clk_disable_unprepare(ctrl->clk);
2303 dev_set_drvdata(&pdev->dev, NULL);
2307 EXPORT_SYMBOL_GPL(brcmnand_remove);
2309 MODULE_LICENSE("GPL v2");
2310 MODULE_AUTHOR("Kevin Cernekee");
2311 MODULE_AUTHOR("Brian Norris");
2312 MODULE_DESCRIPTION("NAND driver for Broadcom chips");
2313 MODULE_ALIAS("platform:brcmnand");