/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_mtd.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>

#define DRV_NAME "lpc32xx_mlc"

/**********************************************************************
* MLC NAND controller register offsets
**********************************************************************/

#define MLC_BUFF(x)                     (x + 0x00000)
#define MLC_DATA(x)                     (x + 0x08000)
#define MLC_CMD(x)                      (x + 0x10000)
#define MLC_ADDR(x)                     (x + 0x10004)
#define MLC_ECC_ENC_REG(x)              (x + 0x10008)
#define MLC_ECC_DEC_REG(x)              (x + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)         (x + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)         (x + 0x10014)
#define MLC_RPR(x)                      (x + 0x10018)
#define MLC_WPR(x)                      (x + 0x1001C)
#define MLC_RUBP(x)                     (x + 0x10020)
#define MLC_ROBP(x)                     (x + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)            (x + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)            (x + 0x1002C)
#define MLC_ICR(x)                      (x + 0x10030)
#define MLC_TIME_REG(x)                 (x + 0x10034)
#define MLC_IRQ_MR(x)                   (x + 0x10038)
#define MLC_IRQ_SR(x)                   (x + 0x1003C)
#define MLC_LOCK_PR(x)                  (x + 0x10044)
#define MLC_ISR(x)                      (x + 0x10048)
#define MLC_CEH(x)                      (x + 0x1004C)

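/*
 * All offsets above are relative to the ioremapped register base: the
 * ECC data buffer window (MLC_BUFF), the raw data window (MLC_DATA),
 * and the control/status registers from +0x10000 onwards.
 */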
/**********************************************************************
* MLC_CMD bit definitions
**********************************************************************/
#define MLCCMD_RESET                    0xFF

/**********************************************************************
* MLC_ICR bit definitions
**********************************************************************/
#define MLCICR_WPROT                    (1 << 3)
#define MLCICR_LARGEBLOCK               (1 << 2)
#define MLCICR_LONGADDR                 (1 << 1)
#define MLCICR_16BIT                    (1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
* MLC_TIME_REG bit definitions
**********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)        (((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)        (((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)           (((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)           (((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)            (((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)           (((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)            (((n) & 0x0F) << 0)

/**********************************************************************
* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
**********************************************************************/
#define MLCIRQ_NAND_READY               (1 << 5)
#define MLCIRQ_CONTROLLER_READY         (1 << 4)
#define MLCIRQ_DECODE_FAILURE           (1 << 3)
#define MLCIRQ_DECODE_ERROR             (1 << 2)
#define MLCIRQ_ECC_READY                (1 << 1)
#define MLCIRQ_WRPROT_FAULT             (1 << 0)

/**********************************************************************
* MLC_LOCK_PR bit definitions
**********************************************************************/
#define MLCLOCKPR_MAGIC                 0xA25E

/**********************************************************************
* MLC_ISR bit definitions
**********************************************************************/
#define MLCISR_DECODER_FAILURE          (1 << 6)
#define MLCISR_ERRORS                   ((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED          (1 << 3)
#define MLCISR_ECC_READY                (1 << 2)
#define MLCISR_CONTROLLER_READY         (1 << 1)
#define MLCISR_NAND_READY               (1 << 0)

/**********************************************************************
* MLC_CEH bit definitions
**********************************************************************/
#define MLCCEH_NORMAL                   (1 << 0)

struct lpc32xx_nand_cfg_mlc {
        uint32_t tcea_delay;
        uint32_t busy_delay;
        uint32_t nand_ta;
        uint32_t rd_high;
        uint32_t rd_low;
        uint32_t wr_high;
        uint32_t wr_low;
        int wp_gpio;
        struct mtd_partition *parts;
        unsigned num_parts;
};

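/*
 * OOB layout: each 512-byte subpage owns a 16-byte chunk of OOB. The
 * first 6 bytes of each chunk are free for use; bytes 6..15 hold the
 * Reed-Solomon ECC generated by the controller.
 */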
static struct nand_ecclayout lpc32xx_nand_oob = {
        .eccbytes = 40,
        .eccpos = { 6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
                   22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                   38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
                   54, 55, 56, 57, 58, 59, 60, 61, 62, 63 },
        .oobfree = {
                { .offset = 0,
                  .length = 6, },
                { .offset = 16,
                  .length = 6, },
                { .offset = 32,
                  .length = 6, },
                { .offset = 48,
                  .length = 6, },
                },
};

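/*
 * The bad block tables below use absolute page numbers. 524224 and
 * 524160 are the first pages of the last two 64-page (128 KiB) blocks
 * of a 1 GiB chip with 2048-byte pages (524288 pages in total), which
 * presumably matches the devices this driver was developed on.
 */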
static struct nand_bbt_descr lpc32xx_nand_bbt = {
        .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
                   NAND_BBT_WRITE,
        .pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
        .options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
                   NAND_BBT_WRITE,
        .pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};

struct lpc32xx_nand_host {
        struct nand_chip        nand_chip;
        struct lpc32xx_mlc_platform_data *pdata;
        struct clk              *clk;
        void __iomem            *io_base;
        int                     irq;
        struct lpc32xx_nand_cfg_mlc     *ncfg;
        struct completion       comp_nand;
        struct completion       comp_controller;
        uint32_t llptr;
        /* Physical address of the OOB data buffer */
        dma_addr_t              oob_buf_phy;
        /* Virtual address of the OOB data buffer */
        uint8_t                 *oob_buf;
        /* Physical address of the controller registers, for DMA */
        dma_addr_t              io_base_phy;

        struct completion       comp_dma;
        struct dma_chan         *dma_chan;
        struct dma_slave_config dma_slave_config;
        struct scatterlist      sgl;
        uint8_t                 *dma_buf;
        uint8_t                 *dummy_buf;
        int                     mlcsubpages; /* number of 512bytes-subpages */
};

/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512 byte subpages
 * instead of doing readl() / writel() in a loop slows it down significantly.
 * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * These numbers cover the transfer itself; in the DMA case, only the
 * wait_for_completion() is measured (DMA setup _not_ included).
 *
 * Note that the 512 byte subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
 * 2048 byte page) is spent waiting for the NAND IRQ anyway (the NAND
 * controller transferring data between its internal buffer and the NAND
 * chip).
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 */
static int use_dma;

static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
        uint32_t clkrate, tmp;

        /* Reset MLC controller */
        writel(MLCCMD_RESET, MLC_CMD(host->io_base));
        udelay(1000);

        /* Get base clock for MLC block */
        clkrate = clk_get_rate(host->clk);
        if (clkrate == 0)
                clkrate = 104000000;

        /* Unlock MLC_ICR
         * (among others, will be locked again automatically) */
        writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

        /* Configure MLC Controller: Large Block, 5 Byte Address */
        tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
        writel(tmp, MLC_ICR(host->io_base));

        /* Unlock MLC_TIME_REG
         * (among others, will be locked again automatically) */
        writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

        /* Compute clock setup values, see LPC and NAND manual */
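        /*
         * The ncfg values apparently encode each delay as a reciprocal
         * time in Hz (e.g. 333333333 for 3 ns), so clkrate / value gives
         * the delay in MLC clock cycles, with "+ 1" rounding the integer
         * division up where needed.
         */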
        tmp = 0;
        tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
        tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
        tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
        tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
        tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
        tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
        tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
        writel(tmp, MLC_TIME_REG(host->io_base));

        /* Enable IRQ for CONTROLLER_READY and NAND_READY */
        writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
                        MLC_IRQ_MR(host->io_base));

        /* Normal nCE operation: nCE controlled by controller */
        writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}

/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
                                  unsigned int ctrl)
{
        struct nand_chip *nand_chip = mtd_to_nand(mtd);
        struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

        if (cmd != NAND_CMD_NONE) {
                if (ctrl & NAND_CLE)
                        writel(cmd, MLC_CMD(host->io_base));
                else
                        writel(cmd, MLC_ADDR(host->io_base));
        }
}

/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
{
        struct nand_chip *nand_chip = mtd_to_nand(mtd);
        struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

        if ((readb(MLC_ISR(host->io_base)) &
             (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
            (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
                return 1;

        return 0;
}

static irqreturn_t lpc3xxx_nand_irq(int irq, void *data)
{
        struct lpc32xx_nand_host *host = data;
        uint8_t sr;

        /* Clear interrupt flag by reading status */
        sr = readb(MLC_IRQ_SR(host->io_base));
        if (sr & MLCIRQ_NAND_READY)
                complete(&host->comp_nand);
        if (sr & MLCIRQ_CONTROLLER_READY)
                complete(&host->comp_controller);

        return IRQ_HANDLED;
}

static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

        if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
                goto exit;

        wait_for_completion(&host->comp_nand);

        while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
                /* Seems to be delayed sometimes by controller */
                dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
                cpu_relax();
        }

exit:
        return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
                                       struct nand_chip *chip)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

        if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
                goto exit;

        wait_for_completion(&host->comp_controller);

        while (!(readb(MLC_ISR(host->io_base)) &
                 MLCISR_CONTROLLER_READY)) {
                dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
                cpu_relax();
        }

exit:
        return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
        lpc32xx_waitfunc_nand(mtd, chip);
        lpc32xx_waitfunc_controller(mtd, chip);

        return NAND_STATUS_READY;
}

/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
        if (gpio_is_valid(host->ncfg->wp_gpio))
                gpio_set_value(host->ncfg->wp_gpio, 1);
}

static void lpc32xx_dma_complete_func(void *completion)
{
        complete(completion);
}

static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
                            enum dma_transfer_direction dir)
{
        struct nand_chip *chip = mtd_to_nand(mtd);
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
        struct dma_async_tx_descriptor *desc;
        int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
        int res;

        sg_init_one(&host->sgl, mem, len);

        res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
                         DMA_BIDIRECTIONAL);
        if (res != 1) {
                dev_err(mtd->dev.parent, "Failed to map sg list\n");
                return -ENXIO;
        }
        desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
                                       flags);
        if (!desc) {
                dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
                goto out1;
        }

        init_completion(&host->comp_dma);
        desc->callback = lpc32xx_dma_complete_func;
        desc->callback_param = &host->comp_dma;

        dmaengine_submit(desc);
        dma_async_issue_pending(host->dma_chan);

        wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));

        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
                     DMA_BIDIRECTIONAL);
        return 0;
out1:
        dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
                     DMA_BIDIRECTIONAL);
        return -ENXIO;
}

static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
                             uint8_t *buf, int oob_required, int page)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
        int i, j;
        uint8_t *oobbuf = chip->oob_poi;
        uint32_t mlc_isr;
        int res;
        uint8_t *dma_buf;
        bool dma_mapped;

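        /*
         * DMA straight into the caller's buffer only when it lies below
         * high_memory (i.e. in lowmem, with a permanent kernel mapping);
         * otherwise bounce through the preallocated host->dma_buf.
         */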
        if ((void *)buf <= high_memory) {
                dma_buf = buf;
                dma_mapped = true;
        } else {
                dma_buf = host->dma_buf;
                dma_mapped = false;
        }

        /* Writing Command and Address */
        chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

        /* For all sub-pages */
        for (i = 0; i < host->mlcsubpages; i++) {
                /* Start Auto Decode Command */
                writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

                /* Wait for Controller Ready */
                lpc32xx_waitfunc_controller(mtd, chip);

                /* Check ECC Error status */
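                /*
                 * MLCISR_DECODER_FAILURE flags an uncorrectable subpage;
                 * otherwise bits 4..5 apparently hold the number of
                 * corrected symbol errors minus one (hence the "+ 1").
                 */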
                mlc_isr = readl(MLC_ISR(host->io_base));
                if (mlc_isr & MLCISR_DECODER_FAILURE) {
                        mtd->ecc_stats.failed++;
                        dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
                } else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
                        mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
                }

                /* Read 512 + 16 Bytes */
                if (use_dma) {
                        res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
                                               DMA_DEV_TO_MEM);
                        if (res)
                                return res;
                } else {
                        for (j = 0; j < (512 >> 2); j++) {
                                *((uint32_t *)(buf)) =
                                        readl(MLC_BUFF(host->io_base));
                                buf += 4;
                        }
                }
                for (j = 0; j < (16 >> 2); j++) {
                        *((uint32_t *)(oobbuf)) =
                                readl(MLC_BUFF(host->io_base));
                        oobbuf += 4;
                }
        }

        if (use_dma && !dma_mapped)
                memcpy(buf, dma_buf, mtd->writesize);

        return 0;
}

static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
                                       struct nand_chip *chip,
                                       const uint8_t *buf, int oob_required,
                                       int page)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
        const uint8_t *oobbuf = chip->oob_poi;
        uint8_t *dma_buf = (uint8_t *)buf;
        int res;
        int i, j;

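        /*
         * As in the read path: if the source buffer is not in lowmem,
         * bounce it through host->dma_buf before handing it to the DMA
         * engine.
         */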
        if (use_dma && (void *)buf >= high_memory) {
                dma_buf = host->dma_buf;
                memcpy(dma_buf, buf, mtd->writesize);
        }

        for (i = 0; i < host->mlcsubpages; i++) {
                /* Start Encode */
                writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

                /* Write 512 + 6 Bytes to Buffer */
                if (use_dma) {
                        res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
                                               DMA_MEM_TO_DEV);
                        if (res)
                                return res;
                } else {
                        for (j = 0; j < (512 >> 2); j++) {
                                writel(*((uint32_t *)(buf)),
                                       MLC_BUFF(host->io_base));
                                buf += 4;
                        }
                }
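                /*
                 * Only the 6 free OOB bytes of this subpage's 16-byte
                 * chunk are written (4 + 2); oobbuf then skips the 10
                 * ECC bytes, which the encoder generates in hardware.
                 */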
                writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
                oobbuf += 4;
                writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
                oobbuf += 12;

                /* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
                writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

                /* Wait for Controller Ready */
                lpc32xx_waitfunc_controller(mtd, chip);
        }
        return 0;
}

static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
                            int page)
{
        struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

        /* Read whole page - necessary with MLC controller! */
        lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);

        return 0;
}

static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
                             int page)
{
        /* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
        return 0;
}

/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
{
        /* Always enabled! */
}

static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
        struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
        dma_cap_mask_t mask;

        if (!host->pdata || !host->pdata->dma_filter) {
                dev_err(mtd->dev.parent, "no DMA platform data\n");
                return -ENOENT;
        }

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
                                             "nand-mlc");
        if (!host->dma_chan) {
                dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
                return -EBUSY;
        }

        /*
         * Set direction to a sensible value even if the dmaengine driver
         * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
         * driver criticizes it as "alien transfer direction".
         */
        host->dma_slave_config.direction = DMA_DEV_TO_MEM;
        host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        host->dma_slave_config.src_maxburst = 128;
        host->dma_slave_config.dst_maxburst = 128;
        /* DMA controller does flow control: */
        host->dma_slave_config.device_fc = false;
        host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
        host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
        if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
                dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
                goto out1;
        }

        return 0;
out1:
        dma_release_channel(host->dma_chan);
        return -ENXIO;
}

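/*
 * Example device tree node, for illustration only: the register range
 * and GPIO are board/SoC specific, and the timing values below are
 * merely plausible samples (each given as 1/t in Hz). "gpios" names
 * the write protect pin.
 *
 *      flash@200a8000 {
 *              compatible = "nxp,lpc3220-mlc";
 *              reg = <0x200a8000 0x11000>;
 *              gpios = <&gpio 5 19 1>;
 *              nxp,tcea-delay = <333333333>;
 *              nxp,busy-delay = <10000000>;
 *              nxp,nand-ta = <18181818>;
 *              nxp,rd-high = <31250000>;
 *              nxp,rd-low = <45454545>;
 *              nxp,wr-high = <40000000>;
 *              nxp,wr-low = <83333333>;
 *      };
 */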
static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
        struct lpc32xx_nand_cfg_mlc *ncfg;
        struct device_node *np = dev->of_node;

        ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
        if (!ncfg)
                return NULL;

        of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
        of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
        of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
        of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
        of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
        of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
        of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);

        if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
            !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
            !ncfg->wr_low) {
                dev_err(dev, "chip parameters not specified correctly\n");
                return NULL;
        }

        ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

        return ncfg;
}

/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host;
        struct mtd_info *mtd;
        struct nand_chip *nand_chip;
        struct resource *rc;
        int res;

        /* Allocate memory for the device structure (and zero it) */
        host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        host->io_base = devm_ioremap_resource(&pdev->dev, rc);
        if (IS_ERR(host->io_base))
                return PTR_ERR(host->io_base);

        host->io_base_phy = rc->start;

        nand_chip = &host->nand_chip;
        mtd = nand_to_mtd(nand_chip);
        if (pdev->dev.of_node)
                host->ncfg = lpc32xx_parse_dt(&pdev->dev);
        if (!host->ncfg) {
                dev_err(&pdev->dev,
                        "Missing or bad NAND config from device tree\n");
                return -ENOENT;
        }
        if (host->ncfg->wp_gpio == -EPROBE_DEFER)
                return -EPROBE_DEFER;
        if (gpio_is_valid(host->ncfg->wp_gpio) &&
                        gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
                dev_err(&pdev->dev, "GPIO not available\n");
                return -EBUSY;
        }
        lpc32xx_wp_disable(host);

        host->pdata = dev_get_platdata(&pdev->dev);

        /* link the private data structures */
        nand_set_controller_data(nand_chip, host);
        nand_set_flash_node(nand_chip, pdev->dev.of_node);
        mtd->dev.parent = &pdev->dev;

        /* Get NAND clock */
        host->clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                dev_err(&pdev->dev, "Clock initialization failure\n");
                res = -ENOENT;
                goto err_exit1;
        }
        clk_prepare_enable(host->clk);

        nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
        nand_chip->dev_ready = lpc32xx_nand_device_ready;
        nand_chip->chip_delay = 25; /* us */
        nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
        nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);

        /* Init NAND controller */
        lpc32xx_nand_setup(host);

        platform_set_drvdata(pdev, host);

        /* Initialize function pointers */
        nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
        nand_chip->ecc.read_page_raw = lpc32xx_read_page;
        nand_chip->ecc.read_page = lpc32xx_read_page;
        nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
        nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
        nand_chip->ecc.write_oob = lpc32xx_write_oob;
        nand_chip->ecc.read_oob = lpc32xx_read_oob;
        nand_chip->ecc.strength = 4;
        nand_chip->waitfunc = lpc32xx_waitfunc;

        nand_chip->options = NAND_NO_SUBPAGE_WRITE;
        nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
        nand_chip->bbt_td = &lpc32xx_nand_bbt;
        nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

        if (use_dma) {
                res = lpc32xx_dma_setup(host);
                if (res) {
                        res = -EIO;
                        goto err_exit2;
                }
        }

        /*
         * Scan to find existence of the device and
         * get the type of NAND device: SMALL block or LARGE block.
         */
        if (nand_scan_ident(mtd, 1, NULL)) {
                res = -ENXIO;
                goto err_exit3;
        }

        host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dma_buf) {
                res = -ENOMEM;
                goto err_exit3;
        }

        host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
        if (!host->dummy_buf) {
                res = -ENOMEM;
                goto err_exit3;
        }

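        /*
         * The MLC's Reed-Solomon engine works on 512-byte subpages and,
         * per the LPC32x0 manual, corrects up to 4 symbol errors per
         * subpage; hence ecc.size = 512 and ecc.strength = 4 above.
         */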
        nand_chip->ecc.mode = NAND_ECC_HW;
        nand_chip->ecc.size = 512;
        nand_chip->ecc.layout = &lpc32xx_nand_oob;
        host->mlcsubpages = mtd->writesize / 512;

        /* initially clear interrupt status */
        readb(MLC_IRQ_SR(host->io_base));

        init_completion(&host->comp_nand);
        init_completion(&host->comp_controller);

        host->irq = platform_get_irq(pdev, 0);
        if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
                dev_err(&pdev->dev, "failed to get platform irq\n");
                res = -EINVAL;
                goto err_exit3;
        }

        if (request_irq(host->irq, lpc3xxx_nand_irq,
                        IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
                dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
                res = -ENXIO;
                goto err_exit3;
        }

        /*
         * Fill out all the uninitialized function pointers with the defaults
         * and scan for a bad block table if appropriate.
         */
        if (nand_scan_tail(mtd)) {
                res = -ENXIO;
                goto err_exit4;
        }

        mtd->name = DRV_NAME;

        res = mtd_device_register(mtd, host->ncfg->parts,
                                  host->ncfg->num_parts);
        if (!res)
                return res;

        nand_release(mtd);

err_exit4:
        free_irq(host->irq, host);
err_exit3:
        if (use_dma)
                dma_release_channel(host->dma_chan);
err_exit2:
        clk_disable_unprepare(host->clk);
        clk_put(host->clk);
err_exit1:
        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);

        return res;
}

/*
 * Remove NAND device
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
        struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);

        nand_release(mtd);
        free_irq(host->irq, host);
        if (use_dma)
                dma_release_channel(host->dma_chan);

        clk_disable_unprepare(host->clk);
        clk_put(host->clk);

        lpc32xx_wp_enable(host);
        gpio_free(host->ncfg->wp_gpio);

        return 0;
}

#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

        /* Re-enable NAND clock */
        clk_prepare_enable(host->clk);

        /* Fresh init of NAND controller */
        lpc32xx_nand_setup(host);

        /* Disable write protect */
        lpc32xx_wp_disable(host);

        return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
        struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

        /* Enable write protect for safety */
        lpc32xx_wp_enable(host);

        /* Disable clock */
        clk_disable_unprepare(host->clk);
        return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif

static const struct of_device_id lpc32xx_nand_match[] = {
        { .compatible = "nxp,lpc3220-mlc" },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
        .probe          = lpc32xx_nand_probe,
        .remove         = lpc32xx_nand_remove,
        .resume         = lpc32xx_nand_resume,
        .suspend        = lpc32xx_nand_suspend,
        .driver         = {
                .name   = DRV_NAME,
                .of_match_table = lpc32xx_nand_match,
        },
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");