Source: git.karo-electronics.de (karo-tx-linux.git) — drivers/mmc/host/dw_mmc.c
Patch context: "mmc: dw_mmc: fix error handling on response error"
1 /*
2  * Synopsys DesignWare Multimedia Card Interface driver
3  *  (Based on NXP driver for lpc 31xx)
4  *
5  * Copyright (C) 2009 NXP Semiconductors
6  * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  */
13
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/dw_mmc.h>
33 #include <linux/bitops.h>
34 #include <linux/regulator/consumer.h>
35 #include <linux/workqueue.h>
36 #include <linux/of.h>
37 #include <linux/of_gpio.h>
38
39 #include "dw_mmc.h"
40
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
				 SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
				 SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
/* Values stored in host->dir_status to track the current data direction */
#define DW_MCI_SEND_STATUS	1
#define DW_MCI_RECV_STATUS	2
/* Transfers smaller than this many bytes skip DMA setup (see pre_dma_transfer) */
#define DW_MCI_DMA_THRESHOLD	16

#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */
#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */
55
#ifdef CONFIG_MMC_DW_IDMAC
/* All IDMAC interrupt/status bits, used to ack everything in one write */
#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
				 SDMMC_IDMAC_INT_TI)

/*
 * In-memory descriptor layout for the controller's internal DMA engine
 * (IDMAC). Descriptors are chained into a ring via des3 (see
 * dw_mci_idmac_init) and filled per-transfer in dw_mci_translate_sglist.
 */
struct idmac_desc {
	u32		des0;	/* Control Descriptor */
#define IDMAC_DES0_DIC	BIT(1)	/* disable completion interrupt for this desc */
#define IDMAC_DES0_LD	BIT(2)	/* last descriptor of the transfer */
#define IDMAC_DES0_FD	BIT(3)	/* first descriptor of the transfer */
#define IDMAC_DES0_CH	BIT(4)	/* des3 points at the next (chained) desc */
#define IDMAC_DES0_CES	BIT(30)
#define IDMAC_DES0_OWN	BIT(31)	/* descriptor owned by the DMA engine */

	u32		des1;	/* Buffer sizes */
/* Buffer-1 size lives in bits [12:0]; preserve the buffer-2 size field */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))

	u32		des2;	/* buffer 1 physical address */

	u32		des3;	/* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
81
/*
 * Fixed tuning block patterns for 4-bit and 8-bit bus widths.
 * NOTE(review): these look like the standard SD/eMMC tuning blocks
 * (CMD19/CMD21); their consumer is not visible in this chunk — confirm
 * usage before modifying. Byte values must not be changed.
 */
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
111
112 #if defined(CONFIG_DEBUG_FS)
113 static int dw_mci_req_show(struct seq_file *s, void *v)
114 {
115         struct dw_mci_slot *slot = s->private;
116         struct mmc_request *mrq;
117         struct mmc_command *cmd;
118         struct mmc_command *stop;
119         struct mmc_data *data;
120
121         /* Make sure we get a consistent snapshot */
122         spin_lock_bh(&slot->host->lock);
123         mrq = slot->mrq;
124
125         if (mrq) {
126                 cmd = mrq->cmd;
127                 data = mrq->data;
128                 stop = mrq->stop;
129
130                 if (cmd)
131                         seq_printf(s,
132                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
133                                    cmd->opcode, cmd->arg, cmd->flags,
134                                    cmd->resp[0], cmd->resp[1], cmd->resp[2],
135                                    cmd->resp[2], cmd->error);
136                 if (data)
137                         seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
138                                    data->bytes_xfered, data->blocks,
139                                    data->blksz, data->flags, data->error);
140                 if (stop)
141                         seq_printf(s,
142                                    "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
143                                    stop->opcode, stop->arg, stop->flags,
144                                    stop->resp[0], stop->resp[1], stop->resp[2],
145                                    stop->resp[2], stop->error);
146         }
147
148         spin_unlock_bh(&slot->host->lock);
149
150         return 0;
151 }
152
153 static int dw_mci_req_open(struct inode *inode, struct file *file)
154 {
155         return single_open(file, dw_mci_req_show, inode->i_private);
156 }
157
/* File operations for the per-slot debugfs "req" snapshot file. */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
165
166 static int dw_mci_regs_show(struct seq_file *s, void *v)
167 {
168         seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
169         seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
170         seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
171         seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
172         seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
173         seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
174
175         return 0;
176 }
177
178 static int dw_mci_regs_open(struct inode *inode, struct file *file)
179 {
180         return single_open(file, dw_mci_regs_show, inode->i_private);
181 }
182
/* File operations for the debugfs "regs" register-dump file. */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
190
191 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
192 {
193         struct mmc_host *mmc = slot->mmc;
194         struct dw_mci *host = slot->host;
195         struct dentry *root;
196         struct dentry *node;
197
198         root = mmc->debugfs_root;
199         if (!root)
200                 return;
201
202         node = debugfs_create_file("regs", S_IRUSR, root, host,
203                                    &dw_mci_regs_fops);
204         if (!node)
205                 goto err;
206
207         node = debugfs_create_file("req", S_IRUSR, root, slot,
208                                    &dw_mci_req_fops);
209         if (!node)
210                 goto err;
211
212         node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
213         if (!node)
214                 goto err;
215
216         node = debugfs_create_x32("pending_events", S_IRUSR, root,
217                                   (u32 *)&host->pending_events);
218         if (!node)
219                 goto err;
220
221         node = debugfs_create_x32("completed_events", S_IRUSR, root,
222                                   (u32 *)&host->completed_events);
223         if (!node)
224                 goto err;
225
226         return;
227
228 err:
229         dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
230 }
231 #endif /* defined(CONFIG_DEBUG_FS) */
232
/* Program the data timeout register to its maximum (all bits set). */
static void dw_mci_set_timeout(struct dw_mci *host)
{
	/* timeout (maximum) */
	mci_writel(host, TMOUT, 0xffffffff);
}
238
239 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
240 {
241         struct mmc_data *data;
242         struct dw_mci_slot *slot = mmc_priv(mmc);
243         const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
244         u32 cmdr;
245         cmd->error = -EINPROGRESS;
246
247         cmdr = cmd->opcode;
248
249         if (cmdr == MMC_STOP_TRANSMISSION)
250                 cmdr |= SDMMC_CMD_STOP;
251         else
252                 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
253
254         if (cmd->flags & MMC_RSP_PRESENT) {
255                 /* We expect a response, so set this bit */
256                 cmdr |= SDMMC_CMD_RESP_EXP;
257                 if (cmd->flags & MMC_RSP_136)
258                         cmdr |= SDMMC_CMD_RESP_LONG;
259         }
260
261         if (cmd->flags & MMC_RSP_CRC)
262                 cmdr |= SDMMC_CMD_RESP_CRC;
263
264         data = cmd->data;
265         if (data) {
266                 cmdr |= SDMMC_CMD_DAT_EXP;
267                 if (data->flags & MMC_DATA_STREAM)
268                         cmdr |= SDMMC_CMD_STRM_MODE;
269                 if (data->flags & MMC_DATA_WRITE)
270                         cmdr |= SDMMC_CMD_DAT_WR;
271         }
272
273         if (drv_data && drv_data->prepare_command)
274                 drv_data->prepare_command(slot->host, &cmdr);
275
276         return cmdr;
277 }
278
/*
 * Latch the command on the host and kick it off: write the argument,
 * barrier so the argument is visible to the IP before the command, then
 * write CMD with the start bit set.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	/* Ensure CMDARG reaches the device before the start bit is set */
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
292
/* Fire the pre-computed stop command (see host->stop_cmdr) for a data phase. */
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
}
297
298 /* DMA interface functions */
299 static void dw_mci_stop_dma(struct dw_mci *host)
300 {
301         if (host->using_dma) {
302                 host->dma_ops->stop(host);
303                 host->dma_ops->cleanup(host);
304         } else {
305                 /* Data transfer was stopped by the interrupt handler */
306                 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
307         }
308 }
309
310 static int dw_mci_get_dma_dir(struct mmc_data *data)
311 {
312         if (data->flags & MMC_DATA_WRITE)
313                 return DMA_TO_DEVICE;
314         else
315                 return DMA_FROM_DEVICE;
316 }
317
318 #ifdef CONFIG_MMC_DW_IDMAC
319 static void dw_mci_dma_cleanup(struct dw_mci *host)
320 {
321         struct mmc_data *data = host->data;
322
323         if (data)
324                 if (!data->host_cookie)
325                         dma_unmap_sg(host->dev,
326                                      data->sg,
327                                      data->sg_len,
328                                      dw_mci_get_dma_dir(data));
329 }
330
331 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
332 {
333         u32 temp;
334
335         /* Disable and reset the IDMAC interface */
336         temp = mci_readl(host, CTRL);
337         temp &= ~SDMMC_CTRL_USE_IDMAC;
338         temp |= SDMMC_CTRL_DMA_RESET;
339         mci_writel(host, CTRL, temp);
340
341         /* Stop the IDMAC running */
342         temp = mci_readl(host, BMOD);
343         temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
344         mci_writel(host, BMOD, temp);
345 }
346
347 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
348 {
349         struct mmc_data *data = host->data;
350
351         dev_vdbg(host->dev, "DMA complete\n");
352
353         host->dma_ops->cleanup(host);
354
355         /*
356          * If the card was removed, data will be NULL. No point in trying to
357          * send the stop command or waiting for NBUSY in this case.
358          */
359         if (data) {
360                 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
361                 tasklet_schedule(&host->tasklet);
362         }
363 }
364
/*
 * Fill the IDMAC descriptor ring from a DMA-mapped scatterlist:
 * one descriptor per sg entry, then tag the first/last descriptors
 * and publish everything with a write barrier.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i;
	struct idmac_desc *desc = host->sg_cpu;

	for (i = 0; i < sg_len; i++, desc++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		u32 mem_addr = sg_dma_address(&data->sg[i]);

		/* Set the OWN bit and disable interrupts for this descriptor */
		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;

		/* Buffer length */
		IDMAC_SET_BUFFER1_SIZE(desc, length);

		/* Physical address to DMA to/from */
		desc->des2 = mem_addr;
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/*
	 * Set last descriptor: re-enable its completion interrupt and mark
	 * it LD instead of chained.
	 * NOTE(review): the byte-offset arithmetic below is only correct if
	 * host->sg_cpu is void * (so '+' advances in bytes) — confirm the
	 * declaration in dw_mmc.h; with an idmac_desc pointer this would
	 * index far past the ring.
	 */
	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	/* Descriptors must be in memory before the engine sees OWN bits */
	wmb();
}
396
397 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
398 {
399         u32 temp;
400
401         dw_mci_translate_sglist(host, host->data, sg_len);
402
403         /* Select IDMAC interface */
404         temp = mci_readl(host, CTRL);
405         temp |= SDMMC_CTRL_USE_IDMAC;
406         mci_writel(host, CTRL, temp);
407
408         wmb();
409
410         /* Enable the IDMAC */
411         temp = mci_readl(host, BMOD);
412         temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
413         mci_writel(host, BMOD, temp);
414
415         /* Start it running */
416         mci_writel(host, PLDMND, 1);
417 }
418
419 static int dw_mci_idmac_init(struct dw_mci *host)
420 {
421         struct idmac_desc *p;
422         int i;
423
424         /* Number of descriptors in the ring buffer */
425         host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
426
427         /* Forward link the descriptor list */
428         for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
429                 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
430
431         /* Set the last descriptor as the end-of-ring descriptor */
432         p->des3 = host->sg_dma;
433         p->des0 = IDMAC_DES0_ER;
434
435         mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
436
437         /* Mask out interrupts - get Tx & Rx complete only */
438         mci_writel(host, IDSTS, IDMAC_INT_CLR);
439         mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
440                    SDMMC_IDMAC_INT_TI);
441
442         /* Set the descriptor base address */
443         mci_writel(host, DBADDR, host->sg_dma);
444         return 0;
445 }
446
/* DMA backend vtable used when the internal DMA controller is compiled in. */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
454 #endif /* CONFIG_MMC_DW_IDMAC */
455
456 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
457                                    struct mmc_data *data,
458                                    bool next)
459 {
460         struct scatterlist *sg;
461         unsigned int i, sg_len;
462
463         if (!next && data->host_cookie)
464                 return data->host_cookie;
465
466         /*
467          * We don't do DMA on "complex" transfers, i.e. with
468          * non-word-aligned buffers or lengths. Also, we don't bother
469          * with all the DMA setup overhead for short transfers.
470          */
471         if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
472                 return -EINVAL;
473
474         if (data->blksz & 3)
475                 return -EINVAL;
476
477         for_each_sg(data->sg, sg, data->sg_len, i) {
478                 if (sg->offset & 3 || sg->length & 3)
479                         return -EINVAL;
480         }
481
482         sg_len = dma_map_sg(host->dev,
483                             data->sg,
484                             data->sg_len,
485                             dw_mci_get_dma_dir(data));
486         if (sg_len == 0)
487                 return -EINVAL;
488
489         if (next)
490                 data->host_cookie = sg_len;
491
492         return sg_len;
493 }
494
495 static void dw_mci_pre_req(struct mmc_host *mmc,
496                            struct mmc_request *mrq,
497                            bool is_first_req)
498 {
499         struct dw_mci_slot *slot = mmc_priv(mmc);
500         struct mmc_data *data = mrq->data;
501
502         if (!slot->host->use_dma || !data)
503                 return;
504
505         if (data->host_cookie) {
506                 data->host_cookie = 0;
507                 return;
508         }
509
510         if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
511                 data->host_cookie = 0;
512 }
513
514 static void dw_mci_post_req(struct mmc_host *mmc,
515                             struct mmc_request *mrq,
516                             int err)
517 {
518         struct dw_mci_slot *slot = mmc_priv(mmc);
519         struct mmc_data *data = mrq->data;
520
521         if (!slot->host->use_dma || !data)
522                 return;
523
524         if (data->host_cookie)
525                 dma_unmap_sg(slot->host->dev,
526                              data->sg,
527                              data->sg_len,
528                              dw_mci_get_dma_dir(data));
529         data->host_cookie = 0;
530 }
531
/*
 * Tune the FIFOTH register (DMA burst size MSIZE plus RX/TX watermarks)
 * for the transfer's block size. Falls back to MSIZE=1, rx_wmark=1 when
 * no larger burst size divides both the block depth and the TX
 * watermark complement.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	unsigned int blksz = data->blksz;
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	u32 fifoth_val;
	int idx;

	tx_wmark = host->fifo_depth / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/* MSIZE stays 1 if blksz is not a multiple of the FIFO width */
	if (blksz % fifo_width)
		goto done;

	/*
	 * Pick the largest burst size that evenly divides both the block
	 * depth and the TX watermark complement; if none matches (idx
	 * reaches 0), the initial msize/rx_wmark values are used.
	 */
	for (idx = (sizeof(mszs) / sizeof(mszs[0])) - 1; idx > 0; idx--) {
		if (!(blksz_depth % mszs[idx]) &&
		    !(tx_wmark_invers % mszs[idx])) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	}

done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
572
573 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
574 {
575         unsigned int blksz = data->blksz;
576         u32 blksz_depth, fifo_depth;
577         u16 thld_size;
578
579         WARN_ON(!(data->flags & MMC_DATA_READ));
580
581         if (host->timing != MMC_TIMING_MMC_HS200 &&
582             host->timing != MMC_TIMING_UHS_SDR104)
583                 goto disable;
584
585         blksz_depth = blksz / (1 << host->data_shift);
586         fifo_depth = host->fifo_depth;
587
588         if (blksz_depth > fifo_depth)
589                 goto disable;
590
591         /*
592          * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
593          * If (blksz_depth) <  (fifo_depth >> 1), should be thld_size = blksz
594          * Currently just choose blksz.
595          */
596         thld_size = blksz;
597         mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
598         return;
599
600 disable:
601         mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
602 }
603
604 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
605 {
606         int sg_len;
607         u32 temp;
608
609         host->using_dma = 0;
610
611         /* If we don't have a channel, we can't do DMA */
612         if (!host->use_dma)
613                 return -ENODEV;
614
615         sg_len = dw_mci_pre_dma_transfer(host, data, 0);
616         if (sg_len < 0) {
617                 host->dma_ops->stop(host);
618                 return sg_len;
619         }
620
621         host->using_dma = 1;
622
623         dev_vdbg(host->dev,
624                  "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
625                  (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
626                  sg_len);
627
628         /*
629          * Decide the MSIZE and RX/TX Watermark.
630          * If current block size is same with previous size,
631          * no need to update fifoth.
632          */
633         if (host->prev_blksz != data->blksz)
634                 dw_mci_adjust_fifoth(host, data);
635
636         /* Enable the DMA interface */
637         temp = mci_readl(host, CTRL);
638         temp |= SDMMC_CTRL_DMA_ENABLE;
639         mci_writel(host, CTRL, temp);
640
641         /* Disable RX/TX IRQs, let DMA handle it */
642         temp = mci_readl(host, INTMASK);
643         temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
644         mci_writel(host, INTMASK, temp);
645
646         host->dma_ops->start(host, sg_len);
647
648         return 0;
649 }
650
651 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
652 {
653         u32 temp;
654
655         data->error = -EINPROGRESS;
656
657         WARN_ON(host->data);
658         host->sg = NULL;
659         host->data = data;
660
661         if (data->flags & MMC_DATA_READ) {
662                 host->dir_status = DW_MCI_RECV_STATUS;
663                 dw_mci_ctrl_rd_thld(host, data);
664         } else {
665                 host->dir_status = DW_MCI_SEND_STATUS;
666         }
667
668         if (dw_mci_submit_data_dma(host, data)) {
669                 int flags = SG_MITER_ATOMIC;
670                 if (host->data->flags & MMC_DATA_READ)
671                         flags |= SG_MITER_TO_SG;
672                 else
673                         flags |= SG_MITER_FROM_SG;
674
675                 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
676                 host->sg = data->sg;
677                 host->part_buf_start = 0;
678                 host->part_buf_count = 0;
679
680                 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
681                 temp = mci_readl(host, INTMASK);
682                 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
683                 mci_writel(host, INTMASK, temp);
684
685                 temp = mci_readl(host, CTRL);
686                 temp &= ~SDMMC_CTRL_DMA_ENABLE;
687                 mci_writel(host, CTRL, temp);
688
689                 /*
690                  * Use the initial fifoth_val for PIO mode.
691                  * If next issued data may be transfered by DMA mode,
692                  * prev_blksz should be invalidated.
693                  */
694                 mci_writel(host, FIFOTH, host->fifoth_val);
695                 host->prev_blksz = 0;
696         } else {
697                 /*
698                  * Keep the current block size.
699                  * It will be used to decide whether to update
700                  * fifoth register next time.
701                  */
702                 host->prev_blksz = data->blksz;
703         }
704 }
705
706 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
707 {
708         struct dw_mci *host = slot->host;
709         unsigned long timeout = jiffies + msecs_to_jiffies(500);
710         unsigned int cmd_status = 0;
711
712         mci_writel(host, CMDARG, arg);
713         wmb();
714         mci_writel(host, CMD, SDMMC_CMD_START | cmd);
715
716         while (time_before(jiffies, timeout)) {
717                 cmd_status = mci_readl(host, CMD);
718                 if (!(cmd_status & SDMMC_CMD_START))
719                         return;
720         }
721         dev_err(&slot->mmc->class_dev,
722                 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
723                 cmd, arg, cmd_status);
724 }
725
/*
 * Program the card clock and bus width for a slot. Every clock-register
 * change must be announced to the CIU with an update-clock command,
 * hence the repeated mci_send_cmd() calls. A zero clock gates the card
 * clock off entirely.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	unsigned int clock = slot->clock;
	u32 div;
	u32 clk_en_a;

	if (!clock) {
		mci_writel(host, CLKENA, 0);
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
	} else if (clock != host->current_speed || force_clkinit) {
		div = host->bus_hz / clock;
		if (host->bus_hz % clock && host->bus_hz > clock)
			/*
			 * move the + 1 after the divide to prevent
			 * over-clocking the card.
			 */
			div += 1;

		/* CLKDIV divides by 2*div; 0 means bypass (bus_hz == clock) */
		div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;

		/* Only log when the effective rate actually changes */
		if ((clock << div) != slot->__clk_old || force_clkinit)
			dev_info(&slot->mmc->class_dev,
				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
				 slot->id, host->bus_hz, clock,
				 div ? ((host->bus_hz / div) >> 1) :
				 host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		mci_send_cmd(slot,
			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);

		/* remember the divider-adjusted clock for the log check above */
		slot->__clk_old = clock << div;
	}

	host->current_speed = clock;

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
789
/*
 * Kick off one command (and optional data phase) on the controller:
 * reset per-request bookkeeping, program the transfer geometry, submit
 * the data phase, start the command, and pre-compute the stop command.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;
	/* Let platform code route the controller to this slot, if supported */
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	host->cur_slot = slot;
	host->mrq = mrq;

	/* Fresh request: clear per-request event/status tracking */
	host->pending_events = 0;
	host->completed_events = 0;
	host->data_status = 0;

	data = cmd->data;
	if (data) {
		/* Maximum timeout plus total byte count and block size */
		dw_mci_set_timeout(host);
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		/* data setup must be visible before the command starts */
		wmb();
	}

	dw_mci_start_command(host, cmd, cmdflags);

	/* Pre-compute the stop command so it can be fired quickly later */
	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
832
833 static void dw_mci_start_request(struct dw_mci *host,
834                                  struct dw_mci_slot *slot)
835 {
836         struct mmc_request *mrq = slot->mrq;
837         struct mmc_command *cmd;
838
839         cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
840         __dw_mci_start_request(host, slot, cmd);
841 }
842
843 /* must be called with host->lock held */
844 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
845                                  struct mmc_request *mrq)
846 {
847         dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
848                  host->state);
849
850         slot->mrq = mrq;
851
852         if (host->state == STATE_IDLE) {
853                 host->state = STATE_SENDING_CMD;
854                 dw_mci_start_request(host, slot);
855         } else {
856                 list_add_tail(&slot->queue_node, &host->queue);
857         }
858 }
859
860 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
861 {
862         struct dw_mci_slot *slot = mmc_priv(mmc);
863         struct dw_mci *host = slot->host;
864
865         WARN_ON(slot->mrq);
866
867         /*
868          * The check for card presence and queueing of the request must be
869          * atomic, otherwise the card could be removed in between and the
870          * request wouldn't fail until another card was inserted.
871          */
872         spin_lock_bh(&host->lock);
873
874         if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
875                 spin_unlock_bh(&host->lock);
876                 mrq->cmd->error = -ENOMEDIUM;
877                 mmc_request_done(mmc, mrq);
878                 return;
879         }
880
881         dw_mci_queue_request(host, slot, mrq);
882
883         spin_unlock_bh(&host->lock);
884 }
885
/*
 * mmc_host_ops::set_ios - apply the bus width, DDR timing, clock and
 * power settings requested by the MMC core to this slot's registers.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct dw_mci_slot *slot = mmc_priv(mmc);
        const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 regs;

        /* Record the card type; it is programmed later in setup_bus */
        switch (ios->bus_width) {
        case MMC_BUS_WIDTH_4:
                slot->ctype = SDMMC_CTYPE_4BIT;
                break;
        case MMC_BUS_WIDTH_8:
                slot->ctype = SDMMC_CTYPE_8BIT;
                break;
        default:
                /* set default 1 bit mode */
                slot->ctype = SDMMC_CTYPE_1BIT;
        }

        regs = mci_readl(slot->host, UHS_REG);

        /* DDR mode set: per-slot DDR bit sits in the upper half of UHS_REG */
        if (ios->timing == MMC_TIMING_UHS_DDR50)
                regs |= ((0x1 << slot->id) << 16);
        else
                regs &= ~((0x1 << slot->id) << 16);

        mci_writel(slot->host, UHS_REG, regs);
        slot->host->timing = ios->timing;

        /*
         * Use mirror of ios->clock to prevent race with mmc
         * core ios update when finding the minimum.
         */
        slot->clock = ios->clock;

        /* Give the platform glue a chance to apply variant settings */
        if (drv_data && drv_data->set_ios)
                drv_data->set_ios(slot->host, ios);

        /* Slot specific timing and width adjustment */
        dw_mci_setup_bus(slot, false);

        switch (ios->power_mode) {
        case MMC_POWER_UP:
                /* Card needs the init sequence when power comes up */
                set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
                /* Power up slot */
                if (slot->host->pdata->setpower)
                        slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
                regs = mci_readl(slot->host, PWREN);
                regs |= (1 << slot->id);
                mci_writel(slot->host, PWREN, regs);
                break;
        case MMC_POWER_OFF:
                /* Power down slot */
                if (slot->host->pdata->setpower)
                        slot->host->pdata->setpower(slot->id, 0);
                regs = mci_readl(slot->host, PWREN);
                regs &= ~(1 << slot->id);
                mci_writel(slot->host, PWREN, regs);
                break;
        default:
                break;
        }
}
949
950 static int dw_mci_get_ro(struct mmc_host *mmc)
951 {
952         int read_only;
953         struct dw_mci_slot *slot = mmc_priv(mmc);
954         struct dw_mci_board *brd = slot->host->pdata;
955
956         /* Use platform get_ro function, else try on board write protect */
957         if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
958                 read_only = 0;
959         else if (brd->get_ro)
960                 read_only = brd->get_ro(slot->id);
961         else if (gpio_is_valid(slot->wp_gpio))
962                 read_only = gpio_get_value(slot->wp_gpio);
963         else
964                 read_only =
965                         mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
966
967         dev_dbg(&mmc->class_dev, "card is %s\n",
968                 read_only ? "read-only" : "read-write");
969
970         return read_only;
971 }
972
973 static int dw_mci_get_cd(struct mmc_host *mmc)
974 {
975         int present;
976         struct dw_mci_slot *slot = mmc_priv(mmc);
977         struct dw_mci_board *brd = slot->host->pdata;
978
979         /* Use platform get_cd function, else try onboard card detect */
980         if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
981                 present = 1;
982         else if (brd->get_cd)
983                 present = !brd->get_cd(slot->id);
984         else
985                 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
986                         == 0 ? 1 : 0;
987
988         if (present)
989                 dev_dbg(&mmc->class_dev, "card is present\n");
990         else
991                 dev_dbg(&mmc->class_dev, "card is not present\n");
992
993         return present;
994 }
995
996 /*
997  * Disable lower power mode.
998  *
999  * Low power mode will stop the card clock when idle.  According to the
1000  * description of the CLKENA register we should disable low power mode
1001  * for SDIO cards if we need SDIO interrupts to work.
1002  *
1003  * This function is fast if low power mode is already disabled.
1004  */
1005 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1006 {
1007         struct dw_mci *host = slot->host;
1008         u32 clk_en_a;
1009         const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1010
1011         clk_en_a = mci_readl(host, CLKENA);
1012
1013         if (clk_en_a & clken_low_pwr) {
1014                 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1015                 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1016                              SDMMC_CMD_PRV_DAT_WAIT, 0);
1017         }
1018 }
1019
1020 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1021 {
1022         struct dw_mci_slot *slot = mmc_priv(mmc);
1023         struct dw_mci *host = slot->host;
1024         u32 int_mask;
1025
1026         /* Enable/disable Slot Specific SDIO interrupt */
1027         int_mask = mci_readl(host, INTMASK);
1028         if (enb) {
1029                 /*
1030                  * Turn off low power mode if it was enabled.  This is a bit of
1031                  * a heavy operation and we disable / enable IRQs a lot, so
1032                  * we'll leave low power mode disabled and it will get
1033                  * re-enabled again in dw_mci_setup_bus().
1034                  */
1035                 dw_mci_disable_low_power(slot);
1036
1037                 mci_writel(host, INTMASK,
1038                            (int_mask | SDMMC_INT_SDIO(slot->id)));
1039         } else {
1040                 mci_writel(host, INTMASK,
1041                            (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1042         }
1043 }
1044
1045 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1046 {
1047         struct dw_mci_slot *slot = mmc_priv(mmc);
1048         struct dw_mci *host = slot->host;
1049         const struct dw_mci_drv_data *drv_data = host->drv_data;
1050         struct dw_mci_tuning_data tuning_data;
1051         int err = -ENOSYS;
1052
1053         if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1054                 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1055                         tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1056                         tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1057                 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1058                         tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1059                         tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1060                 } else {
1061                         return -EINVAL;
1062                 }
1063         } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1064                 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1065                 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1066         } else {
1067                 dev_err(host->dev,
1068                         "Undefined command(%d) for tuning\n", opcode);
1069                 return -EINVAL;
1070         }
1071
1072         if (drv_data && drv_data->execute_tuning)
1073                 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
1074         return err;
1075 }
1076
/* Host controller operations registered with the MMC core per slot */
static const struct mmc_host_ops dw_mci_ops = {
        .request                = dw_mci_request,
        .pre_req                = dw_mci_pre_req,
        .post_req               = dw_mci_post_req,
        .set_ios                = dw_mci_set_ios,
        .get_ro                 = dw_mci_get_ro,
        .get_cd                 = dw_mci_get_cd,
        .enable_sdio_irq        = dw_mci_enable_sdio_irq,
        .execute_tuning         = dw_mci_execute_tuning,
};
1087
/*
 * Finish the given request and start the next queued one, if any.
 * Must be called with host->lock held; the lock is dropped across the
 * mmc_request_done() call and re-taken before returning (see the sparse
 * annotations below).
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
        __releases(&host->lock)
        __acquires(&host->lock)
{
        struct dw_mci_slot *slot;
        struct mmc_host *prev_mmc = host->cur_slot->mmc;

        WARN_ON(host->cmd || host->data);

        host->cur_slot->mrq = NULL;
        host->mrq = NULL;
        if (!list_empty(&host->queue)) {
                /* Another slot was waiting - hand the controller over */
                slot = list_entry(host->queue.next,
                                  struct dw_mci_slot, queue_node);
                list_del(&slot->queue_node);
                dev_vdbg(host->dev, "list not empty: %s is next\n",
                         mmc_hostname(slot->mmc));
                host->state = STATE_SENDING_CMD;
                dw_mci_start_request(host, slot);
        } else {
                dev_vdbg(host->dev, "list empty\n");
                host->state = STATE_IDLE;
        }

        /* Complete the old request without holding the host lock */
        spin_unlock(&host->lock);
        mmc_request_done(prev_mmc, mrq);
        spin_lock(&host->lock);
}
1116
/*
 * Latch the card's response registers into cmd->resp[] and translate the
 * saved interrupt status (host->cmd_status) into cmd->error.
 */
static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
        u32 status = host->cmd_status;

        host->cmd_status = 0;

        /* Read the response from the card (up to 16 bytes) */
        if (cmd->flags & MMC_RSP_PRESENT) {
                if (cmd->flags & MMC_RSP_136) {
                        /* 136-bit response: RESP0 holds the least
                         * significant word, so fill resp[] backwards */
                        cmd->resp[3] = mci_readl(host, RESP0);
                        cmd->resp[2] = mci_readl(host, RESP1);
                        cmd->resp[1] = mci_readl(host, RESP2);
                        cmd->resp[0] = mci_readl(host, RESP3);
                } else {
                        cmd->resp[0] = mci_readl(host, RESP0);
                        cmd->resp[1] = 0;
                        cmd->resp[2] = 0;
                        cmd->resp[3] = 0;
                }
        }

        /* Error precedence: response timeout, then CRC (only if the
         * response type carries a CRC), then generic response error */
        if (status & SDMMC_INT_RTO)
                cmd->error = -ETIMEDOUT;
        else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
                cmd->error = -EILSEQ;
        else if (status & SDMMC_INT_RESP_ERR)
                cmd->error = -EIO;
        else
                cmd->error = 0;

        if (cmd->error) {
                /* newer ip versions need a delay between retries */
                if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
                        mdelay(20);
        }
}
1153
/*
 * Request state machine, run as a tasklet.  Driven by the event bits the
 * interrupt handler sets in host->pending_events, it advances the current
 * mmc_request through the command, data, busy and stop phases.  States
 * deliberately fall through when an event has already arrived so a whole
 * request can complete in one pass.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
        struct dw_mci *host = (struct dw_mci *)priv;
        struct mmc_data *data;
        struct mmc_command *cmd;
        enum dw_mci_state state;
        enum dw_mci_state prev_state;
        u32 status, ctrl;

        spin_lock(&host->lock);

        state = host->state;
        data = host->data;

        /* Iterate until a pass makes no further state transition */
        do {
                prev_state = state;

                switch (state) {
                case STATE_IDLE:
                        break;

                case STATE_SENDING_CMD:
                        if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
                                                &host->pending_events))
                                break;

                        cmd = host->cmd;
                        host->cmd = NULL;
                        set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
                        dw_mci_command_complete(host, cmd);
                        /* CMD23 finished cleanly: now send the real
                         * data command and wait for its completion */
                        if (cmd == host->mrq->sbc && !cmd->error) {
                                prev_state = state = STATE_SENDING_CMD;
                                __dw_mci_start_request(host, host->cur_slot,
                                                       host->mrq->cmd);
                                goto unlock;
                        }

                        /* Data command failed: abort DMA and, if a stop
                         * command exists, send it before finishing */
                        if (cmd->data && cmd->error) {
                                dw_mci_stop_dma(host);
                                if (data->stop) {
                                        send_stop_cmd(host, data);
                                        state = STATE_SENDING_STOP;
                                        break;
                                } else {
                                        host->data = NULL;
                                }
                        }

                        /* Non-data request (or failed one): all done */
                        if (!host->mrq->data || cmd->error) {
                                dw_mci_request_end(host, host->mrq);
                                goto unlock;
                        }

                        prev_state = state = STATE_SENDING_DATA;
                        /* fall through */

                case STATE_SENDING_DATA:
                        /* A data error aborts the transfer; the error
                         * cause is decoded later in STATE_DATA_BUSY */
                        if (test_and_clear_bit(EVENT_DATA_ERROR,
                                               &host->pending_events)) {
                                dw_mci_stop_dma(host);
                                if (data->stop)
                                        send_stop_cmd(host, data);
                                state = STATE_DATA_ERROR;
                                break;
                        }

                        if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
                                                &host->pending_events))
                                break;

                        set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
                        prev_state = state = STATE_DATA_BUSY;
                        /* fall through */

                case STATE_DATA_BUSY:
                        if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
                                                &host->pending_events))
                                break;

                        host->data = NULL;
                        set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
                        status = host->data_status;

                        /* Decode the saved data interrupt status */
                        if (status & DW_MCI_DATA_ERROR_FLAGS) {
                                if (status & SDMMC_INT_DRTO) {
                                        data->error = -ETIMEDOUT;
                                } else if (status & SDMMC_INT_DCRC) {
                                        data->error = -EILSEQ;
                                } else if (status & SDMMC_INT_EBE &&
                                           host->dir_status ==
                                                        DW_MCI_SEND_STATUS) {
                                        /*
                                         * No data CRC status was returned.
                                         * The number of bytes transferred will
                                         * be exaggerated in PIO mode.
                                         */
                                        data->bytes_xfered = 0;
                                        data->error = -ETIMEDOUT;
                                } else {
                                        dev_err(host->dev,
                                                "data FIFO error "
                                                "(status=%08x)\n",
                                                status);
                                        data->error = -EIO;
                                }
                                /*
                                 * After an error, there may be data lingering
                                 * in the FIFO, so reset it - doing so
                                 * generates a block interrupt, hence setting
                                 * the scatter-gather pointer to NULL.
                                 */
                                sg_miter_stop(&host->sg_miter);
                                host->sg = NULL;
                                ctrl = mci_readl(host, CTRL);
                                ctrl |= SDMMC_CTRL_FIFO_RESET;
                                mci_writel(host, CTRL, ctrl);
                        } else {
                                data->bytes_xfered = data->blocks * data->blksz;
                                data->error = 0;
                        }

                        if (!data->stop) {
                                dw_mci_request_end(host, host->mrq);
                                goto unlock;
                        }

                        /* CMD23 request finished cleanly: no stop command
                         * was actually sent, so complete it here */
                        if (host->mrq->sbc && !data->error) {
                                data->stop->error = 0;
                                dw_mci_request_end(host, host->mrq);
                                goto unlock;
                        }

                        prev_state = state = STATE_SENDING_STOP;
                        if (!data->error)
                                send_stop_cmd(host, data);
                        /* fall through */

                case STATE_SENDING_STOP:
                        if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
                                                &host->pending_events))
                                break;

                        /* CMD error in data command: flush stale FIFO data */
                        if (host->mrq->cmd->error && host->mrq->data) {
                                sg_miter_stop(&host->sg_miter);
                                host->sg = NULL;
                                ctrl = mci_readl(host, CTRL);
                                ctrl |= SDMMC_CTRL_FIFO_RESET;
                                mci_writel(host, CTRL, ctrl);
                        }

                        host->cmd = NULL;
                        host->data = NULL;
                        dw_mci_command_complete(host, host->mrq->stop);
                        dw_mci_request_end(host, host->mrq);
                        goto unlock;

                case STATE_DATA_ERROR:
                        /* Wait for the aborted transfer to drain, then let
                         * STATE_DATA_BUSY report the error */
                        if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
                                                &host->pending_events))
                                break;

                        state = STATE_DATA_BUSY;
                        break;
                }
        } while (state != prev_state);

        host->state = state;
unlock:
        spin_unlock(&host->lock);

}
1326
1327 /* push final bytes to part_buf, only use during push */
1328 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1329 {
1330         memcpy((void *)&host->part_buf, buf, cnt);
1331         host->part_buf_count = cnt;
1332 }
1333
1334 /* append bytes to part_buf, only use during push */
1335 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1336 {
1337         cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1338         memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1339         host->part_buf_count += cnt;
1340         return cnt;
1341 }
1342
1343 /* pull first bytes from part_buf, only use during pull */
1344 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1345 {
1346         cnt = min(cnt, (int)host->part_buf_count);
1347         if (cnt) {
1348                 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1349                        cnt);
1350                 host->part_buf_count -= cnt;
1351                 host->part_buf_start += cnt;
1352         }
1353         return cnt;
1354 }
1355
1356 /* pull final bytes from the part_buf, assuming it's just been filled */
1357 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1358 {
1359         memcpy(buf, &host->part_buf, cnt);
1360         host->part_buf_start = cnt;
1361         host->part_buf_count = (1 << host->data_shift) - cnt;
1362 }
1363
/*
 * PIO write for a 16-bit wide FIFO: push cnt bytes from buf into the data
 * FIFO.  An odd trailing byte is staged in host->part_buf and flushed
 * either on the next call or when the transfer's full length is reached.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
        struct mmc_data *data = host->data;
        int init_cnt = cnt;

        /* try and push anything in the part_buf */
        if (unlikely(host->part_buf_count)) {
                int len = dw_mci_push_part_bytes(host, buf, cnt);
                buf += len;
                cnt -= len;
                if (host->part_buf_count == 2) {
                        /* part_buf now holds a full FIFO word - flush it */
                        mci_writew(host, DATA(host->data_offset),
                                        host->part_buf16);
                        host->part_buf_count = 0;
                }
        }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        /* Slow path: bounce through an aligned buffer in chunks */
        if (unlikely((unsigned long)buf & 0x1)) {
                while (cnt >= 2) {
                        u16 aligned_buf[64];
                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
                        int items = len >> 1;
                        int i;
                        /* memcpy from input buffer into aligned buffer */
                        memcpy(aligned_buf, buf, len);
                        buf += len;
                        cnt -= len;
                        /* push data from aligned buffer into fifo */
                        for (i = 0; i < items; ++i)
                                mci_writew(host, DATA(host->data_offset),
                                                aligned_buf[i]);
                }
        } else
#endif
        {
                u16 *pdata = buf;
                for (; cnt >= 2; cnt -= 2)
                        mci_writew(host, DATA(host->data_offset), *pdata++);
                buf = pdata;
        }
        /* put anything remaining in the part_buf */
        if (cnt) {
                dw_mci_set_part_bytes(host, buf, cnt);
                 /* Push data if we have reached the expected data length */
                if ((data->bytes_xfered + init_cnt) ==
                    (data->blksz * data->blocks))
                        mci_writew(host, DATA(host->data_offset),
                                   host->part_buf16);
        }
}
1414
/*
 * PIO read for a 16-bit wide FIFO: pull cnt bytes from the data FIFO into
 * buf, using an aligned bounce buffer when the destination is misaligned
 * and the architecture lacks efficient unaligned access.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (unlikely((unsigned long)buf & 0x1)) {
                while (cnt >= 2) {
                        /* pull data from fifo into aligned buffer */
                        u16 aligned_buf[64];
                        int len = min(cnt & -2, (int)sizeof(aligned_buf));
                        int items = len >> 1;
                        int i;
                        for (i = 0; i < items; ++i)
                                aligned_buf[i] = mci_readw(host,
                                                DATA(host->data_offset));
                        /* memcpy from aligned buffer into output buffer */
                        memcpy(buf, aligned_buf, len);
                        buf += len;
                        cnt -= len;
                }
        } else
#endif
        {
                u16 *pdata = buf;
                for (; cnt >= 2; cnt -= 2)
                        *pdata++ = mci_readw(host, DATA(host->data_offset));
                buf = pdata;
        }
        if (cnt) {
                /* Odd trailing byte: read one FIFO word and stage the
                 * unconsumed remainder in part_buf */
                host->part_buf16 = mci_readw(host, DATA(host->data_offset));
                dw_mci_pull_final_bytes(host, buf, cnt);
        }
}
1446
/*
 * PIO write for a 32-bit wide FIFO: push cnt bytes from buf into the data
 * FIFO.  Up to three trailing bytes are staged in host->part_buf and
 * flushed on the next call or once the full transfer length is reached.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
        struct mmc_data *data = host->data;
        int init_cnt = cnt;

        /* try and push anything in the part_buf */
        if (unlikely(host->part_buf_count)) {
                int len = dw_mci_push_part_bytes(host, buf, cnt);
                buf += len;
                cnt -= len;
                if (host->part_buf_count == 4) {
                        /* part_buf now holds a full FIFO word - flush it */
                        mci_writel(host, DATA(host->data_offset),
                                        host->part_buf32);
                        host->part_buf_count = 0;
                }
        }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        /* Slow path: bounce through an aligned buffer in chunks */
        if (unlikely((unsigned long)buf & 0x3)) {
                while (cnt >= 4) {
                        u32 aligned_buf[32];
                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
                        int items = len >> 2;
                        int i;
                        /* memcpy from input buffer into aligned buffer */
                        memcpy(aligned_buf, buf, len);
                        buf += len;
                        cnt -= len;
                        /* push data from aligned buffer into fifo */
                        for (i = 0; i < items; ++i)
                                mci_writel(host, DATA(host->data_offset),
                                                aligned_buf[i]);
                }
        } else
#endif
        {
                u32 *pdata = buf;
                for (; cnt >= 4; cnt -= 4)
                        mci_writel(host, DATA(host->data_offset), *pdata++);
                buf = pdata;
        }
        /* put anything remaining in the part_buf */
        if (cnt) {
                dw_mci_set_part_bytes(host, buf, cnt);
                 /* Push data if we have reached the expected data length */
                if ((data->bytes_xfered + init_cnt) ==
                    (data->blksz * data->blocks))
                        mci_writel(host, DATA(host->data_offset),
                                   host->part_buf32);
        }
}
1497
/*
 * PIO read for a 32-bit wide FIFO: pull cnt bytes from the data FIFO into
 * buf, using an aligned bounce buffer when the destination is misaligned
 * and the architecture lacks efficient unaligned access.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (unlikely((unsigned long)buf & 0x3)) {
                while (cnt >= 4) {
                        /* pull data from fifo into aligned buffer */
                        u32 aligned_buf[32];
                        int len = min(cnt & -4, (int)sizeof(aligned_buf));
                        int items = len >> 2;
                        int i;
                        for (i = 0; i < items; ++i)
                                aligned_buf[i] = mci_readl(host,
                                                DATA(host->data_offset));
                        /* memcpy from aligned buffer into output buffer */
                        memcpy(buf, aligned_buf, len);
                        buf += len;
                        cnt -= len;
                }
        } else
#endif
        {
                u32 *pdata = buf;
                for (; cnt >= 4; cnt -= 4)
                        *pdata++ = mci_readl(host, DATA(host->data_offset));
                buf = pdata;
        }
        if (cnt) {
                /* Partial trailing word: read one FIFO word and stage the
                 * unconsumed remainder in part_buf */
                host->part_buf32 = mci_readl(host, DATA(host->data_offset));
                dw_mci_pull_final_bytes(host, buf, cnt);
        }
}
1529
/*
 * PIO write for a 64-bit wide FIFO: push cnt bytes from buf into the data
 * FIFO.  Up to seven trailing bytes are staged in host->part_buf and
 * flushed on the next call or once the full transfer length is reached.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
        struct mmc_data *data = host->data;
        int init_cnt = cnt;

        /* try and push anything in the part_buf */
        if (unlikely(host->part_buf_count)) {
                int len = dw_mci_push_part_bytes(host, buf, cnt);
                buf += len;
                cnt -= len;

                if (host->part_buf_count == 8) {
                        /* part_buf now holds a full FIFO word - flush it */
                        mci_writeq(host, DATA(host->data_offset),
                                        host->part_buf);
                        host->part_buf_count = 0;
                }
        }
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        /* Slow path: bounce through an aligned buffer in chunks */
        if (unlikely((unsigned long)buf & 0x7)) {
                while (cnt >= 8) {
                        u64 aligned_buf[16];
                        int len = min(cnt & -8, (int)sizeof(aligned_buf));
                        int items = len >> 3;
                        int i;
                        /* memcpy from input buffer into aligned buffer */
                        memcpy(aligned_buf, buf, len);
                        buf += len;
                        cnt -= len;
                        /* push data from aligned buffer into fifo */
                        for (i = 0; i < items; ++i)
                                mci_writeq(host, DATA(host->data_offset),
                                                aligned_buf[i]);
                }
        } else
#endif
        {
                u64 *pdata = buf;
                for (; cnt >= 8; cnt -= 8)
                        mci_writeq(host, DATA(host->data_offset), *pdata++);
                buf = pdata;
        }
        /* put anything remaining in the part_buf */
        if (cnt) {
                dw_mci_set_part_bytes(host, buf, cnt);
                /* Push data if we have reached the expected data length */
                if ((data->bytes_xfered + init_cnt) ==
                    (data->blksz * data->blocks))
                        mci_writeq(host, DATA(host->data_offset),
                                   host->part_buf);
        }
}
1581
/*
 * PIO read for a 64-bit wide FIFO: pull cnt bytes from the data FIFO into
 * buf, using an aligned bounce buffer when the destination is misaligned
 * and the architecture lacks efficient unaligned access.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (unlikely((unsigned long)buf & 0x7)) {
                while (cnt >= 8) {
                        /* pull data from fifo into aligned buffer */
                        u64 aligned_buf[16];
                        int len = min(cnt & -8, (int)sizeof(aligned_buf));
                        int items = len >> 3;
                        int i;
                        for (i = 0; i < items; ++i)
                                aligned_buf[i] = mci_readq(host,
                                                DATA(host->data_offset));
                        /* memcpy from aligned buffer into output buffer */
                        memcpy(buf, aligned_buf, len);
                        buf += len;
                        cnt -= len;
                }
        } else
#endif
        {
                u64 *pdata = buf;
                for (; cnt >= 8; cnt -= 8)
                        *pdata++ = mci_readq(host, DATA(host->data_offset));
                buf = pdata;
        }
        if (cnt) {
                /* Partial trailing word: read one FIFO word and stage the
                 * unconsumed remainder in part_buf */
                host->part_buf = mci_readq(host, DATA(host->data_offset));
                dw_mci_pull_final_bytes(host, buf, cnt);
        }
}
1613
1614 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1615 {
1616         int len;
1617
1618         /* get remaining partial bytes */
1619         len = dw_mci_pull_part_bytes(host, buf, cnt);
1620         if (unlikely(len == cnt))
1621                 return;
1622         buf += len;
1623         cnt -= len;
1624
1625         /* get the rest of the data */
1626         host->pull_data(host, buf, cnt);
1627 }
1628
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
{
	/*
	 * PIO receive path: walk the request's scatterlist with sg_miter
	 * and drain the controller FIFO into it.  Called from interrupt
	 * context on RXDR, and with @dto=true on data-over so that any
	 * bytes still sitting in the FIFO are flushed.
	 */
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* bytes available = FIFO word count scaled by the
			 * bus data width, plus bytes parked in part_buf */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		/* sample MINTSTS before acking so a fresh RXDR re-runs us */
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	/* segment fully consumed: advance the iterator so the next
	 * interrupt starts on a fresh segment */
	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	/* scatterlist exhausted: the transfer phase is complete */
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();	/* publish host->sg = NULL before the event bit */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1682
static void dw_mci_write_data_pio(struct dw_mci *host)
{
	/*
	 * PIO transmit path: walk the request's scatterlist with sg_miter
	 * and push its data into the controller FIFO.  Called from
	 * interrupt context whenever TXDR signals free FIFO space.
	 */
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	void *buf;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	u32 status;
	unsigned int len;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

	do {
		if (!sg_miter_next(sg_miter))
			goto done;

		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;
		offset = 0;

		do {
			/* free space = (depth - fill level) scaled by the
			 * bus data width, minus bytes staged in part_buf */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			if (!len)
				break;
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
			offset += len;
			remain -= len;
		} while (remain);

		sg_miter->consumed = offset;
		/* sample MINTSTS before acking so a fresh TXDR re-runs us */
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	/* segment fully consumed: advance the iterator so the next
	 * interrupt starts on a fresh segment */
	if (!remain) {
		if (!sg_miter_next(sg_miter))
			goto done;
		sg_miter->consumed = 0;
	}
	sg_miter_stop(sg_miter);
	return;

done:
	/* scatterlist exhausted: the transfer phase is complete */
	sg_miter_stop(sg_miter);
	host->sg = NULL;
	smp_wmb();	/* publish host->sg = NULL before the event bit */
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
1736
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
	/* Record command completion status and kick the tasklet.  Keep
	 * the first recorded status; a later write would overwrite the
	 * original error cause. */
	if (!host->cmd_status)
		host->cmd_status = status;

	/* make cmd_status visible before the completion bit */
	smp_wmb();

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
}
1747
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
	/*
	 * Top-level interrupt handler: acks each pending source in
	 * RINTSTS, records status for the tasklet state machine, and
	 * services PIO data movement directly in IRQ context.
	 */
	struct dw_mci *host = dev_id;
	u32 pending;
	int i;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 * is configured.
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		/* synthesize a data-over event when nothing is pending
		 * but the FIFO count field (cf. SDMMC_GET_FCNT) is
		 * non-zero */
		if (!pending &&
		    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;
	}

	if (pending) {
		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
			host->cmd_status = pending;
			smp_wmb();	/* publish cmd_status before the event bit */
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
			/* NOTE(review): no tasklet_schedule() here — this
			 * path seems to rely on a subsequent CMD_DONE
			 * interrupt to kick the tasklet; confirm */
		}

		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
			/* if there is an error report DATA_ERROR */
			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
			host->data_status = pending;
			smp_wmb();	/* publish data_status before the event bit */
			set_bit(EVENT_DATA_ERROR, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_DATA_OVER) {
			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
			/* keep an earlier recorded (error) status intact */
			if (!host->data_status)
				host->data_status = pending;
			smp_wmb();
			if (host->dir_status == DW_MCI_RECV_STATUS) {
				/* flush any bytes still left in the FIFO */
				if (host->sg != NULL)
					dw_mci_read_data_pio(host, true);
			}
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			tasklet_schedule(&host->tasklet);
		}

		if (pending & SDMMC_INT_RXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
				dw_mci_read_data_pio(host, false);
		}

		if (pending & SDMMC_INT_TXDR) {
			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
				dw_mci_write_data_pio(host);
		}

		if (pending & SDMMC_INT_CMD_DONE) {
			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
			dw_mci_cmd_interrupt(host, pending);
		}

		if (pending & SDMMC_INT_CD) {
			/* card insert/remove is handled off the hard IRQ */
			mci_writel(host, RINTSTS, SDMMC_INT_CD);
			queue_work(host->card_workqueue, &host->card_work);
		}

		/* Handle SDIO Interrupts */
		for (i = 0; i < host->num_slots; i++) {
			struct dw_mci_slot *slot = host->slot[i];
			if (pending & SDMMC_INT_SDIO(i)) {
				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
				mmc_signal_sdio_irq(slot->mmc);
			}
		}

	}

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
	}
#endif

	return IRQ_HANDLED;
}
1841
static void dw_mci_work_routine_card(struct work_struct *work)
{
	/*
	 * Card-detect workqueue handler.  For every slot, loop until the
	 * sampled card state matches the last recorded one; on each
	 * change, fail any in-flight request and, on removal, reset the
	 * FIFO (and IDMAC when configured).
	 */
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);
	int i;

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;
		int present;
		u32 ctrl;

		present = dw_mci_get_cd(mmc);
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Mark card as present if applicable */
			if (present != 0)
				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

			/* Clean up queue if present */
			mrq = slot->mrq;
			if (mrq) {
				if (mrq == host->mrq) {
					/* request currently in flight: fail
					 * it according to how far the state
					 * machine has progressed */
					host->data = NULL;
					host->cmd = NULL;

					switch (host->state) {
					case STATE_IDLE:
						break;
					case STATE_SENDING_CMD:
						mrq->cmd->error = -ENOMEDIUM;
						if (!mrq->data)
							break;
						/* fall through */
					case STATE_SENDING_DATA:
						mrq->data->error = -ENOMEDIUM;
						dw_mci_stop_dma(host);
						break;
					case STATE_DATA_BUSY:
					case STATE_DATA_ERROR:
						if (mrq->data->error == -EINPROGRESS)
							mrq->data->error = -ENOMEDIUM;
						if (!mrq->stop)
							break;
						/* fall through */
					case STATE_SENDING_STOP:
						mrq->stop->error = -ENOMEDIUM;
						break;
					}

					dw_mci_request_end(host, mrq);
				} else {
					/* request still queued: fail it and
					 * complete it directly */
					list_del(&slot->queue_node);
					mrq->cmd->error = -ENOMEDIUM;
					if (mrq->data)
						mrq->data->error = -ENOMEDIUM;
					if (mrq->stop)
						mrq->stop->error = -ENOMEDIUM;

					/* drop the lock around the completion
					 * callback */
					spin_unlock(&host->lock);
					mmc_request_done(slot->mmc, mrq);
					spin_lock(&host->lock);
				}
			}

			/* Power down slot */
			if (present == 0) {
				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

				/*
				 * Clear down the FIFO - doing so generates a
				 * block interrupt, hence setting the
				 * scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;

				ctrl = mci_readl(host, CTRL);
				ctrl |= SDMMC_CTRL_FIFO_RESET;
				mci_writel(host, CTRL, ctrl);

#ifdef CONFIG_MMC_DW_IDMAC
				ctrl = mci_readl(host, BMOD);
				/* Software reset of DMA */
				ctrl |= SDMMC_IDMAC_SWRESET;
				mci_writel(host, BMOD, ctrl);
#endif

			}

			spin_unlock_bh(&host->lock);

			/* re-sample: the state may have changed again while
			 * we were processing */
			present = dw_mci_get_cd(mmc);
		}

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
	}
}
1948
1949 #ifdef CONFIG_OF
1950 /* given a slot id, find out the device node representing that slot */
1951 static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
1952 {
1953         struct device_node *np;
1954         const __be32 *addr;
1955         int len;
1956
1957         if (!dev || !dev->of_node)
1958                 return NULL;
1959
1960         for_each_child_of_node(dev->of_node, np) {
1961                 addr = of_get_property(np, "reg", &len);
1962                 if (!addr || (len < sizeof(int)))
1963                         continue;
1964                 if (be32_to_cpup(addr) == slot)
1965                         return np;
1966         }
1967         return NULL;
1968 }
1969
/* Per-slot DT quirk table: property name -> quirk flag bit. */
static struct dw_mci_of_slot_quirks {
	char *quirk;
	int id;
} of_slot_quirks[] = {
	{
		.quirk	= "disable-wp",
		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
	},
};
1979
1980 static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
1981 {
1982         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1983         int quirks = 0;
1984         int idx;
1985
1986         /* get quirks */
1987         for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
1988                 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
1989                         quirks |= of_slot_quirks[idx].id;
1990
1991         return quirks;
1992 }
1993
1994 /* find out bus-width for a given slot */
1995 static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
1996 {
1997         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
1998         u32 bus_wd = 1;
1999
2000         if (!np)
2001                 return 1;
2002
2003         if (of_property_read_u32(np, "bus-width", &bus_wd))
2004                 dev_err(dev, "bus-width property not found, assuming width"
2005                                " as 1\n");
2006         return bus_wd;
2007 }
2008
2009 /* find the write protect gpio for a given slot; or -1 if none specified */
2010 static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
2011 {
2012         struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
2013         int gpio;
2014
2015         if (!np)
2016                 return -EINVAL;
2017
2018         gpio = of_get_named_gpio(np, "wp-gpios", 0);
2019
2020         /* Having a missing entry is valid; return silently */
2021         if (!gpio_is_valid(gpio))
2022                 return -EINVAL;
2023
2024         if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
2025                 dev_warn(dev, "gpio [%d] request failed\n", gpio);
2026                 return -EINVAL;
2027         }
2028
2029         return gpio;
2030 }
2031 #else /* CONFIG_OF */
/* Stubs used when the kernel is built without device-tree support. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
{
	/* no DT: no per-slot quirks */
	return 0;
}
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
{
	/* no DT: default to a 1-bit bus */
	return 1;
}
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
{
	return NULL;
}
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
{
	/* no DT: no write-protect GPIO available */
	return -EINVAL;
}
2048 #endif /* CONFIG_OF */
2049
2050 static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2051 {
2052         struct mmc_host *mmc;
2053         struct dw_mci_slot *slot;
2054         const struct dw_mci_drv_data *drv_data = host->drv_data;
2055         int ctrl_id, ret;
2056         u32 freq[2];
2057         u8 bus_width;
2058
2059         mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2060         if (!mmc)
2061                 return -ENOMEM;
2062
2063         slot = mmc_priv(mmc);
2064         slot->id = id;
2065         slot->mmc = mmc;
2066         slot->host = host;
2067         host->slot[id] = slot;
2068
2069         slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
2070
2071         mmc->ops = &dw_mci_ops;
2072         if (of_property_read_u32_array(host->dev->of_node,
2073                                        "clock-freq-min-max", freq, 2)) {
2074                 mmc->f_min = DW_MCI_FREQ_MIN;
2075                 mmc->f_max = DW_MCI_FREQ_MAX;
2076         } else {
2077                 mmc->f_min = freq[0];
2078                 mmc->f_max = freq[1];
2079         }
2080
2081         if (host->pdata->get_ocr)
2082                 mmc->ocr_avail = host->pdata->get_ocr(id);
2083         else
2084                 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2085
2086         /*
2087          * Start with slot power disabled, it will be enabled when a card
2088          * is detected.
2089          */
2090         if (host->pdata->setpower)
2091                 host->pdata->setpower(id, 0);
2092
2093         if (host->pdata->caps)
2094                 mmc->caps = host->pdata->caps;
2095
2096         if (host->pdata->pm_caps)
2097                 mmc->pm_caps = host->pdata->pm_caps;
2098
2099         if (host->dev->of_node) {
2100                 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2101                 if (ctrl_id < 0)
2102                         ctrl_id = 0;
2103         } else {
2104                 ctrl_id = to_platform_device(host->dev)->id;
2105         }
2106         if (drv_data && drv_data->caps)
2107                 mmc->caps |= drv_data->caps[ctrl_id];
2108
2109         if (host->pdata->caps2)
2110                 mmc->caps2 = host->pdata->caps2;
2111
2112         if (host->pdata->get_bus_wd)
2113                 bus_width = host->pdata->get_bus_wd(slot->id);
2114         else if (host->dev->of_node)
2115                 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
2116         else
2117                 bus_width = 1;
2118
2119         switch (bus_width) {
2120         case 8:
2121                 mmc->caps |= MMC_CAP_8_BIT_DATA;
2122         case 4:
2123                 mmc->caps |= MMC_CAP_4_BIT_DATA;
2124         }
2125
2126         if (host->pdata->blk_settings) {
2127                 mmc->max_segs = host->pdata->blk_settings->max_segs;
2128                 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
2129                 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
2130                 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
2131                 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
2132         } else {
2133                 /* Useful defaults if platform data is unset. */
2134 #ifdef CONFIG_MMC_DW_IDMAC
2135                 mmc->max_segs = host->ring_size;
2136                 mmc->max_blk_size = 65536;
2137                 mmc->max_blk_count = host->ring_size;
2138                 mmc->max_seg_size = 0x1000;
2139                 mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
2140 #else
2141                 mmc->max_segs = 64;
2142                 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
2143                 mmc->max_blk_count = 512;
2144                 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2145                 mmc->max_seg_size = mmc->max_req_size;
2146 #endif /* CONFIG_MMC_DW_IDMAC */
2147         }
2148
2149         if (dw_mci_get_cd(mmc))
2150                 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2151         else
2152                 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
2153
2154         slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
2155
2156         ret = mmc_add_host(mmc);
2157         if (ret)
2158                 goto err_setup_bus;
2159
2160 #if defined(CONFIG_DEBUG_FS)
2161         dw_mci_init_debugfs(slot);
2162 #endif
2163
2164         /* Card initially undetected */
2165         slot->last_detect_state = 0;
2166
2167         return 0;
2168
2169 err_setup_bus:
2170         mmc_free_host(mmc);
2171         return -EINVAL;
2172 }
2173
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
	/* Tear down one slot: platform exit hook, mmc core removal,
	 * then free the mmc_host (which owns the slot structure). */

	/* Shutdown detect IRQ */
	if (slot->host->pdata->exit)
		slot->host->pdata->exit(id);

	/* Debugfs stuff is cleaned up by mmc core */
	mmc_remove_host(slot->mmc);
	slot->host->slot[id] = NULL;
	mmc_free_host(slot->mmc);
}
2185
2186 static void dw_mci_init_dma(struct dw_mci *host)
2187 {
2188         /* Alloc memory for sg translation */
2189         host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
2190                                           &host->sg_dma, GFP_KERNEL);
2191         if (!host->sg_cpu) {
2192                 dev_err(host->dev, "%s: could not alloc DMA memory\n",
2193                         __func__);
2194                 goto no_dma;
2195         }
2196
2197         /* Determine which DMA interface to use */
2198 #ifdef CONFIG_MMC_DW_IDMAC
2199         host->dma_ops = &dw_mci_idmac_ops;
2200         dev_info(host->dev, "Using internal DMA controller.\n");
2201 #endif
2202
2203         if (!host->dma_ops)
2204                 goto no_dma;
2205
2206         if (host->dma_ops->init && host->dma_ops->start &&
2207             host->dma_ops->stop && host->dma_ops->cleanup) {
2208                 if (host->dma_ops->init(host)) {
2209                         dev_err(host->dev, "%s: Unable to initialize "
2210                                 "DMA Controller.\n", __func__);
2211                         goto no_dma;
2212                 }
2213         } else {
2214                 dev_err(host->dev, "DMA initialization not found.\n");
2215                 goto no_dma;
2216         }
2217
2218         host->use_dma = 1;
2219         return;
2220
2221 no_dma:
2222         dev_info(host->dev, "Using PIO mode.\n");
2223         host->use_dma = 0;
2224         return;
2225 }
2226
2227 static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2228 {
2229         unsigned long timeout = jiffies + msecs_to_jiffies(500);
2230         unsigned int ctrl;
2231
2232         mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2233                                 SDMMC_CTRL_DMA_RESET));
2234
2235         /* wait till resets clear */
2236         do {
2237                 ctrl = mci_readl(host, CTRL);
2238                 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2239                               SDMMC_CTRL_DMA_RESET)))
2240                         return true;
2241         } while (time_before(jiffies, timeout));
2242
2243         dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
2244
2245         return false;
2246 }
2247
2248 #ifdef CONFIG_OF
/* Controller-level DT quirk table: property name -> quirk flag bit. */
static struct dw_mci_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk	= "broken-cd",
		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
	},
};
2258
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	/*
	 * Build platform data (struct dw_mci_board) from the controller's
	 * device-tree node.  Returns devm-allocated pdata on success or
	 * an ERR_PTR on failure.
	 */
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int idx, ret;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);
	}

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				&pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
				"assuming 1 slot is available\n");
		pdata->num_slots = 1;
	}

	/* get quirks */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
				"value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* let the SoC-specific driver hook parse its own properties */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
		if (ret)
			return ERR_PTR(ret);
	}

	if (of_find_property(np, "keep-power-in-suspend", NULL))
		pdata->pm_caps |= MMC_PM_KEEP_POWER;

	if (of_find_property(np, "enable-sdio-wakeup", NULL))
		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;

	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;

	return pdata;
}
2319
2320 #else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
	/* No OF support: platform data must be supplied by board code. */
	return ERR_PTR(-EINVAL);
}
2325 #endif /* CONFIG_OF */
2326
2327 int dw_mci_probe(struct dw_mci *host)
2328 {
2329         const struct dw_mci_drv_data *drv_data = host->drv_data;
2330         int width, i, ret = 0;
2331         u32 fifo_size;
2332         int init_slots = 0;
2333
2334         if (!host->pdata) {
2335                 host->pdata = dw_mci_parse_dt(host);
2336                 if (IS_ERR(host->pdata)) {
2337                         dev_err(host->dev, "platform data not available\n");
2338                         return -EINVAL;
2339                 }
2340         }
2341
2342         if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
2343                 dev_err(host->dev,
2344                         "Platform data must supply select_slot function\n");
2345                 return -ENODEV;
2346         }
2347
2348         host->biu_clk = devm_clk_get(host->dev, "biu");
2349         if (IS_ERR(host->biu_clk)) {
2350                 dev_dbg(host->dev, "biu clock not available\n");
2351         } else {
2352                 ret = clk_prepare_enable(host->biu_clk);
2353                 if (ret) {
2354                         dev_err(host->dev, "failed to enable biu clock\n");
2355                         return ret;
2356                 }
2357         }
2358
2359         host->ciu_clk = devm_clk_get(host->dev, "ciu");
2360         if (IS_ERR(host->ciu_clk)) {
2361                 dev_dbg(host->dev, "ciu clock not available\n");
2362                 host->bus_hz = host->pdata->bus_hz;
2363         } else {
2364                 ret = clk_prepare_enable(host->ciu_clk);
2365                 if (ret) {
2366                         dev_err(host->dev, "failed to enable ciu clock\n");
2367                         goto err_clk_biu;
2368                 }
2369
2370                 if (host->pdata->bus_hz) {
2371                         ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
2372                         if (ret)
2373                                 dev_warn(host->dev,
2374                                          "Unable to set bus rate to %ul\n",
2375                                          host->pdata->bus_hz);
2376                 }
2377                 host->bus_hz = clk_get_rate(host->ciu_clk);
2378         }
2379
2380         if (drv_data && drv_data->init) {
2381                 ret = drv_data->init(host);
2382                 if (ret) {
2383                         dev_err(host->dev,
2384                                 "implementation specific init failed\n");
2385                         goto err_clk_ciu;
2386                 }
2387         }
2388
2389         if (drv_data && drv_data->setup_clock) {
2390                 ret = drv_data->setup_clock(host);
2391                 if (ret) {
2392                         dev_err(host->dev,
2393                                 "implementation specific clock setup failed\n");
2394                         goto err_clk_ciu;
2395                 }
2396         }
2397
2398         host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
2399         if (IS_ERR(host->vmmc)) {
2400                 ret = PTR_ERR(host->vmmc);
2401                 if (ret == -EPROBE_DEFER)
2402                         goto err_clk_ciu;
2403
2404                 dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
2405                 host->vmmc = NULL;
2406         } else {
2407                 ret = regulator_enable(host->vmmc);
2408                 if (ret) {
2409                         if (ret != -EPROBE_DEFER)
2410                                 dev_err(host->dev,
2411                                         "regulator_enable fail: %d\n", ret);
2412                         goto err_clk_ciu;
2413                 }
2414         }
2415
2416         if (!host->bus_hz) {
2417                 dev_err(host->dev,
2418                         "Platform data must supply bus speed\n");
2419                 ret = -ENODEV;
2420                 goto err_regulator;
2421         }
2422
2423         host->quirks = host->pdata->quirks;
2424
2425         spin_lock_init(&host->lock);
2426         INIT_LIST_HEAD(&host->queue);
2427
2428         /*
2429          * Get the host data width - this assumes that HCON has been set with
2430          * the correct values.
2431          */
2432         i = (mci_readl(host, HCON) >> 7) & 0x7;
2433         if (!i) {
2434                 host->push_data = dw_mci_push_data16;
2435                 host->pull_data = dw_mci_pull_data16;
2436                 width = 16;
2437                 host->data_shift = 1;
2438         } else if (i == 2) {
2439                 host->push_data = dw_mci_push_data64;
2440                 host->pull_data = dw_mci_pull_data64;
2441                 width = 64;
2442                 host->data_shift = 3;
2443         } else {
2444                 /* Check for a reserved value, and warn if it is */
2445                 WARN((i != 1),
2446                      "HCON reports a reserved host data width!\n"
2447                      "Defaulting to 32-bit access.\n");
2448                 host->push_data = dw_mci_push_data32;
2449                 host->pull_data = dw_mci_pull_data32;
2450                 width = 32;
2451                 host->data_shift = 2;
2452         }
2453
2454         /* Reset all blocks */
2455         if (!mci_wait_reset(host->dev, host))
2456                 return -ENODEV;
2457
2458         host->dma_ops = host->pdata->dma_ops;
2459         dw_mci_init_dma(host);
2460
2461         /* Clear the interrupts for the host controller */
2462         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2463         mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
2464
2465         /* Put in max timeout */
2466         mci_writel(host, TMOUT, 0xFFFFFFFF);
2467
2468         /*
2469          * FIFO threshold settings  RxMark  = fifo_size / 2 - 1,
2470          *                          Tx Mark = fifo_size / 2 DMA Size = 8
2471          */
2472         if (!host->pdata->fifo_depth) {
2473                 /*
2474                  * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
2475                  * have been overwritten by the bootloader, just like we're
2476                  * about to do, so if you know the value for your hardware, you
2477                  * should put it in the platform data.
2478                  */
2479                 fifo_size = mci_readl(host, FIFOTH);
2480                 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
2481         } else {
2482                 fifo_size = host->pdata->fifo_depth;
2483         }
2484         host->fifo_depth = fifo_size;
2485         host->fifoth_val =
2486                 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
2487         mci_writel(host, FIFOTH, host->fifoth_val);
2488
2489         /* disable clock to CIU */
2490         mci_writel(host, CLKENA, 0);
2491         mci_writel(host, CLKSRC, 0);
2492
2493         /*
2494          * In 2.40a spec, Data offset is changed.
2495          * Need to check the version-id and set data-offset for DATA register.
2496          */
2497         host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
2498         dev_info(host->dev, "Version ID is %04x\n", host->verid);
2499
2500         if (host->verid < DW_MMC_240A)
2501                 host->data_offset = DATA_OFFSET;
2502         else
2503                 host->data_offset = DATA_240A_OFFSET;
2504
2505         tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2506         host->card_workqueue = alloc_workqueue("dw-mci-card",
2507                         WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2508         if (!host->card_workqueue) {
2509                 ret = -ENOMEM;
2510                 goto err_dmaunmap;
2511         }
2512         INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2513         ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
2514                                host->irq_flags, "dw-mci", host);
2515         if (ret)
2516                 goto err_workqueue;
2517
2518         if (host->pdata->num_slots)
2519                 host->num_slots = host->pdata->num_slots;
2520         else
2521                 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
2522
2523         /*
2524          * Enable interrupts for command done, data over, data empty, card det,
2525          * receive ready and error such as transmit, receive timeout, crc error
2526          */
2527         mci_writel(host, RINTSTS, 0xFFFFFFFF);
2528         mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
2529                    SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2530                    DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
2531         mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
2532
2533         dev_info(host->dev, "DW MMC controller at irq %d, "
2534                  "%d bit host data width, "
2535                  "%u deep fifo\n",
2536                  host->irq, width, fifo_size);
2537
2538         /* We need at least one slot to succeed */
2539         for (i = 0; i < host->num_slots; i++) {
2540                 ret = dw_mci_init_slot(host, i);
2541                 if (ret)
2542                         dev_dbg(host->dev, "slot %d init failed\n", i);
2543                 else
2544                         init_slots++;
2545         }
2546
2547         if (init_slots) {
2548                 dev_info(host->dev, "%d slots initialized\n", init_slots);
2549         } else {
2550                 dev_dbg(host->dev, "attempted to initialize %d slots, "
2551                                         "but failed on all\n", host->num_slots);
2552                 goto err_workqueue;
2553         }
2554
2555         if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
2556                 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
2557
2558         return 0;
2559
2560 err_workqueue:
2561         destroy_workqueue(host->card_workqueue);
2562
2563 err_dmaunmap:
2564         if (host->use_dma && host->dma_ops->exit)
2565                 host->dma_ops->exit(host);
2566
2567 err_regulator:
2568         if (host->vmmc)
2569                 regulator_disable(host->vmmc);
2570
2571 err_clk_ciu:
2572         if (!IS_ERR(host->ciu_clk))
2573                 clk_disable_unprepare(host->ciu_clk);
2574
2575 err_clk_biu:
2576         if (!IS_ERR(host->biu_clk))
2577                 clk_disable_unprepare(host->biu_clk);
2578
2579         return ret;
2580 }
2581 EXPORT_SYMBOL(dw_mci_probe);
2582
/*
 * dw_mci_remove - tear down a DW MMC host previously set up by dw_mci_probe()
 * @host: host controller instance to remove
 *
 * Teardown mirrors probe in reverse order: interrupts are acked and masked
 * first so no handler can run while slots are being removed, then each
 * per-slot MMC host is unregistered, the card clock is gated, and finally
 * the card-detect workqueue, DMA resources, vmmc regulator and interface
 * clocks are released.
 */
void dw_mci_remove(struct dw_mci *host)
{
	int i;

	/* Ack all pending interrupts and mask everything before teardown. */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		/* Slots that failed to init in probe are left NULL. */
		if (host->slot[i])
			dw_mci_cleanup_slot(host->slot[i], i);
	}

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	if (host->vmmc)
		regulator_disable(host->vmmc);

	/* The clk handles may be error pointers if probe ran without them. */
	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
}
EXPORT_SYMBOL(dw_mci_remove);
2615
2616
2617
2618 #ifdef CONFIG_PM_SLEEP
2619 /*
2620  * TODO: we should probably disable the clock to the card in the suspend path.
2621  */
2622 int dw_mci_suspend(struct dw_mci *host)
2623 {
2624         int i, ret = 0;
2625
2626         for (i = 0; i < host->num_slots; i++) {
2627                 struct dw_mci_slot *slot = host->slot[i];
2628                 if (!slot)
2629                         continue;
2630                 ret = mmc_suspend_host(slot->mmc);
2631                 if (ret < 0) {
2632                         while (--i >= 0) {
2633                                 slot = host->slot[i];
2634                                 if (slot)
2635                                         mmc_resume_host(host->slot[i]->mmc);
2636                         }
2637                         return ret;
2638                 }
2639         }
2640
2641         if (host->vmmc)
2642                 regulator_disable(host->vmmc);
2643
2644         return 0;
2645 }
2646 EXPORT_SYMBOL(dw_mci_suspend);
2647
/*
 * dw_mci_resume - restore controller state after a system suspend
 * @host: host controller instance to resume
 *
 * Re-enables the vmmc supply, resets the controller, re-initializes DMA
 * and reprograms the registers whose contents were lost across suspend
 * (FIFOTH, TMOUT, interrupt mask), then resumes each slot.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int dw_mci_resume(struct dw_mci *host)
{
	int i, ret;

	if (host->vmmc) {
		ret = regulator_enable(host->vmmc);
		if (ret) {
			dev_err(host->dev,
				"failed to enable regulator: %d\n", ret);
			return ret;
		}
	}

	/* Controller must come out of reset before it can be programmed. */
	if (!mci_wait_reset(host->dev, host)) {
		ret = -ENODEV;
		return ret;
	}

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * Clear any stale status bits, then re-enable the same interrupt
	 * set that probe programmed.
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (!slot)
			continue;
		/* Slots that kept power need their bus reconfigured by hand. */
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		}

		ret = mmc_resume_host(host->slot[i]->mmc);
		if (ret < 0)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(dw_mci_resume);
2701 #endif /* CONFIG_PM_SLEEP */
2702
/*
 * Module init: nothing to register here.  Per-device setup happens in
 * dw_mci_probe(), which is called by the platform/PCI glue drivers.
 */
static int __init dw_mci_init(void)
{
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
	return 0;
}
2708
/*
 * Module exit: intentionally empty.  Device teardown is handled by
 * dw_mci_remove() via the bus glue drivers; the stub only exists so the
 * module can be unloaded.
 */
static void __exit dw_mci_exit(void)
{
}
2712
/* Module entry/exit hooks and metadata for the shared dw_mmc core. */
module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");