2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/blkdev.h>
15 #include <linux/clk.h>
16 #include <linux/debugfs.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/ioport.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/seq_file.h>
26 #include <linux/slab.h>
27 #include <linux/stat.h>
28 #include <linux/delay.h>
29 #include <linux/irq.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/dw_mmc.h>
33 #include <linux/bitops.h>
34 #include <linux/regulator/consumer.h>
35 #include <linux/workqueue.h>
37 #include <linux/of_gpio.h>
41 /* Common flag combinations */
/* Interrupt-status bits that indicate a failed data transfer.
 * NOTE(review): the continuation of this macro (and of
 * DW_MCI_CMD_ERROR_FLAGS below) is missing from this copy of the file —
 * restore from the upstream source before building. */
42 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
43 SDMMC_INT_HTO | SDMMC_INT_SBE | \
/* Interrupt-status bits that indicate a failed command. */
45 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
/* Union of all error conditions, plus hardware-locked-error. */
47 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
48 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
/* Values for host->dir_status: direction of the current data transfer. */
49 #define DW_MCI_SEND_STATUS 1
50 #define DW_MCI_RECV_STATUS 2
/* Transfers smaller than this many bytes use PIO rather than DMA
 * (checked in dw_mci_pre_dma_transfer). */
51 #define DW_MCI_DMA_THRESHOLD 16
53 #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
54 #define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
56 #ifdef CONFIG_MMC_DW_IDMAC
/* All internal-DMAC interrupt-status bits, used to ack everything at once. */
57 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
58 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
59 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
/* Hardware DMA descriptor for the internal DMAC (IDMAC) descriptor ring.
 * NOTE(review): the "struct idmac_desc {" opening line is missing from
 * this copy — des0..des3 below are its four 32-bit fields. */
63 u32 des0; /* Control Descriptor */
64 #define IDMAC_DES0_DIC BIT(1)
65 #define IDMAC_DES0_LD BIT(2)
66 #define IDMAC_DES0_FD BIT(3)
67 #define IDMAC_DES0_CH BIT(4)
68 #define IDMAC_DES0_ER BIT(5)
69 #define IDMAC_DES0_CES BIT(30)
70 #define IDMAC_DES0_OWN BIT(31)
72 u32 des1; /* Buffer sizes */
/* Store a 13-bit buffer-1 length into des1 without disturbing the
 * buffer-2 size field (mask 0x03ffe000). */
73 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
74 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
76 u32 des2; /* buffer 1 physical address */
78 u32 des3; /* buffer 2 physical address */
80 #endif /* CONFIG_MMC_DW_IDMAC */
/* Standard 64-byte tuning block pattern for 4-bit bus width, as defined
 * by the SD/eMMC specifications; compared against data returned by
 * CMD19/CMD21 during execute_tuning. */
82 static const u8 tuning_blk_pattern_4bit[] = {
83 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
84 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
85 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
86 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
87 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
88 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
89 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
90 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Standard 128-byte tuning block pattern for 8-bit bus width (eMMC
 * HS200 tuning, CMD21). */
93 static const u8 tuning_blk_pattern_8bit[] = {
94 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
95 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
96 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
97 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
98 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
99 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
100 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
101 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
102 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
103 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
104 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
105 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
106 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
107 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
108 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
109 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
112 #if defined(CONFIG_DEBUG_FS)
/* debugfs "req" file: dump the slot's in-flight mmc_request (command,
 * data, stop command) under the host lock so the snapshot is consistent. */
113 static int dw_mci_req_show(struct seq_file *s, void *v)
115 struct dw_mci_slot *slot = s->private;
116 struct mmc_request *mrq;
117 struct mmc_command *cmd;
118 struct mmc_command *stop;
119 struct mmc_data *data;
121 /* Make sure we get a consistent snapshot */
122 spin_lock_bh(&slot->host->lock);
132 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
133 cmd->opcode, cmd->arg, cmd->flags,
134 cmd->resp[0], cmd->resp[1], cmd->resp[2],
/* NOTE(review): resp[2] is printed twice and resp[3] never — the last
 * argument here should almost certainly be cmd->resp[3]. */
135 cmd->resp[2], cmd->error);
137 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
138 data->bytes_xfered, data->blocks,
139 data->blksz, data->flags, data->error);
142 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
143 stop->opcode, stop->arg, stop->flags,
144 stop->resp[0], stop->resp[1], stop->resp[2],
/* NOTE(review): same resp[2]/resp[3] typo as above for the stop command. */
145 stop->resp[2], stop->error);
148 spin_unlock_bh(&slot->host->lock);
/* seq_file open hook: bind dw_mci_req_show to the slot stored in i_private. */
153 static int dw_mci_req_open(struct inode *inode, struct file *file)
155 return single_open(file, dw_mci_req_show, inode->i_private);
/* File operations for the debugfs "req" entry (read-only seq_file). */
158 static const struct file_operations dw_mci_req_fops = {
159 .owner = THIS_MODULE,
160 .open = dw_mci_req_open,
163 .release = single_release,
/* debugfs "regs" file: dump a handful of controller registers.
 * NOTE(review): SDMMC_STATUS etc. are the register *offset* macros, so as
 * written this prints the offsets, not live register contents — confirm
 * whether mci_readl(host, ...) was intended (known quirk of this driver
 * version). */
166 static int dw_mci_regs_show(struct seq_file *s, void *v)
168 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
169 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
170 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
171 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
172 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
173 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
/* seq_file open hook: bind dw_mci_regs_show to the host in i_private. */
178 static int dw_mci_regs_open(struct inode *inode, struct file *file)
180 return single_open(file, dw_mci_regs_show, inode->i_private);
/* File operations for the debugfs "regs" entry (read-only seq_file). */
183 static const struct file_operations dw_mci_regs_fops = {
184 .owner = THIS_MODULE,
185 .open = dw_mci_regs_open,
188 .release = single_release,
/* Create per-slot debugfs entries (regs, req, state, pending/completed
 * events) under the mmc core's debugfs root for this host. Failures are
 * reported but non-fatal: debugfs is diagnostics only. */
191 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
193 struct mmc_host *mmc = slot->mmc;
194 struct dw_mci *host = slot->host;
198 root = mmc->debugfs_root;
202 node = debugfs_create_file("regs", S_IRUSR, root, host,
207 node = debugfs_create_file("req", S_IRUSR, root, slot,
212 node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
216 node = debugfs_create_x32("pending_events", S_IRUSR, root,
217 (u32 *)&host->pending_events);
221 node = debugfs_create_x32("completed_events", S_IRUSR, root,
222 (u32 *)&host->completed_events);
229 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
231 #endif /* defined(CONFIG_DEBUG_FS) */
/* Program the data/response timeout register to its maximum value; the
 * driver relies on the mmc core's software timeout instead. */
233 static void dw_mci_set_timeout(struct dw_mci *host)
235 /* timeout (maximum) */
236 mci_writel(host, TMOUT, 0xffffffff);
/* Translate an mmc_command into the SDMMC_CMD register value: response
 * expected/long/CRC bits, data-transfer direction/stream bits, and any
 * platform-specific tweaks via drv_data->prepare_command. Returns the
 * raw CMD register value (without the START bit). */
239 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
241 struct mmc_data *data;
242 struct dw_mci_slot *slot = mmc_priv(mmc);
243 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
/* Marked in-progress until the command-done interrupt fills in a result. */
245 cmd->error = -EINPROGRESS;
249 if (cmdr == MMC_STOP_TRANSMISSION)
250 cmdr |= SDMMC_CMD_STOP;
/* All other commands wait for any previous data transfer to finish. */
252 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
254 if (cmd->flags & MMC_RSP_PRESENT) {
255 /* We expect a response, so set this bit */
256 cmdr |= SDMMC_CMD_RESP_EXP;
257 if (cmd->flags & MMC_RSP_136)
258 cmdr |= SDMMC_CMD_RESP_LONG;
261 if (cmd->flags & MMC_RSP_CRC)
262 cmdr |= SDMMC_CMD_RESP_CRC;
266 cmdr |= SDMMC_CMD_DAT_EXP;
267 if (data->flags & MMC_DATA_STREAM)
268 cmdr |= SDMMC_CMD_STRM_MODE;
269 if (data->flags & MMC_DATA_WRITE)
270 cmdr |= SDMMC_CMD_DAT_WR;
/* Give the platform glue a chance to adjust the command word. */
273 if (drv_data && drv_data->prepare_command)
274 drv_data->prepare_command(slot->host, &cmdr);
/* Latch the argument and fire the command by writing CMD with the START
 * bit; completion is signalled later via the command-done interrupt. */
279 static void dw_mci_start_command(struct dw_mci *host,
280 struct mmc_command *cmd, u32 cmd_flags)
284 "start command: ARGR=0x%08x CMDR=0x%08x\n",
285 cmd->arg, cmd_flags);
287 mci_writel(host, CMDARG, cmd->arg);
290 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
/* Issue the pre-prepared stop command (host->stop_cmdr was built by
 * dw_mci_prepare_command when the request started). */
293 static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
295 dw_mci_start_command(host, data->stop, host->stop_cmdr);
298 /* DMA interface functions */
/* Abort an in-flight transfer: halt and clean up the DMA engine when DMA
 * is in use; in PIO mode the transfer was already stopped by the IRQ
 * handler, so just mark XFER_COMPLETE so the tasklet can advance. */
299 static void dw_mci_stop_dma(struct dw_mci *host)
301 if (host->using_dma) {
302 host->dma_ops->stop(host);
303 host->dma_ops->cleanup(host);
305 /* Data transfer was stopped by the interrupt handler */
306 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/* Map the mmc_data direction flag onto the DMA-API direction constant. */
310 static int dw_mci_get_dma_dir(struct mmc_data *data)
312 if (data->flags & MMC_DATA_WRITE)
313 return DMA_TO_DEVICE;
315 return DMA_FROM_DEVICE;
318 #ifdef CONFIG_MMC_DW_IDMAC
/* Unmap the scatterlist after a DMA transfer — but only if it was mapped
 * by this driver (host_cookie == 0); otherwise the mmc core's pre_req/
 * post_req path owns the mapping and will unmap it itself. */
319 static void dw_mci_dma_cleanup(struct dw_mci *host)
321 struct mmc_data *data = host->data;
324 if (!data->host_cookie)
325 dma_unmap_sg(host->dev,
328 dw_mci_get_dma_dir(data));
/* Stop the internal DMAC: detach it from the data path (CTRL) and reset
 * it, then clear the enable/fixed-burst bits in BMOD. */
331 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
335 /* Disable and reset the IDMAC interface */
336 temp = mci_readl(host, CTRL);
337 temp &= ~SDMMC_CTRL_USE_IDMAC;
338 temp |= SDMMC_CTRL_DMA_RESET;
339 mci_writel(host, CTRL, temp);
341 /* Stop the IDMAC running */
342 temp = mci_readl(host, BMOD);
343 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
344 mci_writel(host, BMOD, temp);
/* IDMAC transfer-done callback: unmap buffers, flag XFER_COMPLETE and
 * kick the state-machine tasklet. */
347 static void dw_mci_idmac_complete_dma(struct dw_mci *host)
349 struct mmc_data *data = host->data;
351 dev_vdbg(host->dev, "DMA complete\n");
353 host->dma_ops->cleanup(host);
356 * If the card was removed, data will be NULL. No point in trying to
357 * send the stop command or waiting for NBUSY in this case.
360 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
361 tasklet_schedule(&host->tasklet);
/* Fill the pre-allocated IDMAC descriptor ring from the (already
 * DMA-mapped) scatterlist: one chained descriptor per sg entry, with
 * first/last-descriptor bits set on the ends and per-descriptor
 * interrupts suppressed except on the final one. */
365 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
369 struct idmac_desc *desc = host->sg_cpu;
371 for (i = 0; i < sg_len; i++, desc++) {
372 unsigned int length = sg_dma_len(&data->sg[i]);
373 u32 mem_addr = sg_dma_address(&data->sg[i]);
375 /* Set the OWN bit and disable interrupts for this descriptor */
376 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
379 IDMAC_SET_BUFFER1_SIZE(desc, length);
381 /* Physical address to DMA to/from */
382 desc->des2 = mem_addr;
385 /* Set first descriptor */
387 desc->des0 |= IDMAC_DES0_FD;
389 /* Set last descriptor */
/* Re-point at the final descriptor written above; i == sg_len here. */
390 desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
391 desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
392 desc->des0 |= IDMAC_DES0_LD;
/* Start an IDMAC transfer: build the descriptor ring for sg_len entries,
 * route the controller through the internal DMAC, enable it, and poke
 * the poll-demand register to begin fetching descriptors. */
397 static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
401 dw_mci_translate_sglist(host, host->data, sg_len);
403 /* Select IDMAC interface */
404 temp = mci_readl(host, CTRL);
405 temp |= SDMMC_CTRL_USE_IDMAC;
406 mci_writel(host, CTRL, temp);
410 /* Enable the IDMAC */
411 temp = mci_readl(host, BMOD);
412 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
413 mci_writel(host, BMOD, temp);
415 /* Start it running */
416 mci_writel(host, PLDMND, 1);
/* One-time IDMAC setup: chain one page of descriptors into a forward-
 * linked ring (end-of-ring marker on the last), software-reset the DMAC,
 * ack/unmask its interrupts, and program the descriptor base address. */
419 static int dw_mci_idmac_init(struct dw_mci *host)
421 struct idmac_desc *p;
424 /* Number of descriptors in the ring buffer */
425 host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
427 /* Forward link the descriptor list */
428 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
429 p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
431 /* Set the last descriptor as the end-of-ring descriptor */
432 p->des3 = host->sg_dma;
433 p->des0 = IDMAC_DES0_ER;
435 mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
437 /* Mask out interrupts - get Tx & Rx complete only */
438 mci_writel(host, IDSTS, IDMAC_INT_CLR);
439 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
442 /* Set the descriptor base address */
443 mci_writel(host, DBADDR, host->sg_dma);
/* DMA operations vtable used when the internal DMAC is compiled in. */
447 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
448 .init = dw_mci_idmac_init,
449 .start = dw_mci_idmac_start_dma,
450 .stop = dw_mci_idmac_stop_dma,
451 .complete = dw_mci_idmac_complete_dma,
452 .cleanup = dw_mci_dma_cleanup,
454 #endif /* CONFIG_MMC_DW_IDMAC */
/* Decide whether a transfer is DMA-able and, if so, map its scatterlist.
 * Returns the mapped sg count, a previously cached count when called from
 * the request path after pre_req already mapped it, or a negative value
 * for transfers that must fall back to PIO (too short, or any segment
 * not word-aligned in offset or length). 'next' is nonzero when called
 * from the pre_req (lookahead) path. */
456 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
457 struct mmc_data *data,
460 struct scatterlist *sg;
461 unsigned int i, sg_len;
/* Request path: reuse the mapping made by pre_req, if any. */
463 if (!next && data->host_cookie)
464 return data->host_cookie;
467 * We don't do DMA on "complex" transfers, i.e. with
468 * non-word-aligned buffers or lengths. Also, we don't bother
469 * with all the DMA setup overhead for short transfers.
471 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
477 for_each_sg(data->sg, sg, data->sg_len, i) {
478 if (sg->offset & 3 || sg->length & 3)
482 sg_len = dma_map_sg(host->dev,
485 dw_mci_get_dma_dir(data));
/* Cache the count so the request path (and post_req) can find it. */
490 data->host_cookie = sg_len;
/* mmc_host_ops.pre_req: map the next request's buffers ahead of time so
 * the DMA setup overlaps with the current transfer. A stale cookie is
 * cleared first; on mapping failure the cookie stays 0 so the request
 * path falls back to mapping (or PIO) itself. */
495 static void dw_mci_pre_req(struct mmc_host *mmc,
496 struct mmc_request *mrq,
499 struct dw_mci_slot *slot = mmc_priv(mmc);
500 struct mmc_data *data = mrq->data;
502 if (!slot->host->use_dma || !data)
505 if (data->host_cookie) {
506 data->host_cookie = 0;
510 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
511 data->host_cookie = 0;
/* mmc_host_ops.post_req: undo the mapping made by pre_req (cookie != 0)
 * and clear the cookie so dw_mci_dma_cleanup won't double-unmap. */
514 static void dw_mci_post_req(struct mmc_host *mmc,
515 struct mmc_request *mrq,
518 struct dw_mci_slot *slot = mmc_priv(mmc);
519 struct mmc_data *data = mrq->data;
521 if (!slot->host->use_dma || !data)
524 if (data->host_cookie)
525 dma_unmap_sg(slot->host->dev,
528 dw_mci_get_dma_dir(data));
529 data->host_cookie = 0;
/* Tune the FIFOTH register (DMA burst size MSIZE plus RX/TX watermarks)
 * for the current block size: pick the largest burst size from mszs[]
 * that evenly divides both the block depth and the TX watermark
 * complement. Only meaningful for the internal DMAC, hence the #ifdef. */
532 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
534 #ifdef CONFIG_MMC_DW_IDMAC
535 unsigned int blksz = data->blksz;
/* Candidate burst sizes, in FIFO words; indexed by the MSIZE encoding. */
536 const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
537 u32 fifo_width = 1 << host->data_shift;
538 u32 blksz_depth = blksz / fifo_width, fifoth_val;
539 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
540 int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;
542 tx_wmark = (host->fifo_depth) / 2;
543 tx_wmark_invers = host->fifo_depth - tx_wmark;
547 * if blksz is not a multiple of the FIFO width
549 if (blksz % fifo_width) {
556 if (!((blksz_depth % mszs[idx]) ||
557 (tx_wmark_invers % mszs[idx]))) {
559 rx_wmark = mszs[idx] - 1;
564 * If idx is '0', it won't be tried
565 * Thus, initial values are uesed
568 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
569 mci_writel(host, FIFOTH, fifoth_val);
/* Configure the card-read-threshold (CDTHRCTL): for high-speed timings
 * (HS200 / SDR104) enable the threshold at one block's worth of FIFO
 * words so reads only start when the FIFO can absorb a full block;
 * otherwise disable it. Read-only path (WARN if called for a write). */
573 static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
575 unsigned int blksz = data->blksz;
576 u32 blksz_depth, fifo_depth;
579 WARN_ON(!(data->flags & MMC_DATA_READ));
581 if (host->timing != MMC_TIMING_MMC_HS200 &&
582 host->timing != MMC_TIMING_UHS_SDR104)
585 blksz_depth = blksz / (1 << host->data_shift);
586 fifo_depth = host->fifo_depth;
588 if (blksz_depth > fifo_depth)
592 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
593 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
594 * Currently just choose blksz.
597 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1));
601 mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0));
/* Try to set up the current data transfer via DMA. Returns 0 on success;
 * a nonzero return tells the caller (dw_mci_submit_data) to fall back to
 * PIO. On success: FIFOTH is retuned if the block size changed, the DMA
 * interface is enabled, RX/TX IRQs are masked, and the engine started. */
604 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
611 /* If we don't have a channel, we can't do DMA */
615 sg_len = dw_mci_pre_dma_transfer(host, data, 0);
617 host->dma_ops->stop(host);
624 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
625 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
629 * Decide the MSIZE and RX/TX Watermark.
630 * If current block size is same with previous size,
631 * no need to update fifoth.
633 if (host->prev_blksz != data->blksz)
634 dw_mci_adjust_fifoth(host, data);
636 /* Enable the DMA interface */
637 temp = mci_readl(host, CTRL);
638 temp |= SDMMC_CTRL_DMA_ENABLE;
639 mci_writel(host, CTRL, temp);
641 /* Disable RX/TX IRQs, let DMA handle it */
642 temp = mci_readl(host, INTMASK);
643 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
644 mci_writel(host, INTMASK, temp);
646 host->dma_ops->start(host, sg_len);
/* Kick off a data transfer: record the direction, program the read
 * threshold for reads, and attempt DMA. If DMA setup fails, fall back to
 * PIO: start an sg_miter over the buffers, unmask RX/TX FIFO IRQs,
 * disable the DMA interface, and restore the default FIFOTH (invalidating
 * prev_blksz so a later DMA transfer retunes it). */
651 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
655 data->error = -EINPROGRESS;
661 if (data->flags & MMC_DATA_READ) {
662 host->dir_status = DW_MCI_RECV_STATUS;
663 dw_mci_ctrl_rd_thld(host, data);
665 host->dir_status = DW_MCI_SEND_STATUS;
668 if (dw_mci_submit_data_dma(host, data)) {
/* PIO fallback path. ATOMIC because the FIFO is drained/filled from
 * interrupt context. */
669 int flags = SG_MITER_ATOMIC;
670 if (host->data->flags & MMC_DATA_READ)
671 flags |= SG_MITER_TO_SG;
673 flags |= SG_MITER_FROM_SG;
675 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
677 host->part_buf_start = 0;
678 host->part_buf_count = 0;
680 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
681 temp = mci_readl(host, INTMASK);
682 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
683 mci_writel(host, INTMASK, temp);
685 temp = mci_readl(host, CTRL);
686 temp &= ~SDMMC_CTRL_DMA_ENABLE;
687 mci_writel(host, CTRL, temp);
690 * Use the initial fifoth_val for PIO mode.
691 * If next issued data may be transfered by DMA mode,
692 * prev_blksz should be invalidated.
694 mci_writel(host, FIFOTH, host->fifoth_val);
695 host->prev_blksz = 0;
698 * Keep the current block size.
699 * It will be used to decide whether to update
700 * fifoth register next time.
702 host->prev_blksz = data->blksz;
/* Synchronously issue a controller-internal command (e.g. clock update)
 * and busy-wait up to 500 ms for the hardware to clear the START bit;
 * logs an error on timeout rather than propagating it. */
706 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
708 struct dw_mci *host = slot->host;
709 unsigned long timeout = jiffies + msecs_to_jiffies(500);
710 unsigned int cmd_status = 0;
712 mci_writel(host, CMDARG, arg);
714 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
716 while (time_before(jiffies, timeout)) {
717 cmd_status = mci_readl(host, CMD);
718 if (!(cmd_status & SDMMC_CMD_START))
721 dev_err(&slot->mmc->class_dev,
722 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
723 cmd, arg, cmd_status);
/* Apply the slot's requested clock and bus width to the controller.
 * A clock of 0 gates the card clock; otherwise, when the rate changed
 * (or force_clkinit is set), the CLKDIV divider is recomputed, the clock
 * is disabled, reprogrammed and re-enabled — each register change being
 * latched into the card-clock domain via an UPD_CLK internal command.
 * Low-power clock gating is enabled only when SDIO IRQs are not in use. */
726 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
728 struct dw_mci *host = slot->host;
729 unsigned int clock = slot->clock;
734 mci_writel(host, CLKENA, 0);
736 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
737 } else if (clock != host->current_speed || force_clkinit) {
738 div = host->bus_hz / clock;
739 if (host->bus_hz % clock && host->bus_hz > clock)
741 * move the + 1 after the divide to prevent
742 * over-clocking the card.
/* CLKDIV divides by 2*div; div == 0 means bypass (bus_hz == clock). */
746 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
748 if ((clock << div) != slot->__clk_old || force_clkinit)
749 dev_info(&slot->mmc->class_dev,
750 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
751 slot->id, host->bus_hz, clock,
752 div ? ((host->bus_hz / div) >> 1) :
756 mci_writel(host, CLKENA, 0);
757 mci_writel(host, CLKSRC, 0);
761 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
763 /* set clock to desired speed */
764 mci_writel(host, CLKDIV, div);
768 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
770 /* enable clock; only low power if no SDIO */
771 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
772 if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
773 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
774 mci_writel(host, CLKENA, clk_en_a);
778 SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
780 /* keep the clock with reflecting clock dividor */
781 slot->__clk_old = clock << div;
784 host->current_speed = clock;
786 /* Set the current slot bus width */
787 mci_writel(host, CTYPE, (slot->ctype << slot->id));
/* Begin executing one command of a request on the given slot: select the
 * slot, reset per-request event/status state, program byte/block counts
 * for data commands, build the CMD word (adding the 80-cycle INIT
 * sequence for the card's very first command), submit any data, fire the
 * command, and pre-compute the stop command word for later use. Caller
 * holds host->lock. */
790 static void __dw_mci_start_request(struct dw_mci *host,
791 struct dw_mci_slot *slot,
792 struct mmc_command *cmd)
794 struct mmc_request *mrq;
795 struct mmc_data *data;
799 if (host->pdata->select_slot)
800 host->pdata->select_slot(slot->id);
802 host->cur_slot = slot;
805 host->pending_events = 0;
806 host->completed_events = 0;
807 host->data_status = 0;
811 dw_mci_set_timeout(host);
812 mci_writel(host, BYTCNT, data->blksz*data->blocks);
813 mci_writel(host, BLKSIZ, data->blksz);
816 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
818 /* this is the first command, send the initialization clock */
819 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
820 cmdflags |= SDMMC_CMD_INIT;
823 dw_mci_submit_data(host, data);
827 dw_mci_start_command(host, cmd, cmdflags);
830 host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
/* Start a queued request, preferring its set-block-count (CMD23) command
 * when present; otherwise start with the main command. */
833 static void dw_mci_start_request(struct dw_mci *host,
834 struct dw_mci_slot *slot)
836 struct mmc_request *mrq = slot->mrq;
837 struct mmc_command *cmd;
839 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
840 __dw_mci_start_request(host, slot, cmd);
843 /* must be called with host->lock held */
/* Hand a request to the host state machine: start it immediately if the
 * controller is idle, otherwise park the slot on the host's queue to be
 * picked up by dw_mci_request_end. */
844 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
845 struct mmc_request *mrq)
847 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
852 if (host->state == STATE_IDLE) {
853 host->state = STATE_SENDING_CMD;
854 dw_mci_start_request(host, slot);
856 list_add_tail(&slot->queue_node, &host->queue);
/* mmc_host_ops.request: entry point from the mmc core. Fails fast with
 * -ENOMEDIUM if the card is gone; the presence check and the enqueue are
 * done under one lock hold so a removal can't slip in between. */
860 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
862 struct dw_mci_slot *slot = mmc_priv(mmc);
863 struct dw_mci *host = slot->host;
868 * The check for card presence and queueing of the request must be
869 * atomic, otherwise the card could be removed in between and the
870 * request wouldn't fail until another card was inserted.
872 spin_lock_bh(&host->lock);
874 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
875 spin_unlock_bh(&host->lock);
876 mrq->cmd->error = -ENOMEDIUM;
877 mmc_request_done(mmc, mrq);
881 dw_mci_queue_request(host, slot, mrq);
883 spin_unlock_bh(&host->lock);
/* mmc_host_ops.set_ios: apply bus width, DDR mode (UHS_REG bit per
 * slot), clock rate and power state requested by the mmc core, with a
 * platform-specific hook in between. Power-up marks the card as needing
 * the INIT sequence on its first command. */
886 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
888 struct dw_mci_slot *slot = mmc_priv(mmc);
889 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
892 switch (ios->bus_width) {
893 case MMC_BUS_WIDTH_4:
894 slot->ctype = SDMMC_CTYPE_4BIT;
896 case MMC_BUS_WIDTH_8:
897 slot->ctype = SDMMC_CTYPE_8BIT;
900 /* set default 1 bit mode */
901 slot->ctype = SDMMC_CTYPE_1BIT;
/* DDR mode: set the per-slot DDR bit (upper half of UHS_REG). */
904 regs = mci_readl(slot->host, UHS_REG);
907 if (ios->timing == MMC_TIMING_UHS_DDR50)
908 regs |= ((0x1 << slot->id) << 16);
910 regs &= ~((0x1 << slot->id) << 16);
912 mci_writel(slot->host, UHS_REG, regs);
913 slot->host->timing = ios->timing;
916 * Use mirror of ios->clock to prevent race with mmc
917 * core ios update when finding the minimum.
919 slot->clock = ios->clock;
921 if (drv_data && drv_data->set_ios)
922 drv_data->set_ios(slot->host, ios);
924 /* Slot specific timing and width adjustment */
925 dw_mci_setup_bus(slot, false);
927 switch (ios->power_mode) {
929 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
931 if (slot->host->pdata->setpower)
932 slot->host->pdata->setpower(slot->id, mmc->ocr_avail);
933 regs = mci_readl(slot->host, PWREN);
934 regs |= (1 << slot->id);
935 mci_writel(slot->host, PWREN, regs);
938 /* Power down slot */
939 if (slot->host->pdata->setpower)
940 slot->host->pdata->setpower(slot->id, 0);
941 regs = mci_readl(slot->host, PWREN);
942 regs &= ~(1 << slot->id);
943 mci_writel(slot->host, PWREN, regs);
/* mmc_host_ops.get_ro: report write-protect state. Priority order:
 * slot quirk (never protected) > platform get_ro callback > WP GPIO >
 * controller's WRTPRT register bit for this slot. */
950 static int dw_mci_get_ro(struct mmc_host *mmc)
953 struct dw_mci_slot *slot = mmc_priv(mmc);
954 struct dw_mci_board *brd = slot->host->pdata;
956 /* Use platform get_ro function, else try on board write protect */
957 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
959 else if (brd->get_ro)
960 read_only = brd->get_ro(slot->id);
961 else if (gpio_is_valid(slot->wp_gpio))
962 read_only = gpio_get_value(slot->wp_gpio);
965 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
967 dev_dbg(&mmc->class_dev, "card is %s\n",
968 read_only ? "read-only" : "read-write");
/* mmc_host_ops.get_cd: report card presence. Priority order: broken-CD
 * quirk (always present) > platform get_cd callback (active low) > the
 * controller's CDETECT register bit (0 == card present). */
973 static int dw_mci_get_cd(struct mmc_host *mmc)
976 struct dw_mci_slot *slot = mmc_priv(mmc);
977 struct dw_mci_board *brd = slot->host->pdata;
979 /* Use platform get_cd function, else try onboard card detect */
980 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
982 else if (brd->get_cd)
983 present = !brd->get_cd(slot->id);
985 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
989 dev_dbg(&mmc->class_dev, "card is present\n");
991 dev_dbg(&mmc->class_dev, "card is not present\n");
997 * Disable lower power mode.
999 * Low power mode will stop the card clock when idle. According to the
1000 * description of the CLKENA register we should disable low power mode
1001 * for SDIO cards if we need SDIO interrupts to work.
1003 * This function is fast if low power mode is already disabled.
1005 static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
1007 struct dw_mci *host = slot->host;
1009 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1011 clk_en_a = mci_readl(host, CLKENA);
/* Only touch the hardware when the bit is actually set; the CLKENA
 * change must be latched with an UPD_CLK internal command. */
1013 if (clk_en_a & clken_low_pwr) {
1014 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
1015 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1016 SDMMC_CMD_PRV_DAT_WAIT, 0);
/* mmc_host_ops.enable_sdio_irq: mask/unmask this slot's SDIO interrupt.
 * Enabling also turns off low-power clock gating, since a gated clock
 * would prevent the card from signalling the interrupt. */
1020 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1022 struct dw_mci_slot *slot = mmc_priv(mmc);
1023 struct dw_mci *host = slot->host;
1026 /* Enable/disable Slot Specific SDIO interrupt */
1027 int_mask = mci_readl(host, INTMASK);
1030 * Turn off low power mode if it was enabled. This is a bit of
1031 * a heavy operation and we disable / enable IRQs a lot, so
1032 * we'll leave low power mode disabled and it will get
1033 * re-enabled again in dw_mci_setup_bus().
1035 dw_mci_disable_low_power(slot);
1037 mci_writel(host, INTMASK,
1038 (int_mask | SDMMC_INT_SDIO(slot->id)));
1040 mci_writel(host, INTMASK,
1041 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
/* mmc_host_ops.execute_tuning: select the reference tuning block pattern
 * matching the opcode/bus-width (CMD21 HS200: 8- or 4-bit pattern;
 * CMD19: 4-bit pattern) and delegate the actual sampling-point search to
 * the platform drv_data->execute_tuning hook. */
1045 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1047 struct dw_mci_slot *slot = mmc_priv(mmc);
1048 struct dw_mci *host = slot->host;
1049 const struct dw_mci_drv_data *drv_data = host->drv_data;
1050 struct dw_mci_tuning_data tuning_data;
1053 if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
1054 if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
1055 tuning_data.blk_pattern = tuning_blk_pattern_8bit;
1056 tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
1057 } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
1058 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1059 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1063 } else if (opcode == MMC_SEND_TUNING_BLOCK) {
1064 tuning_data.blk_pattern = tuning_blk_pattern_4bit;
1065 tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
1068 "Undefined command(%d) for tuning\n", opcode);
1072 if (drv_data && drv_data->execute_tuning)
1073 err = drv_data->execute_tuning(slot, opcode, &tuning_data);
/* Host operations registered with the mmc core for each slot. */
1077 static const struct mmc_host_ops dw_mci_ops = {
1078 .request = dw_mci_request,
1079 .pre_req = dw_mci_pre_req,
1080 .post_req = dw_mci_post_req,
1081 .set_ios = dw_mci_set_ios,
1082 .get_ro = dw_mci_get_ro,
1083 .get_cd = dw_mci_get_cd,
1084 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1085 .execute_tuning = dw_mci_execute_tuning,
/* Finish the current request and start the next queued one (if any),
 * then notify the mmc core. The lock is dropped around
 * mmc_request_done() because the core may immediately submit a new
 * request re-entering this driver — hence the sparse annotations. */
1088 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1089 __releases(&host->lock)
1090 __acquires(&host->lock)
1092 struct dw_mci_slot *slot;
1093 struct mmc_host *prev_mmc = host->cur_slot->mmc;
1095 WARN_ON(host->cmd || host->data);
1097 host->cur_slot->mrq = NULL;
1099 if (!list_empty(&host->queue)) {
1100 slot = list_entry(host->queue.next,
1101 struct dw_mci_slot, queue_node);
1102 list_del(&slot->queue_node);
1103 dev_vdbg(host->dev, "list not empty: %s is next\n",
1104 mmc_hostname(slot->mmc));
1105 host->state = STATE_SENDING_CMD;
1106 dw_mci_start_request(host, slot);
1108 dev_vdbg(host->dev, "list empty\n");
1109 host->state = STATE_IDLE;
1112 spin_unlock(&host->lock);
1113 mmc_request_done(prev_mmc, mrq);
1114 spin_lock(&host->lock);
/* Consume the latched command status: read back the response registers
 * (RESP0..3 are in reverse order for 136-bit responses) and translate
 * error bits into errno values (-ETIMEDOUT for response timeout,
 * -EILSEQ for response CRC errors on CRC-checked commands). */
1117 static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1119 u32 status = host->cmd_status;
1121 host->cmd_status = 0;
1123 /* Read the response from the card (up to 16 bytes) */
1124 if (cmd->flags & MMC_RSP_PRESENT) {
1125 if (cmd->flags & MMC_RSP_136) {
1126 cmd->resp[3] = mci_readl(host, RESP0);
1127 cmd->resp[2] = mci_readl(host, RESP1);
1128 cmd->resp[1] = mci_readl(host, RESP2);
1129 cmd->resp[0] = mci_readl(host, RESP3);
1131 cmd->resp[0] = mci_readl(host, RESP0);
1138 if (status & SDMMC_INT_RTO)
1139 cmd->error = -ETIMEDOUT;
1140 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1141 cmd->error = -EILSEQ;
1142 else if (status & SDMMC_INT_RESP_ERR)
1148 /* newer ip versions need a delay between retries */
1149 if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
/* Bottom-half state machine driving a request through its phases:
 * SENDING_CMD -> SENDING_DATA -> DATA_BUSY -> SENDING_STOP, with
 * DATA_ERROR as the error detour. Each pass consumes event bits set by
 * the interrupt handler (pending_events) and loops until the state
 * stops changing. Runs entirely under host->lock. */
1154 static void dw_mci_tasklet_func(unsigned long priv)
1156 struct dw_mci *host = (struct dw_mci *)priv;
1157 struct mmc_data *data;
1158 struct mmc_command *cmd;
1159 enum dw_mci_state state;
1160 enum dw_mci_state prev_state;
1163 spin_lock(&host->lock);
1165 state = host->state;
1175 case STATE_SENDING_CMD:
1176 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1177 &host->pending_events))
1182 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
1183 dw_mci_command_complete(host, cmd);
/* A successful CMD23 (set block count) chains straight into the
 * real data command without leaving SENDING_CMD. */
1184 if (cmd == host->mrq->sbc && !cmd->error) {
1185 prev_state = state = STATE_SENDING_CMD;
1186 __dw_mci_start_request(host, host->cur_slot,
1191 if (cmd->data && cmd->error) {
1192 dw_mci_stop_dma(host);
1194 send_stop_cmd(host, data);
1195 state = STATE_SENDING_STOP;
1202 if (!host->mrq->data || cmd->error) {
1203 dw_mci_request_end(host, host->mrq);
1207 prev_state = state = STATE_SENDING_DATA;
1210 case STATE_SENDING_DATA:
1211 if (test_and_clear_bit(EVENT_DATA_ERROR,
1212 &host->pending_events)) {
1213 dw_mci_stop_dma(host);
1215 send_stop_cmd(host, data);
1216 state = STATE_DATA_ERROR;
1220 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1221 &host->pending_events))
1224 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
1225 prev_state = state = STATE_DATA_BUSY;
1228 case STATE_DATA_BUSY:
1229 if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
1230 &host->pending_events))
1234 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
1235 status = host->data_status;
/* Map the data-phase interrupt status to an errno on data. */
1237 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1238 if (status & SDMMC_INT_DRTO) {
1239 data->error = -ETIMEDOUT;
1240 } else if (status & SDMMC_INT_DCRC) {
1241 data->error = -EILSEQ;
1242 } else if (status & SDMMC_INT_EBE &&
1244 DW_MCI_SEND_STATUS) {
1246 * No data CRC status was returned.
1247 * The number of bytes transferred will
1248 * be exaggerated in PIO mode.
1250 data->bytes_xfered = 0;
1251 data->error = -ETIMEDOUT;
1260 * After an error, there may be data lingering
1261 * in the FIFO, so reset it - doing so
1262 * generates a block interrupt, hence setting
1263 * the scatter-gather pointer to NULL.
1265 sg_miter_stop(&host->sg_miter);
1267 ctrl = mci_readl(host, CTRL);
1268 ctrl |= SDMMC_CTRL_FIFO_RESET;
1269 mci_writel(host, CTRL, ctrl);
/* Success: the full requested byte count was transferred. */
1271 data->bytes_xfered = data->blocks * data->blksz;
1276 dw_mci_request_end(host, host->mrq);
/* With CMD23 the card stops by itself; no explicit stop needed. */
1280 if (host->mrq->sbc && !data->error) {
1281 data->stop->error = 0;
1282 dw_mci_request_end(host, host->mrq);
1286 prev_state = state = STATE_SENDING_STOP;
1288 send_stop_cmd(host, data);
1291 case STATE_SENDING_STOP:
1292 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
1293 &host->pending_events))
1296 /* CMD error in data command */
1297 if (host->mrq->cmd->error && host->mrq->data) {
1298 sg_miter_stop(&host->sg_miter);
1300 ctrl = mci_readl(host, CTRL);
1301 ctrl |= SDMMC_CTRL_FIFO_RESET;
1302 mci_writel(host, CTRL, ctrl);
1307 dw_mci_command_complete(host, host->mrq->stop);
1308 dw_mci_request_end(host, host->mrq);
1311 case STATE_DATA_ERROR:
/* Wait for the aborted transfer to drain before handling the error
 * in DATA_BUSY. */
1312 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
1313 &host->pending_events))
1316 state = STATE_DATA_BUSY;
1319 } while (state != prev_state);
1321 host->state = state;
1323 spin_unlock(&host->lock);
1327 /* push final bytes to part_buf, only use during push */
/* Stash trailing bytes (fewer than one FIFO word) until enough
 * accumulate to write a whole word. */
1328 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
1330 memcpy((void *)&host->part_buf, buf, cnt);
1331 host->part_buf_count = cnt;
1334 /* append bytes to part_buf, only use during push */
/* Top up the partial-word buffer toward one full FIFO word
 * (1 << data_shift bytes); returns how many bytes were consumed. */
1335 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
1337 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
1338 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
1339 host->part_buf_count += cnt;
1343 /* pull first bytes from part_buf, only use during pull */
1344 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
1346 cnt = min(cnt, (int)host->part_buf_count);
1348 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
1350 host->part_buf_count -= cnt;
1351 host->part_buf_start += cnt;
1356 /* pull final bytes from the part_buf, assuming it's just been filled */
/* Take the first cnt bytes of a freshly read FIFO word; the remainder
 * stays buffered for the next pull. */
1357 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
1359 memcpy(buf, &host->part_buf, cnt);
1360 host->part_buf_start = cnt;
1361 host->part_buf_count = (1 << host->data_shift) - cnt;
/* PIO write path for a 16-bit FIFO: flush any buffered partial word,
 * bounce through an aligned scratch buffer on architectures without
 * efficient unaligned access, write whole 16-bit words, and buffer any
 * trailing odd byte — flushing it only at the very end of the transfer. */
1364 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
1366 struct mmc_data *data = host->data;
1369 /* try and push anything in the part_buf */
1370 if (unlikely(host->part_buf_count)) {
1371 int len = dw_mci_push_part_bytes(host, buf, cnt);
1374 if (host->part_buf_count == 2) {
1375 mci_writew(host, DATA(host->data_offset),
1377 host->part_buf_count = 0;
1380 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
1381 if (unlikely((unsigned long)buf & 0x1)) {
1383 u16 aligned_buf[64];
1384 int len = min(cnt & -2, (int)sizeof(aligned_buf));
1385 int items = len >> 1;
1387 /* memcpy from input buffer into aligned buffer */
1388 memcpy(aligned_buf, buf, len);
1391 /* push data from aligned buffer into fifo */
1392 for (i = 0; i < items; ++i)
1393 mci_writew(host, DATA(host->data_offset),
1400 for (; cnt >= 2; cnt -= 2)
1401 mci_writew(host, DATA(host->data_offset), *pdata++);
1404 /* put anything remaining in the part_buf */
1406 dw_mci_set_part_bytes(host, buf, cnt);
1407 /* Push data if we have reached the expected data length */
1408 if ((data->bytes_xfered + init_cnt) ==
1409 (data->blksz * data->blocks))
1410 mci_writew(host, DATA(host->data_offset),
/*
 * PIO pull for a 16-bit host data bus: read 16-bit words from the FIFO
 * (through a bounce buffer when the destination is misaligned), then
 * cache a final partial word in part_buf for the next call.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		/* pull data from fifo into aligned buffer */
		u16 aligned_buf[64];
		int len = min(cnt & -2, (int)sizeof(aligned_buf));
		int items = len >> 1;
		for (i = 0; i < items; ++i)
			aligned_buf[i] = mci_readw(host,
						   DATA(host->data_offset));
		/* memcpy from aligned buffer into output buffer */
		memcpy(buf, aligned_buf, len);
	/* aligned fast path: read whole words straight into the buffer */
	for (; cnt >= 2; cnt -= 2)
		*pdata++ = mci_readw(host, DATA(host->data_offset));
	/* fetch one more word; leftover bytes are kept in part_buf */
	host->part_buf16 = mci_readw(host, DATA(host->data_offset));
	dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 32-bit host data bus; same structure as the 16-bit
 * variant but with 4-byte FIFO words.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
	struct mmc_data *data = host->data;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		/* part_buf now holds a complete 32-bit word: write it out */
		if (host->part_buf_count == 4) {
			mci_writel(host, DATA(host->data_offset),
			host->part_buf_count = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		/* misaligned source: bounce through an on-stack buffer,
		 * len clamped to a multiple of 4 */
		u32 aligned_buf[32];
		int len = min(cnt & -4, (int)sizeof(aligned_buf));
		int items = len >> 2;
		/* memcpy from input buffer into aligned buffer */
		memcpy(aligned_buf, buf, len);
		/* push data from aligned buffer into fifo */
		for (i = 0; i < items; ++i)
			mci_writel(host, DATA(host->data_offset),
	/* aligned fast path: write 32-bit words straight from the buffer */
	for (; cnt >= 4; cnt -= 4)
		mci_writel(host, DATA(host->data_offset), *pdata++);
	/* put anything remaining in the part_buf */
	dw_mci_set_part_bytes(host, buf, cnt);
	/* Push data if we have reached the expected data length */
	if ((data->bytes_xfered + init_cnt) ==
			(data->blksz * data->blocks))
		mci_writel(host, DATA(host->data_offset),
/*
 * PIO pull for a 32-bit host data bus; same structure as the 16-bit
 * variant but with 4-byte FIFO words.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		/* pull data from fifo into aligned buffer */
		u32 aligned_buf[32];
		int len = min(cnt & -4, (int)sizeof(aligned_buf));
		int items = len >> 2;
		for (i = 0; i < items; ++i)
			aligned_buf[i] = mci_readl(host,
						   DATA(host->data_offset));
		/* memcpy from aligned buffer into output buffer */
		memcpy(buf, aligned_buf, len);
	/* aligned fast path: read whole words straight into the buffer */
	for (; cnt >= 4; cnt -= 4)
		*pdata++ = mci_readl(host, DATA(host->data_offset));
	/* fetch one more word; leftover bytes are kept in part_buf */
	host->part_buf32 = mci_readl(host, DATA(host->data_offset));
	dw_mci_pull_final_bytes(host, buf, cnt);
/*
 * PIO push for a 64-bit host data bus; same structure as the 16-bit
 * variant but with 8-byte FIFO words.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
	struct mmc_data *data = host->data;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		/* part_buf now holds a complete 64-bit word: write it out */
		if (host->part_buf_count == 8) {
			mci_writeq(host, DATA(host->data_offset),
			host->part_buf_count = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		/* misaligned source: bounce through an on-stack buffer,
		 * len clamped to a multiple of 8 */
		u64 aligned_buf[16];
		int len = min(cnt & -8, (int)sizeof(aligned_buf));
		int items = len >> 3;
		/* memcpy from input buffer into aligned buffer */
		memcpy(aligned_buf, buf, len);
		/* push data from aligned buffer into fifo */
		for (i = 0; i < items; ++i)
			mci_writeq(host, DATA(host->data_offset),
	/* aligned fast path: write 64-bit words straight from the buffer */
	for (; cnt >= 8; cnt -= 8)
		mci_writeq(host, DATA(host->data_offset), *pdata++);
	/* put anything remaining in the part_buf */
	dw_mci_set_part_bytes(host, buf, cnt);
	/* Push data if we have reached the expected data length */
	if ((data->bytes_xfered + init_cnt) ==
			(data->blksz * data->blocks))
		mci_writeq(host, DATA(host->data_offset),
/*
 * PIO pull for a 64-bit host data bus; same structure as the 16-bit
 * variant but with 8-byte FIFO words.
 */
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		/* pull data from fifo into aligned buffer */
		u64 aligned_buf[16];
		int len = min(cnt & -8, (int)sizeof(aligned_buf));
		int items = len >> 3;
		for (i = 0; i < items; ++i)
			aligned_buf[i] = mci_readq(host,
						   DATA(host->data_offset));
		/* memcpy from aligned buffer into output buffer */
		memcpy(buf, aligned_buf, len);
	/* aligned fast path: read whole words straight into the buffer */
	for (; cnt >= 8; cnt -= 8)
		*pdata++ = mci_readq(host, DATA(host->data_offset));
	/* fetch one more word; leftover bytes are kept in part_buf */
	host->part_buf = mci_readq(host, DATA(host->data_offset));
	dw_mci_pull_final_bytes(host, buf, cnt);
1614 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
1618 /* get remaining partial bytes */
1619 len = dw_mci_pull_part_bytes(host, buf, cnt);
1620 if (unlikely(len == cnt))
1625 /* get the rest of the data */
1626 host->pull_data(host, buf, cnt);
/*
 * PIO receive loop: walk the scatterlist via the sg mapping iterator and
 * pull bytes out of the FIFO.  @dto marks a data-over interrupt, in
 * which case the loop keeps going until the FIFO is fully drained.
 */
static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	unsigned int remain, fcnt;

		if (!sg_miter_next(sg_miter))
		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;

			/* bytes available = FIFO word count scaled to bytes
			 * plus whatever is cached in part_buf */
			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
					<< shift) + host->part_buf_count;
			len = min(remain, fcnt);
			dw_mci_pull_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
	/* if the RXDR is ready read again */
	} while ((status & SDMMC_INT_RXDR) ||
		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));

	if (!sg_miter_next(sg_miter))
	sg_miter->consumed = 0;
	sg_miter_stop(sg_miter);

	/* scatterlist exhausted: stop the iterator and flag completion */
	sg_miter_stop(sg_miter);
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * PIO transmit loop: walk the scatterlist via the sg mapping iterator
 * and push bytes into the FIFO, never writing more than the free FIFO
 * space allows.
 */
static void dw_mci_write_data_pio(struct dw_mci *host)
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	unsigned int offset;
	struct mmc_data *data = host->data;
	int shift = host->data_shift;
	unsigned int fifo_depth = host->fifo_depth;
	unsigned int remain, fcnt;

		if (!sg_miter_next(sg_miter))
		host->sg = sg_miter->piter.sg;
		buf = sg_miter->addr;
		remain = sg_miter->length;

			/* free FIFO space in bytes, minus bytes already
			 * parked in part_buf */
			fcnt = ((fifo_depth -
				 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
					<< shift) - host->part_buf_count;
			len = min(remain, fcnt);
			host->push_data(host, (void *)(buf + offset), len);
			data->bytes_xfered += len;
		sg_miter->consumed = offset;
		status = mci_readl(host, MINTSTS);
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */

	if (!sg_miter_next(sg_miter))
	sg_miter->consumed = 0;
	sg_miter_stop(sg_miter);

	/* scatterlist exhausted: stop the iterator and flag completion */
	sg_miter_stop(sg_miter);
	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
/*
 * Record a command-done interrupt: keep the first status value seen,
 * then let the tasklet finish command processing in softirq context.
 */
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
	/* don't overwrite a status captured by an earlier interrupt */
	if (!host->cmd_status)
		host->cmd_status = status;

	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	tasklet_schedule(&host->tasklet);
/*
 * Top-half interrupt handler: read MINTSTS once, then acknowledge and
 * dispatch each pending interrupt source (command/data errors, data
 * over, FIFO ready, command done, card detect, per-slot SDIO, and -
 * with the internal DMAC - IDMAC completion).
 */
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
	struct dw_mci *host = dev_id;

	pending = mci_readl(host, MINTSTS); /* read-only mask reg */

	/*
	 * DTO fix - version 2.10a and below, and only if internal DMA
	 */
	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
		/* synthesize DATA_OVER from the FIFO count when the
		 * controller fails to raise it itself */
			((mci_readl(host, STATUS) >> 17) & 0x1fff))
			pending |= SDMMC_INT_DATA_OVER;

	if (pending & DW_MCI_CMD_ERROR_FLAGS) {
		mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
		host->cmd_status = pending;
		set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

	if (pending & DW_MCI_DATA_ERROR_FLAGS) {
		/* if there is an error report DATA_ERROR */
		mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
		host->data_status = pending;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		tasklet_schedule(&host->tasklet);

	if (pending & SDMMC_INT_DATA_OVER) {
		mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
		if (!host->data_status)
			host->data_status = pending;
		if (host->dir_status == DW_MCI_RECV_STATUS) {
			/* drain whatever is still sitting in the FIFO */
			if (host->sg != NULL)
				dw_mci_read_data_pio(host, true);
		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);

	if (pending & SDMMC_INT_RXDR) {
		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
		if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
			dw_mci_read_data_pio(host, false);

	if (pending & SDMMC_INT_TXDR) {
		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
		if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
			dw_mci_write_data_pio(host);

	if (pending & SDMMC_INT_CMD_DONE) {
		mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
		dw_mci_cmd_interrupt(host, pending);

	if (pending & SDMMC_INT_CD) {
		mci_writel(host, RINTSTS, SDMMC_INT_CD);
		/* card insert/remove is handled in process context */
		queue_work(host->card_workqueue, &host->card_work);

	/* Handle SDIO Interrupts */
	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		if (pending & SDMMC_INT_SDIO(i)) {
			mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
			mmc_signal_sdio_irq(slot->mmc);

#ifdef CONFIG_MMC_DW_IDMAC
	/* Handle DMA interrupts */
	pending = mci_readl(host, IDSTS);
	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
		host->dma_ops->complete(host);
/*
 * Card-detect worker: for each slot, reconcile the driver with the new
 * card state.  On removal, any in-flight or queued request is failed
 * with -ENOMEDIUM and the FIFO (and IDMAC, if configured) is reset.
 */
static void dw_mci_work_routine_card(struct work_struct *work)
	struct dw_mci *host = container_of(work, struct dw_mci, card_work);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		struct mmc_host *mmc = slot->mmc;
		struct mmc_request *mrq;

		present = dw_mci_get_cd(mmc);
		/* loop until the detect state stops changing under us */
		while (present != slot->last_detect_state) {
			dev_dbg(&slot->mmc->class_dev, "card %s\n",
				present ? "inserted" : "removed");

			spin_lock_bh(&host->lock);

			/* Card change detected */
			slot->last_detect_state = present;

			/* Mark card as present if applicable */
			set_bit(DW_MMC_CARD_PRESENT, &slot->flags);

			/* Clean up queue if present */
			if (mrq == host->mrq) {
				/* fail the active request at whatever point
				 * the tasklet state machine reached */
				switch (host->state) {
				case STATE_SENDING_CMD:
					mrq->cmd->error = -ENOMEDIUM;
				case STATE_SENDING_DATA:
					mrq->data->error = -ENOMEDIUM;
					dw_mci_stop_dma(host);
				case STATE_DATA_BUSY:
				case STATE_DATA_ERROR:
					if (mrq->data->error == -EINPROGRESS)
						mrq->data->error = -ENOMEDIUM;
				case STATE_SENDING_STOP:
					mrq->stop->error = -ENOMEDIUM;

				dw_mci_request_end(host, mrq);
				/* request was still queued: fail it directly */
				list_del(&slot->queue_node);
				mrq->cmd->error = -ENOMEDIUM;
					mrq->data->error = -ENOMEDIUM;
					mrq->stop->error = -ENOMEDIUM;

				/* drop the lock across the mmc-core callback */
				spin_unlock(&host->lock);
				mmc_request_done(slot->mmc, mrq);
				spin_lock(&host->lock);

			/* Power down slot */
			clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

			/*
			 * Clear down the FIFO - doing so generates a
			 * block interrupt, hence setting the
			 * scatter-gather pointer to NULL.
			 */
			sg_miter_stop(&host->sg_miter);

			ctrl = mci_readl(host, CTRL);
			ctrl |= SDMMC_CTRL_FIFO_RESET;
			mci_writel(host, CTRL, ctrl);

#ifdef CONFIG_MMC_DW_IDMAC
			ctrl = mci_readl(host, BMOD);
			/* Software reset of DMA */
			ctrl |= SDMMC_IDMAC_SWRESET;
			mci_writel(host, BMOD, ctrl);

			spin_unlock_bh(&host->lock);

			/* re-sample in case the state changed meanwhile */
			present = dw_mci_get_cd(mmc);

		mmc_detect_change(slot->mmc,
			msecs_to_jiffies(host->pdata->detect_delay_ms));
/* given a slot id, find out the device node representing that slot */
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
	struct device_node *np;

	if (!dev || !dev->of_node)

	/* a slot child node is matched by its "reg" property */
	for_each_child_of_node(dev->of_node, np) {
		addr = of_get_property(np, "reg", &len);
		if (!addr || (len < sizeof(int)))
		if (be32_to_cpup(addr) == slot)
/* Mapping of per-slot DT quirk property names to DW_MCI_SLOT_QUIRK_* flags. */
static struct dw_mci_of_slot_quirks {
} of_slot_quirks[] = {
		.quirk = "disable-wp",
		.id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
/* Collect the DW_MCI_SLOT_QUIRK_* flags present in the slot's DT node. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);

	/* each quirk is a boolean DT property: present means enabled */
	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
			quirks |= of_slot_quirks[idx].id;
/* find out bus-width for a given slot */
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);

	/* a missing property is tolerated - a default width is assumed */
	if (of_property_read_u32(np, "bus-width", &bus_wd))
		dev_err(dev, "bus-width property not found, assuming width"
/* find the write protect gpio for a given slot; or -1 if none specified */
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
	struct device_node *np = dw_mci_of_find_slot_node(dev, slot);

	gpio = of_get_named_gpio(np, "wp-gpios", 0);

	/* Having a missing entry is valid; return silently */
	if (!gpio_is_valid(gpio))

	/* devm-managed: the gpio is released automatically on unbind */
	if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
		dev_warn(dev, "gpio [%d] request failed\n", gpio);
#else /* CONFIG_OF */
/* !CONFIG_OF stubs: report "nothing found in DT" so platform data wins. */
static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
#endif /* CONFIG_OF */
/*
 * Allocate and register the mmc_host for slot @id: configure clock
 * range, OCR mask, capabilities (platform data, DT and controller-
 * specific), bus width and block-size limits, then add it to the core.
 */
static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
	struct mmc_host *mmc;
	struct dw_mci_slot *slot;
	const struct dw_mci_drv_data *drv_data = host->drv_data;

	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);

	slot = mmc_priv(mmc);
	host->slot[id] = slot;

	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);

	mmc->ops = &dw_mci_ops;
	/* DT may constrain the clock range; otherwise use driver defaults */
	if (of_property_read_u32_array(host->dev->of_node,
				       "clock-freq-min-max", freq, 2)) {
		mmc->f_min = DW_MCI_FREQ_MIN;
		mmc->f_max = DW_MCI_FREQ_MAX;
		mmc->f_min = freq[0];
		mmc->f_max = freq[1];

	if (host->pdata->get_ocr)
		mmc->ocr_avail = host->pdata->get_ocr(id);
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/*
	 * Start with slot power disabled, it will be enabled when a card
	 */
	if (host->pdata->setpower)
		host->pdata->setpower(id, 0);

	if (host->pdata->caps)
		mmc->caps = host->pdata->caps;

	if (host->pdata->pm_caps)
		mmc->pm_caps = host->pdata->pm_caps;

	/* controller index selects the drv_data caps entry */
	if (host->dev->of_node) {
		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
		ctrl_id = to_platform_device(host->dev)->id;
	if (drv_data && drv_data->caps)
		mmc->caps |= drv_data->caps[ctrl_id];

	if (host->pdata->caps2)
		mmc->caps2 = host->pdata->caps2;

	/* bus width: platform callback first, then DT, else a default */
	if (host->pdata->get_bus_wd)
		bus_width = host->pdata->get_bus_wd(slot->id);
	else if (host->dev->of_node)
		bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);

	switch (bus_width) {
		mmc->caps |= MMC_CAP_8_BIT_DATA;
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->pdata->blk_settings) {
		mmc->max_segs = host->pdata->blk_settings->max_segs;
		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
	/* Useful defaults if platform data is unset. */
#ifdef CONFIG_MMC_DW_IDMAC
		mmc->max_segs = host->ring_size;
		mmc->max_blk_size = 65536;
		mmc->max_blk_count = host->ring_size;
		mmc->max_seg_size = 0x1000;
		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
		mmc->max_blk_count = 512;
		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
		mmc->max_seg_size = mmc->max_req_size;
#endif /* CONFIG_MMC_DW_IDMAC */

	if (dw_mci_get_cd(mmc))
		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);

	slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);

	ret = mmc_add_host(mmc);

#if defined(CONFIG_DEBUG_FS)
	dw_mci_init_debugfs(slot);

	/* Card initially undetected */
	slot->last_detect_state = 0;
2174 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
2176 /* Shutdown detect IRQ */
2177 if (slot->host->pdata->exit)
2178 slot->host->pdata->exit(id);
2180 /* Debugfs stuff is cleaned up by mmc core */
2181 mmc_remove_host(slot->mmc);
2182 slot->host->slot[id] = NULL;
2183 mmc_free_host(slot->mmc);
/*
 * Set up DMA support: allocate the descriptor page, then select and
 * initialise a DMA interface; on any failure the driver falls back to
 * PIO mode.
 */
static void dw_mci_init_dma(struct dw_mci *host)
	/* Alloc memory for sg translation */
	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
					   &host->sg_dma, GFP_KERNEL);
	if (!host->sg_cpu) {
		dev_err(host->dev, "%s: could not alloc DMA memory\n",

	/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
	host->dma_ops = &dw_mci_idmac_ops;
	dev_info(host->dev, "Using internal DMA controller.\n");

	/* a usable dma_ops must provide init/start/stop/cleanup */
	if (host->dma_ops->init && host->dma_ops->start &&
	    host->dma_ops->stop && host->dma_ops->cleanup) {
		if (host->dma_ops->init(host)) {
			dev_err(host->dev, "%s: Unable to initialize "
				"DMA Controller.\n", __func__);
		dev_err(host->dev, "DMA initialization not found.\n");

	dev_info(host->dev, "Using PIO mode.\n");
2227 static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
2229 unsigned long timeout = jiffies + msecs_to_jiffies(500);
2232 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2233 SDMMC_CTRL_DMA_RESET));
2235 /* wait till resets clear */
2237 ctrl = mci_readl(host, CTRL);
2238 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
2239 SDMMC_CTRL_DMA_RESET)))
2241 } while (time_before(jiffies, timeout));
2243 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
/* Mapping of controller-level DT quirk property names to DW_MCI_QUIRK_* flags. */
static struct dw_mci_of_quirks {
		.quirk = "broken-cd",
		.id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
/*
 * Build a dw_mci_board (platform data) from the controller's device-tree
 * node.  Returns an ERR_PTR on allocation or drv_data parse failure.
 */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
	struct dw_mci_board *pdata;
	struct device *dev = host->dev;
	struct device_node *np = dev->of_node;
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	u32 clock_frequency;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
		dev_err(dev, "could not allocate memory for pdata\n");
		return ERR_PTR(-ENOMEM);

	/* find out number of slots supported */
	if (of_property_read_u32(dev->of_node, "num-slots",
				 &pdata->num_slots)) {
		dev_info(dev, "num-slots property not found, "
			 "assuming 1 slot is available\n");
		pdata->num_slots = 1;

	/* translate boolean DT quirk properties into quirk flags */
	for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
		if (of_get_property(np, of_quirks[idx].quirk, NULL))
			pdata->quirks |= of_quirks[idx].id;

	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
		dev_info(dev, "fifo-depth property not found, using "
			 "value of FIFOTH register as default\n");

	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);

	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
		pdata->bus_hz = clock_frequency;

	/* give the controller-specific driver a chance to parse its nodes */
	if (drv_data && drv_data->parse_dt) {
		ret = drv_data->parse_dt(host);
			return ERR_PTR(ret);

	if (of_find_property(np, "keep-power-in-suspend", NULL))
		pdata->pm_caps |= MMC_PM_KEEP_POWER;

	if (of_find_property(np, "enable-sdio-wakeup", NULL))
		pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;

	if (of_find_property(np, "supports-highspeed", NULL))
		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if (of_find_property(np, "caps2-mmc-hs200-1_8v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;

	if (of_find_property(np, "caps2-mmc-hs200-1_2v", NULL))
		pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
#else /* CONFIG_OF */
/* Without CONFIG_OF there is no DT to parse; callers must supply pdata. */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
	return ERR_PTR(-EINVAL);
#endif /* CONFIG_OF */
/*
 * Bring up one DW MMC controller: resolve platform data (from DT if
 * needed), clocks and regulator, detect the host data-bus width, reset
 * the block, set up DMA/FIFO/IRQ, then register each slot.  At least
 * one slot must initialise successfully.
 */
int dw_mci_probe(struct dw_mci *host)
	const struct dw_mci_drv_data *drv_data = host->drv_data;
	int width, i, ret = 0;

	host->pdata = dw_mci_parse_dt(host);
	if (IS_ERR(host->pdata)) {
		dev_err(host->dev, "platform data not available\n");

	/* multiple slots require a way to pick one */
	if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
		"Platform data must supply select_slot function\n");

	/* bus interface clock (optional) */
	host->biu_clk = devm_clk_get(host->dev, "biu");
	if (IS_ERR(host->biu_clk)) {
		dev_dbg(host->dev, "biu clock not available\n");
		ret = clk_prepare_enable(host->biu_clk);
			dev_err(host->dev, "failed to enable biu clock\n");

	/* card interface clock (optional; falls back to pdata bus_hz) */
	host->ciu_clk = devm_clk_get(host->dev, "ciu");
	if (IS_ERR(host->ciu_clk)) {
		dev_dbg(host->dev, "ciu clock not available\n");
		host->bus_hz = host->pdata->bus_hz;
		ret = clk_prepare_enable(host->ciu_clk);
			dev_err(host->dev, "failed to enable ciu clock\n");

		if (host->pdata->bus_hz) {
			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
				/* NOTE(review): "%ul" prints "%u" followed by a
				 * literal 'l' - likely meant "%uHz"; confirm
				 * against printk-formats and fix */
				"Unable to set bus rate to %ul\n",
				host->pdata->bus_hz);
		host->bus_hz = clk_get_rate(host->ciu_clk);

	if (drv_data && drv_data->init) {
		ret = drv_data->init(host);
			"implementation specific init failed\n");

	if (drv_data && drv_data->setup_clock) {
		ret = drv_data->setup_clock(host);
			"implementation specific clock setup failed\n");

	/* vmmc is optional; only -EPROBE_DEFER is treated as fatal here */
	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
	if (IS_ERR(host->vmmc)) {
		ret = PTR_ERR(host->vmmc);
		if (ret == -EPROBE_DEFER)
		dev_info(host->dev, "no vmmc regulator found: %d\n", ret);
		ret = regulator_enable(host->vmmc);
			if (ret != -EPROBE_DEFER)
				"regulator_enable fail: %d\n", ret);

	/* some bus speed must have come from a clock or pdata by now */
	if (!host->bus_hz) {
		"Platform data must supply bus speed\n");

	host->quirks = host->pdata->quirks;

	spin_lock_init(&host->lock);
	INIT_LIST_HEAD(&host->queue);

	/*
	 * Get the host data width - this assumes that HCON has been set with
	 * the correct values.
	 */
	i = (mci_readl(host, HCON) >> 7) & 0x7;
		host->push_data = dw_mci_push_data16;
		host->pull_data = dw_mci_pull_data16;
		host->data_shift = 1;
	} else if (i == 2) {
		host->push_data = dw_mci_push_data64;
		host->pull_data = dw_mci_pull_data64;
		host->data_shift = 3;
		/* Check for a reserved value, and warn if it is */
		"HCON reports a reserved host data width!\n"
		"Defaulting to 32-bit access.\n");
		host->push_data = dw_mci_push_data32;
		host->pull_data = dw_mci_pull_data32;
		host->data_shift = 2;

	/* Reset all blocks */
	if (!mci_wait_reset(host->dev, host))

	host->dma_ops = host->pdata->dma_ops;
	dw_mci_init_dma(host);

	/* Clear the interrupts for the host controller */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	/*
	 * FIFO threshold settings RxMark = fifo_size / 2 - 1,
	 * Tx Mark = fifo_size / 2 DMA Size = 8
	 */
	if (!host->pdata->fifo_depth) {
		/*
		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
		 * have been overwritten by the bootloader, just like we're
		 * about to do, so if you know the value for your hardware, you
		 * should put it in the platform data.
		 */
		fifo_size = mci_readl(host, FIFOTH);
		fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
		fifo_size = host->pdata->fifo_depth;

	host->fifo_depth = fifo_size;
		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
	mci_writel(host, FIFOTH, host->fifoth_val);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	/*
	 * In 2.40a spec, Data offset is changed.
	 * Need to check the version-id and set data-offset for DATA register.
	 */
	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
	dev_info(host->dev, "Version ID is %04x\n", host->verid);

	if (host->verid < DW_MMC_240A)
		host->data_offset = DATA_OFFSET;
		host->data_offset = DATA_240A_OFFSET;

	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
	/* NOTE(review): WQ_NON_REENTRANT is deprecated/a no-op on newer
	 * kernels - confirm against the target kernel version */
	host->card_workqueue = alloc_workqueue("dw-mci-card",
			WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
	if (!host->card_workqueue) {

	INIT_WORK(&host->card_work, dw_mci_work_routine_card);
	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
			       host->irq_flags, "dw-mci", host);

	if (host->pdata->num_slots)
		host->num_slots = host->pdata->num_slots;
		/* otherwise use the slot count advertised in HCON */
		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;

	/*
	 * Enable interrupts for command done, data over, data empty, card det,
	 * receive ready and error such as transmit, receive timeout, crc error
	 */
	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */

	dev_info(host->dev, "DW MMC controller at irq %d, "
		 "%d bit host data width, "
		 host->irq, width, fifo_size);

	/* We need at least one slot to succeed */
	for (i = 0; i < host->num_slots; i++) {
		ret = dw_mci_init_slot(host, i);
			dev_dbg(host->dev, "slot %d init failed\n", i);

		dev_info(host->dev, "%d slots initialized\n", init_slots);
		dev_dbg(host->dev, "attempted to initialize %d slots, "
			"but failed on all\n", host->num_slots);

	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");

	/* error unwinding: release resources in reverse acquisition order */
	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	regulator_disable(host->vmmc);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
EXPORT_SYMBOL(dw_mci_probe);
/*
 * Tear down the controller: mask and clear all interrupts, unregister
 * every slot, gate the card clock, and release workqueue/DMA/regulator/
 * clock resources - the reverse of dw_mci_probe().
 */
void dw_mci_remove(struct dw_mci *host)

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */

	for (i = 0; i < host->num_slots; i++) {
		dev_dbg(host->dev, "remove slot %d\n", i);
		dw_mci_cleanup_slot(host->slot[i], i);

	/* disable clock to CIU */
	mci_writel(host, CLKENA, 0);
	mci_writel(host, CLKSRC, 0);

	destroy_workqueue(host->card_workqueue);

	if (host->use_dma && host->dma_ops->exit)
		host->dma_ops->exit(host);

	regulator_disable(host->vmmc);

	if (!IS_ERR(host->ciu_clk))
		clk_disable_unprepare(host->ciu_clk);

	if (!IS_ERR(host->biu_clk))
		clk_disable_unprepare(host->biu_clk);
EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM_SLEEP
/*
 * TODO: we should probably disable the clock to the card in the suspend path.
 */
/* Suspend every slot's mmc host, then cut the vmmc supply. */
int dw_mci_suspend(struct dw_mci *host)

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		ret = mmc_suspend_host(slot->mmc);
		/* on failure, resume the slots suspended so far and bail */
			slot = host->slot[i];
			mmc_resume_host(host->slot[i]->mmc);

	regulator_disable(host->vmmc);
EXPORT_SYMBOL(dw_mci_suspend);
/*
 * Resume path: re-enable vmmc, reset the block, re-init DMA, restore
 * FIFO threshold / timeout / interrupt mask, then resume each slot
 * (re-applying bus config for cards kept powered during suspend).
 */
int dw_mci_resume(struct dw_mci *host)

	ret = regulator_enable(host->vmmc);
		/* cannot power the card: report the failure */
		"failed to enable regulator: %d\n", ret);

	if (!mci_wait_reset(host->dev, host)) {

	if (host->use_dma && host->dma_ops->init)
		host->dma_ops->init(host);

	/*
	 * Restore the initial value at FIFOTH register
	 * And Invalidate the prev_blksz with zero
	 */
	mci_writel(host, FIFOTH, host->fifoth_val);
	host->prev_blksz = 0;

	/* Put in max timeout */
	mci_writel(host, TMOUT, 0xFFFFFFFF);

	mci_writel(host, RINTSTS, 0xFFFFFFFF);
	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);

	for (i = 0; i < host->num_slots; i++) {
		struct dw_mci_slot *slot = host->slot[i];
		/* re-apply ios/bus setup for cards that kept power */
		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
			dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
			dw_mci_setup_bus(slot, true);
		ret = mmc_resume_host(host->slot[i]->mmc);
EXPORT_SYMBOL(dw_mci_resume);
#endif /* CONFIG_PM_SLEEP */
/* Module init: announce the driver; hosts register via dw_mci_probe(). */
static int __init dw_mci_init(void)
	pr_info("Synopsys Designware Multimedia Card Interface Driver\n");

/* Module exit: nothing to tear down here; hosts use dw_mci_remove(). */
static void __exit dw_mci_exit(void)

module_init(dw_mci_init);
module_exit(dw_mci_exit);

MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");