/*
 *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2010 ST-Ericsson SA
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include "mmci.h"

#define DRIVER_NAME "mmci-pl18x"

static unsigned int fmax = 515633;
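/*
 * fmax above is the default ceiling for the card bus clock in Hz: probe()
 * clamps mmc->f_max to min(host->mclk, fmax) whenever the platform data does
 * not supply an explicit f_max, and the module parameter declared at the end
 * of this file allows the default to be overridden at load time.
 */
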
/**
 * struct variant_data - MMCI variant-specific quirks
 * @clkreg: default value for MCICLOCK register
 * @clkreg_enable: enable value for MMCICLOCK register
 * @datalength_bits: number of bits in the MMCIDATALENGTH register
 * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY
 *	      is asserted (likewise for RX)
 * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY
 *		  is asserted (likewise for RX)
 * @sdio: variant supports SDIO
 * @st_clkdiv: true if using a ST-specific clock divider algorithm
 * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
 * @pwrreg_powerup: power up value for MMCIPOWER register
 * @signal_direction: input/out direction of bus signals can be indicated
 */
struct variant_data {
	unsigned int		clkreg;
	unsigned int		clkreg_enable;
	unsigned int		datalength_bits;
	unsigned int		fifosize;
	unsigned int		fifohalfsize;
	bool			sdio;
	bool			st_clkdiv;
	bool			blksz_datactrl16;
	u32			pwrreg_powerup;
	bool			signal_direction;
};

static struct variant_data variant_arm = {
	.fifohalfsize		= 8 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_arm_extended_fifo = {
	.fifohalfsize		= 64 * 4,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_UP,
};

static struct variant_data variant_u300 = {
	.fifohalfsize		= 8 * 4,
	.clkreg_enable		= MCI_ST_U300_HWFCEN,
	.datalength_bits	= 16,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

static struct variant_data variant_ux500v2 = {
	.fifohalfsize		= 8 * 4,
	.clkreg			= MCI_CLK_ENABLE,
	.clkreg_enable		= MCI_ST_UX500_HWFCEN,
	.datalength_bits	= 24,
	.blksz_datactrl16	= true,
	.pwrreg_powerup		= MCI_PWR_ON,
	.signal_direction	= true,
};

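/*
 * Each variant_data block above is tied to a PrimeCell peripheral ID in the
 * mmci_ids[] table at the bottom of this file; probe() receives the matching
 * entry through id->data and caches it in host->variant, which the rest of
 * the driver consults for these quirks.
 */
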
/*
 * This must be called with host->lock held
 */
static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
{
	if (host->clk_reg != clk) {
		host->clk_reg = clk;
		writel(clk, host->base + MMCICLOCK);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
{
	if (host->pwr_reg != pwr) {
		host->pwr_reg = pwr;
		writel(pwr, host->base + MMCIPOWER);
	}
}

/*
 * This must be called with host->lock held
 */
static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
{
	struct variant_data *variant = host->variant;
	u32 clk = variant->clkreg;

	if (desired) {
		if (desired >= host->mclk) {
			clk = MCI_CLK_BYPASS;
			if (variant->st_clkdiv)
				clk |= MCI_ST_UX500_NEG_EDGE;
			host->cclk = host->mclk;
		} else if (variant->st_clkdiv) {
			/*
			 * DB8500 TRM says f = mclk / (clkdiv + 2)
			 *  => clkdiv = (mclk / f) - 2
			 * Round the divider up so we don't exceed the max
			 * frequency
			 */
			clk = DIV_ROUND_UP(host->mclk, desired) - 2;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (clk + 2);
		} else {
			/*
			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1))
			 *  => clkdiv = mclk / (2 * f) - 1
			 */
			clk = host->mclk / (2 * desired) - 1;
			if (clk >= 256)
				clk = 255;
			host->cclk = host->mclk / (2 * (clk + 1));
		}

		clk |= variant->clkreg_enable;
		clk |= MCI_CLK_ENABLE;
		/* This hasn't proven to be worthwhile */
		/* clk |= MCI_CLK_PWRSAVE; */
	}

	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
		clk |= MCI_4BIT_BUS;
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		clk |= MCI_ST_8BIT_BUS;

	mmci_write_clkreg(host, clk);
}

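/*
 * A worked example of the two divider formulas above (numbers chosen purely
 * for illustration): with mclk = 100 MHz and a desired rate of 400 kHz, the
 * ST divider is DIV_ROUND_UP(100000000, 400000) - 2 = 248, giving
 * cclk = 100 MHz / (248 + 2) = 400 kHz, while the PL180 divider is
 * 100000000 / (2 * 400000) - 1 = 124, giving
 * cclk = 100 MHz / (2 * (124 + 1)) = 400 kHz.
 */
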
static void
mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
{
	writel(0, host->base + MMCICOMMAND);

	host->mrq = NULL;
	host->cmd = NULL;

	mmc_request_done(host->mmc, mrq);

	pm_runtime_mark_last_busy(mmc_dev(host->mmc));
	pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}

static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
{
	void __iomem *base = host->base;

	if (host->singleirq) {
		unsigned int mask0 = readl(base + MMCIMASK0);

		mask0 &= ~MCI_IRQ1MASK;
		mask0 |= mask;

		writel(mask0, base + MMCIMASK0);
	}

	writel(mask, base + MMCIMASK1);
}

static void mmci_stop_data(struct mmci_host *host)
{
	writel(0, host->base + MMCIDATACTRL);
	mmci_set_mask1(host, 0);
	host->data = NULL;
}

static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
{
	unsigned int flags = SG_MITER_ATOMIC;

	if (data->flags & MMC_DATA_READ)
		flags |= SG_MITER_TO_SG;
	else
		flags |= SG_MITER_FROM_SG;

	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

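/*
 * SG_MITER_ATOMIC is used because the iterator is advanced from
 * mmci_pio_irq() with local interrupts disabled, so any highmem pages in the
 * scatterlist have to be mapped with the atomic kmap variants.
 */
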
/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
 * no custom DMA interfaces are supported.
 */
#ifdef CONFIG_DMA_ENGINE
static void __devinit mmci_dma_setup(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;
	const char *rxname, *txname;
	dma_cap_mask_t mask;

	if (!plat || !plat->dma_filter) {
		dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
		return;
	}

	/* initialize pre request cookie */
	host->next_data.cookie = 1;

	/* Try to acquire a generic DMA engine slave channel */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * If only an RX channel is specified, the driver will
	 * attempt to use it bidirectionally, however if it
	 * is specified but cannot be located, DMA will be disabled.
	 */
	if (plat->dma_rx_param) {
		host->dma_rx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_rx_param);
		/* E.g if no DMA hardware is present */
		if (!host->dma_rx_channel)
			dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
	}

	if (plat->dma_tx_param) {
		host->dma_tx_channel = dma_request_channel(mask,
							   plat->dma_filter,
							   plat->dma_tx_param);
		if (!host->dma_tx_channel)
			dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
	} else {
		host->dma_tx_channel = host->dma_rx_channel;
	}

	if (host->dma_rx_channel)
		rxname = dma_chan_name(host->dma_rx_channel);
	else
		rxname = "none";

	if (host->dma_tx_channel)
		txname = dma_chan_name(host->dma_tx_channel);
	else
		txname = "none";

	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n",
		 rxname, txname);

	/*
	 * Limit the maximum segment size in any SG entry according to
	 * the parameters of the DMA engine device.
	 */
	if (host->dma_tx_channel) {
		struct device *dev = host->dma_tx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
	if (host->dma_rx_channel) {
		struct device *dev = host->dma_rx_channel->device->dev;
		unsigned int max_seg_size = dma_get_max_seg_size(dev);

		if (max_seg_size < host->mmc->max_seg_size)
			host->mmc->max_seg_size = max_seg_size;
	}
}

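/*
 * A minimal sketch of the platform glue this function expects (the names
 * below are illustrative assumptions, not part of this driver): a board file
 * supplies a DMA engine filter function plus per-direction channel
 * parameters in its struct mmci_platform_data, e.g.
 *
 *	static struct mmci_platform_data board_mmc_plat = {
 *		.dma_filter	= board_dma_filter,	// hypothetical filter
 *		.dma_rx_param	= &board_mmc_dma_rx,	// hypothetical config
 *		.dma_tx_param	= &board_mmc_dma_tx,
 *	};
 *
 * dma_request_channel() then hands dma_rx_param/dma_tx_param to the filter
 * so that a suitable channel can be picked for each direction.
 */
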
/*
 * This is used in __devinit or __devexit so inline it
 * so it can be discarded.
 */
static inline void mmci_dma_release(struct mmci_host *host)
{
	struct mmci_platform_data *plat = host->plat;

	if (host->dma_rx_channel)
		dma_release_channel(host->dma_rx_channel);
	if (host->dma_tx_channel && plat->dma_tx_param)
		dma_release_channel(host->dma_tx_channel);
	host->dma_rx_channel = host->dma_tx_channel = NULL;
}

static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
	struct dma_chan *chan = host->dma_current;
	enum dma_data_direction dir;
	u32 status;
	int i;

	/* Wait up to 1ms for the DMA to complete */
	for (i = 0; ; i++) {
		status = readl(host->base + MMCISTATUS);
		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100)
			break;
		udelay(10);
	}

	/*
	 * Check to see whether we still have some data left in the FIFO -
	 * this catches DMA controllers which are unable to monitor the
	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non-
	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dmaengine_terminate_all(chan);
		if (!data->error)
			data->error = -EIO;
	}

	if (data->flags & MMC_DATA_WRITE) {
		dir = DMA_TO_DEVICE;
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!data->host_cookie)
		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);

	/*
	 * Use of DMA with scatter-gather is impossible.
	 * Give up with DMA and switch back to PIO mode.
	 */
	if (status & MCI_RXDATAAVLBLMASK) {
		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
		mmci_dma_release(host);
	}
}

static void mmci_dma_data_error(struct mmci_host *host)
{
	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
	dmaengine_terminate_all(host->dma_current);
}

static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
			      struct mmci_host_next *next)
{
	struct variant_data *variant = host->variant;
	struct dma_slave_config conf = {
		.src_addr = host->phybase + MMCIFIFO,
		.dst_addr = host->phybase + MMCIFIFO,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
	};
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *desc;
	enum dma_data_direction buffer_dirn;
	int nr_sg;

	/* Check if next job is already prepared */
	if (data->host_cookie && !next &&
	    host->dma_current && host->dma_desc_current)
		return 0;

	host->dma_current = NULL;
	host->dma_desc_current = NULL;

	if (data->flags & MMC_DATA_READ) {
		conf.direction = DMA_DEV_TO_MEM;
		buffer_dirn = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		conf.direction = DMA_MEM_TO_DEV;
		buffer_dirn = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* If there's no DMA channel, fall back to PIO */
	if (!chan)
		return -EINVAL;

	/* If less than or equal to the fifo size, don't bother with DMA */
	if (data->blksz * data->blocks <= variant->fifosize)
		return -EINVAL;

	device = chan->device;
	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	if (nr_sg == 0)
		return -EINVAL;

	dmaengine_slave_config(chan, &conf);
	desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
					    conf.direction, DMA_CTRL_ACK);
	if (!desc)
		goto unmap_exit;

	if (next) {
		next->dma_chan = chan;
		next->dma_desc = desc;
	} else {
		host->dma_current = chan;
		host->dma_desc_current = desc;
	}

	return 0;

 unmap_exit:
	dmaengine_terminate_all(chan);
	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
	return -ENOMEM;
}

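/*
 * Note on the two callers of mmci_dma_prep_data(): mmci_pre_request() passes
 * a struct mmci_host_next so the prepared descriptor is parked in
 * host->next_data and picked up later by mmci_get_next_data(), while
 * mmci_dma_start_data() passes next == NULL and the descriptor is installed
 * directly as the current one.
 */
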
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	int ret;
	struct mmc_data *data = host->data;

	ret = mmci_dma_prep_data(host, host->data, NULL);
	if (ret)
		return ret;

	/* Okay, go for it. */
	dev_vdbg(mmc_dev(host->mmc),
		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
		 data->sg_len, data->blksz, data->blocks, data->flags);
	dmaengine_submit(host->dma_desc_current);
	dma_async_issue_pending(host->dma_current);

	datactrl |= MCI_DPSM_DMAENABLE;

	/* Trigger the DMA transfer */
	writel(datactrl, host->base + MMCIDATACTRL);

	/*
	 * Let the MMCI say when the data is ended and it's time
	 * to fire next DMA request. When that happens, MMCI will
	 * call mmci_data_end()
	 */
	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
	       host->base + MMCIMASK0);

	return 0;
}

static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
	struct mmci_host_next *next = &host->next_data;

	if (data->host_cookie && data->host_cookie != next->cookie) {
		pr_warning("[%s] invalid cookie: data->host_cookie %d"
			   " host->next_data.cookie %d\n",
			   __func__, data->host_cookie, host->next_data.cookie);
		data->host_cookie = 0;
	}

	if (!data->host_cookie)
		return;

	host->dma_desc_current = next->dma_desc;
	host->dma_current = next->dma_chan;

	next->dma_desc = NULL;
	next->dma_chan = NULL;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
			     bool is_first_req)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct mmci_host_next *nd = &host->next_data;

	if (!data)
		return;

	if (data->host_cookie) {
		data->host_cookie = 0;
		return;
	}

	/* if config for dma */
	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
		if (mmci_dma_prep_data(host, data, nd))
			data->host_cookie = 0;
		else
			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
	}
}

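/*
 * The value handed back in data->host_cookie is how the core's
 * pre_req/post_req hooks and this driver agree that a descriptor was
 * prepared ahead of time: zero means "not prepared", and the counter is
 * forced back to 1 rather than 0 when it wraps negative.
 */
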
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
			      int err)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;
	struct dma_chan *chan;
	enum dma_data_direction dir;

	if (!data)
		return;

	if (data->flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;
		chan = host->dma_rx_channel;
	} else {
		dir = DMA_TO_DEVICE;
		chan = host->dma_tx_channel;
	}

	/* if config for dma */
	if (chan) {
		if (err)
			dmaengine_terminate_all(chan);
		if (data->host_cookie)
			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
				     data->sg_len, dir);
		mrq->data->host_cookie = 0;
	}
}

#else
/* Blank functions if the DMA engine is not available */
static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) {}
static inline void mmci_dma_setup(struct mmci_host *host) {}
static inline void mmci_dma_release(struct mmci_host *host) {}
static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) {}
static inline void mmci_dma_data_error(struct mmci_host *host) {}
static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
	return -ENOSYS;
}

#define mmci_pre_request NULL
#define mmci_post_request NULL

#endif

static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
{
	struct variant_data *variant = host->variant;
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

	host->data = data;
	host->size = data->blksz * data->blocks;
	data->bytes_xfered = 0;

	clks = (unsigned long long)data->timeout_ns * host->cclk;
	do_div(clks, 1000000000UL);

	timeout = data->timeout_clks + (unsigned int)clks;

	base = host->base;
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
	else
		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;

	/* The ST Micro variants have a special bit to enable SDIO */
	if (variant->sdio && host->mmc->card)
		if (mmc_card_sdio(host->mmc->card))
			datactrl |= MCI_ST_DPSM_SDIOEN;

	/*
	 * Attempt to use DMA operation mode, if this
	 * should fail, fall back to PIO mode
	 */
	if (!mmci_dma_start_data(host, datactrl))
		return;

	/* IRQ mode, map the SG list for CPU reading/writing */
	mmci_init_sg(host, data);

	if (data->flags & MMC_DATA_READ) {
		irqmask = MCI_RXFIFOHALFFULLMASK;

		/*
		 * If we have less than the fifo 'half-full' threshold to
		 * transfer, trigger a PIO interrupt as soon as any data
		 * is available.
		 */
		if (host->size < variant->fifohalfsize)
			irqmask |= MCI_RXDATAAVLBLMASK;
	} else {
		/*
		 * We don't actually need to include "FIFO empty" here
		 * since its implicit in "FIFO half empty".
		 */
		irqmask = MCI_TXFIFOHALFEMPTYMASK;
	}

	writel(datactrl, base + MMCIDATACTRL);
	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
	mmci_set_mask1(host, irqmask);
}

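/*
 * Example of the block size encoding above (numbers for illustration only):
 * for a 512-byte block, ffs(512) - 1 = 9, so the standard variants program
 * the power-of-two exponent 9 into bits [7:4] of MMCIDATACTRL, while
 * variants with blksz_datactrl16 set write the raw value 512 into bits
 * [30:16] instead.
 */
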
static void
mmci_start_command(struct mmci_host *host, struct mmc_command *cmd, u32 c)
{
	void __iomem *base = host->base;

	dev_dbg(mmc_dev(host->mmc), "op %02x arg %08x flags %08x\n",
		cmd->opcode, cmd->arg, cmd->flags);

	if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
		writel(0, base + MMCICOMMAND);
		udelay(1);
	}

	c |= cmd->opcode | MCI_CPSM_ENABLE;
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			c |= MCI_CPSM_LONGRSP;
		c |= MCI_CPSM_RESPONSE;
	}
	if (/*interrupt*/0)
		c |= MCI_CPSM_INTERRUPT;

	host->cmd = cmd;

	writel(cmd->arg, base + MMCIARGUMENT);
	writel(c, base + MMCICOMMAND);
}

static void
mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
	      unsigned int status)
{
	/* First check for errors */
	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
		u32 remain, success;

		/* Terminate the DMA transfer */
		if (dma_inprogress(host))
			mmci_dma_data_error(host);

		/*
		 * Calculate how far we are into the transfer. Note that
		 * the data counter gives the number of bytes transferred
		 * on the MMC bus, not on the host side. On reads, this
		 * can be as much as a FIFO-worth of data ahead. This
		 * matters for FIFO overruns only.
		 */
		remain = readl(host->base + MMCIDATACNT);
		success = data->blksz * data->blocks - remain;

		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n",
			status, success);
		if (status & MCI_DATACRCFAIL) {
			/* Last block was not successful */
			success -= 1;
			data->error = -EILSEQ;
		} else if (status & MCI_DATATIMEOUT) {
			data->error = -ETIMEDOUT;
		} else if (status & MCI_STARTBITERR) {
			data->error = -ECOMM;
		} else if (status & MCI_TXUNDERRUN) {
			data->error = -EIO;
		} else if (status & MCI_RXOVERRUN) {
			if (success > host->variant->fifosize)
				success -= host->variant->fifosize;
			else
				success = 0;
			data->error = -EIO;
		}
		data->bytes_xfered = round_down(success, data->blksz);
	}

	if (status & MCI_DATABLOCKEND)
		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n");

	if (status & MCI_DATAEND || data->error) {
		if (dma_inprogress(host))
			mmci_dma_unmap(host, data);
		mmci_stop_data(host);

		if (!data->error)
			/* The error clause is handled above, success! */
			data->bytes_xfered = data->blksz * data->blocks;

		if (!data->stop) {
			mmci_request_end(host, data->mrq);
		} else {
			mmci_start_command(host, data->stop, 0);
		}
	}
}

static void
mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
	     unsigned int status)
{
	void __iomem *base = host->base;

	host->cmd = NULL;

	if (status & MCI_CMDTIMEOUT) {
		cmd->error = -ETIMEDOUT;
	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {
		cmd->error = -EILSEQ;
	} else {
		cmd->resp[0] = readl(base + MMCIRESPONSE0);
		cmd->resp[1] = readl(base + MMCIRESPONSE1);
		cmd->resp[2] = readl(base + MMCIRESPONSE2);
		cmd->resp[3] = readl(base + MMCIRESPONSE3);
	}

	if (!cmd->data || cmd->error) {
		if (host->data) {
			/* Terminate the DMA transfer */
			if (dma_inprogress(host))
				mmci_dma_data_error(host);
			mmci_stop_data(host);
		}
		mmci_request_end(host, cmd->mrq);
	} else if (!(cmd->data->flags & MMC_DATA_READ)) {
		mmci_start_data(host, cmd->data);
	}
}

static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int remain)
{
	void __iomem *base = host->base;
	char *ptr = buffer;
	u32 status;
	int host_remain = host->size;

	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);

		if (count > remain)
			count = remain;
		if (count <= 0)
			break;

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc). Therefore make sure to always read the last bytes
		 * while only doing full 32-bit reads towards the FIFO.
		 */
		if (unlikely(count & 0x3)) {
			if (count < 4) {
				unsigned char buf[4];
				readsl(base + MMCIFIFO, buf, 1);
				memcpy(ptr, buf, count);
			} else {
				readsl(base + MMCIFIFO, ptr, count >> 2);
				count &= ~0x3;
			}
		} else {
			readsl(base + MMCIFIFO, ptr, count >> 2);
		}

		ptr += count;
		remain -= count;
		host_remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_RXDATAAVLBL);

	return ptr - buffer;
}

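/*
 * The count calculation above relies on MMCIFIFOCNT holding the number of
 * words still to be transferred for the current data block: for example,
 * with 64 bytes left overall (host_remain) and MMCIFIFOCNT reading 12 words,
 * 64 - (12 << 2) = 16 bytes are sitting in the FIFO ready to be read out.
 */
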
static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status)
{
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	char *ptr = buffer;

	do {
		unsigned int count, maxcnt;

		maxcnt = status & MCI_TXFIFOEMPTY ?
			 variant->fifosize : variant->fifohalfsize;
		count = min(remain, maxcnt);

		/*
		 * The ST Micro variant for SDIO transfer sizes
		 * less than 8 bytes should have clock H/W flow
		 * control disabled.
		 */
		if (variant->sdio &&
		    mmc_card_sdio(host->mmc->card)) {
			u32 clk;

			if (count < 8)
				clk = host->clk_reg & ~variant->clkreg_enable;
			else
				clk = host->clk_reg | variant->clkreg_enable;

			mmci_write_clkreg(host, clk);
		}

		/*
		 * SDIO especially may want to send something that is
		 * not divisible by 4 (as opposed to card sectors
		 * etc), and the FIFO only accepts full 32-bit writes.
		 * So compensate by adding +3 on the count, a single
		 * byte becomes a 32bit write, 7 bytes will be two
		 * 32bit writes etc.
		 */
		writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);

		ptr += count;
		remain -= count;

		if (remain == 0)
			break;

		status = readl(base + MMCISTATUS);
	} while (status & MCI_TXFIFOHALFEMPTY);

	return ptr - buffer;
}

/*
 * PIO data transfer IRQ handler.
 */
static irqreturn_t mmci_pio_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	struct sg_mapping_iter *sg_miter = &host->sg_miter;
	struct variant_data *variant = host->variant;
	void __iomem *base = host->base;
	unsigned long flags;
	u32 status;

	status = readl(base + MMCISTATUS);

	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);

	local_irq_save(flags);

	do {
		unsigned int remain, len;
		char *buffer;

		/*
		 * For write, we only need to test the half-empty flag
		 * here - if the FIFO is completely empty, then by
		 * definition it is more than half empty.
		 *
		 * For read, check for data available.
		 */
		if (!(status & (MCI_TXFIFOHALFEMPTY|MCI_RXDATAAVLBL)))
			break;

		if (!sg_miter_next(sg_miter))
			break;

		buffer = sg_miter->addr;
		remain = sg_miter->length;

		len = 0;
		if (status & MCI_RXACTIVE)
			len = mmci_pio_read(host, buffer, remain);
		if (status & MCI_TXACTIVE)
			len = mmci_pio_write(host, buffer, remain, status);

		sg_miter->consumed = len;

		host->size -= len;
		remain -= len;

		if (remain)
			break;

		status = readl(base + MMCISTATUS);
	} while (1);

	sg_miter_stop(sg_miter);

	local_irq_restore(flags);

	/*
	 * If we have less than the fifo 'half-full' threshold to transfer,
	 * trigger a PIO interrupt as soon as any data is available.
	 */
	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize)
		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);

	/*
	 * If we run out of data, disable the data IRQs; this
	 * prevents a race where the FIFO becomes empty before
	 * the chip itself has disabled the data path, and
	 * stops us racing with our data end IRQ.
	 */
	if (host->size == 0) {
		mmci_set_mask1(host, 0);
		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
	}

	return IRQ_HANDLED;
}

/*
 * Handle completion of command and data transfers.
 */
static irqreturn_t mmci_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;
	u32 status;
	int ret = 0;

	spin_lock(&host->lock);

	do {
		struct mmc_command *cmd;
		struct mmc_data *data;

		status = readl(host->base + MMCISTATUS);

		if (host->singleirq) {
			if (status & readl(host->base + MMCIMASK1))
				mmci_pio_irq(irq, dev_id);

			status &= ~MCI_IRQ1MASK;
		}

		status &= readl(host->base + MMCIMASK0);
		writel(status, host->base + MMCICLEAR);

		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);

		data = host->data;
		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
			      MCI_DATABLOCKEND) && data)
			mmci_data_irq(host, data, status);

		cmd = host->cmd;
		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
			mmci_cmd_irq(host, cmd, status);

		ret = 1;
	} while (status);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(ret);
}

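/*
 * When host->singleirq is set (only one interrupt line is wired up on the
 * platform), the loop above also checks MMCIMASK1 and dispatches the PIO
 * work that would normally arrive on the dedicated second interrupt, then
 * masks those bits out of the status word so they are not mistaken for
 * command/data events.
 */
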
static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmci_host *host = mmc_priv(mmc);
	unsigned long flags;

	WARN_ON(host->mrq != NULL);

	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
			mrq->data->blksz);
		mrq->cmd->error = -EINVAL;
		mmc_request_done(mmc, mrq);
		return;
	}

	pm_runtime_get_sync(mmc_dev(mmc));

	spin_lock_irqsave(&host->lock, flags);

	host->mrq = mrq;

	if (mrq->data)
		mmci_get_next_data(host, mrq->data);

	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
		mmci_start_data(host, mrq->data);

	mmci_start_command(host, mrq->cmd, 0);

	spin_unlock_irqrestore(&host->lock, flags);
}

static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct variant_data *variant = host->variant;
	u32 pwr = 0;
	unsigned long flags;
	int ret;

	pm_runtime_get_sync(mmc_dev(mmc));

	if (host->plat->ios_handler &&
	    host->plat->ios_handler(mmc_dev(mmc), ios))
		dev_err(mmc_dev(mmc), "platform ios_handler failed\n");

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (host->vcc)
			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
		break;
	case MMC_POWER_UP:
		if (host->vcc) {
			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
			if (ret) {
				dev_err(mmc_dev(mmc), "unable to set OCR\n");
				/*
				 * The .set_ios() function in the mmc_host_ops
				 * struct return void, and failing to set the
				 * power should be rare so we print an error
				 * and return here.
				 */
				goto out;
			}
		}
		/*
		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
		 * and instead uses MCI_PWR_ON so apply whatever value is
		 * configured in the variant data.
		 */
		pwr |= variant->pwrreg_powerup;
		break;
	case MMC_POWER_ON:
		pwr |= MCI_PWR_ON;
		break;
	}

	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
		/*
		 * The ST Micro variant has some additional bits
		 * indicating signal direction for the signals in
		 * the SD/MMC bus and feedback-clock usage.
		 */
		pwr |= host->plat->sigdir;

		if (ios->bus_width == MMC_BUS_WIDTH_4)
			pwr &= ~MCI_ST_DATA74DIREN;
		else if (ios->bus_width == MMC_BUS_WIDTH_1)
			pwr &= (~MCI_ST_DATA74DIREN &
				~MCI_ST_DATA31DIREN &
				~MCI_ST_DATA2DIREN);
	}

	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
		if (host->hw_designer != AMBA_VENDOR_ST)
			pwr |= MCI_ROD;
		else {
			/*
			 * The ST Micro variant uses the ROD bit for something
			 * else and only has OD (Open Drain).
			 */
			pwr |= MCI_OD;
		}
	}

	spin_lock_irqsave(&host->lock, flags);

	mmci_set_clkreg(host, ios->clock);
	mmci_write_pwrreg(host, pwr);

	spin_unlock_irqrestore(&host->lock, flags);

 out:
	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));
}

static int mmci_get_ro(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);

	if (host->gpio_wp == -ENOSYS)
		return -ENOSYS;

	return gpio_get_value_cansleep(host->gpio_wp);
}

static int mmci_get_cd(struct mmc_host *mmc)
{
	struct mmci_host *host = mmc_priv(mmc);
	struct mmci_platform_data *plat = host->plat;
	unsigned int status;

	if (host->gpio_cd == -ENOSYS) {
		if (!plat->status)
			return 1; /* Assume always present */

		status = plat->status(mmc_dev(host->mmc));
	} else
		status = !!gpio_get_value_cansleep(host->gpio_cd)
			^ plat->cd_invert;

	/*
	 * Use positive logic throughout - status is zero for no card,
	 * non-zero for card inserted.
	 */
	return status;
}

static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
{
	struct mmci_host *host = dev_id;

	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}

static const struct mmc_host_ops mmci_ops = {
	.request	= mmci_request,
	.pre_req	= mmci_pre_request,
	.post_req	= mmci_post_request,
	.set_ios	= mmci_set_ios,
	.get_ro		= mmci_get_ro,
	.get_cd		= mmci_get_cd,
};

static int __devinit mmci_probe(struct amba_device *dev,
	const struct amba_id *id)
{
	struct mmci_platform_data *plat = dev->dev.platform_data;
	struct variant_data *variant = id->data;
	struct mmci_host *host;
	struct mmc_host *mmc;
	int ret;

	/* must have platform data */
	if (!plat) {
		ret = -EINVAL;
		goto out;
	}

	ret = amba_request_regions(dev, DRIVER_NAME);
	if (ret)
		goto out;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto rel_regions;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;

	host->gpio_wp = -ENOSYS;
	host->gpio_cd = -ENOSYS;
	host->gpio_cd_irq = -1;

	host->hw_designer = amba_manf(dev);
	host->hw_revision = amba_rev(dev);
	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);

	host->clk = clk_get(&dev->dev, NULL);
	if (IS_ERR(host->clk)) {
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto host_free;
	}

	ret = clk_prepare(host->clk);
	if (ret)
		goto clk_free;

	ret = clk_enable(host->clk);
	if (ret)
		goto clk_unprep;

	host->plat = plat;
	host->variant = variant;
	host->mclk = clk_get_rate(host->clk);
	/*
	 * According to the spec, mclk is max 100 MHz,
	 * so we try to adjust the clock down to this,
	 * (if possible).
	 */
	if (host->mclk > 100000000) {
		ret = clk_set_rate(host->clk, 100000000);
		if (ret < 0)
			goto clk_disable;
		host->mclk = clk_get_rate(host->clk);
		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
			host->mclk);
	}
	host->phybase = dev->res.start;
	host->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!host->base) {
		ret = -ENOMEM;
		goto clk_disable;
	}

	mmc->ops = &mmci_ops;
	/*
	 * The ARM and ST versions of the block have slightly different
	 * clock divider equations which means that the minimum divider
	 * differs too.
	 */
	if (variant->st_clkdiv)
		mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
	else
		mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
	/*
	 * If the platform data supplies a maximum operating
	 * frequency, this takes precedence. Else, we fall back
	 * to using the module parameter, which has a (low)
	 * default value in case it is not specified. Either
	 * value must not exceed the clock rate into the block,
	 * of course.
	 */
	if (plat->f_max)
		mmc->f_max = min(host->mclk, plat->f_max);
	else
		mmc->f_max = min(host->mclk, fmax);
	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);

#ifdef CONFIG_REGULATOR
	/* If we're using the regulator framework, try to fetch a regulator */
	host->vcc = regulator_get(&dev->dev, "vmmc");
	if (IS_ERR(host->vcc))
		host->vcc = NULL;
	else {
		int mask = mmc_regulator_get_ocrmask(host->vcc);

		if (mask < 0)
			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
				mask);
		else {
			host->mmc->ocr_avail = (u32) mask;
			if (plat->ocr_mask)
				dev_warn(&dev->dev,
				 "Provided ocr_mask/setpower will not be used "
				 "(using regulator instead)\n");
		}
	}
#endif
	/* Fall back to platform data if no regulator is found */
	if (host->vcc == NULL)
		mmc->ocr_avail = plat->ocr_mask;
	mmc->caps = plat->capabilities;
	mmc->caps2 = plat->capabilities2;

	/*
	 * We can do SGIO
	 */
	mmc->max_segs = NR_SG;

	/*
	 * Since only a certain number of bits are valid in the data length
	 * register, we must ensure that we don't exceed 2^num-1 bytes in a
	 * single request.
	 */
	mmc->max_req_size = (1 << variant->datalength_bits) - 1;

	/*
	 * Set the maximum segment size. Since we aren't doing DMA
	 * (yet) we are only limited by the data length register.
	 */
	mmc->max_seg_size = mmc->max_req_size;

	/*
	 * Block size can be up to 2048 bytes, but must be a power of two.
	 */
	mmc->max_blk_size = 1 << 11;

	/*
	 * Limit the number of blocks transferred so that we don't overflow
	 * the maximum request size.
	 */
	mmc->max_blk_count = mmc->max_req_size >> 11;

	spin_lock_init(&host->lock);

	writel(0, host->base + MMCIMASK0);
	writel(0, host->base + MMCIMASK1);
	writel(0xfff, host->base + MMCICLEAR);

	if (gpio_is_valid(plat->gpio_cd)) {
		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_cd);
		if (ret == 0)
			host->gpio_cd = plat->gpio_cd;
		else if (ret != -ENOSYS)
			goto err_gpio_cd;

		/*
		 * A gpio pin that will detect cards when inserted and removed
		 * will most likely want to trigger on the edges if it is
		 * 0 when ejected and 1 when inserted (or mutatis mutandis
		 * for the inverted case) so we request triggers on both
		 * edges.
		 */
		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
				mmci_cd_irq,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				DRIVER_NAME " (cd)", host);
		if (ret >= 0)
			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
	}
	if (gpio_is_valid(plat->gpio_wp)) {
		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
		if (ret == 0)
			ret = gpio_direction_input(plat->gpio_wp);
		if (ret == 0)
			host->gpio_wp = plat->gpio_wp;
		else if (ret != -ENOSYS)
			goto err_gpio_wp;
	}

	if ((host->plat->status || host->gpio_cd != -ENOSYS)
	    && host->gpio_cd_irq < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
	if (ret)
		goto unmap;

	if (dev->irq[1] == NO_IRQ || !dev->irq[1])
		host->singleirq = true;
	else {
		ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
				  DRIVER_NAME " (pio)", host);
		if (ret)
			goto irq0_free;
	}

	writel(MCI_IRQENABLE, host->base + MMCIMASK0);

	amba_set_drvdata(dev, mmc);

	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev),
		 amba_rev(dev), (unsigned long long)dev->res.start,
		 dev->irq[0], dev->irq[1]);

	mmci_dma_setup(host);

	pm_runtime_set_autosuspend_delay(&dev->dev, 50);
	pm_runtime_use_autosuspend(&dev->dev);
	pm_runtime_put(&dev->dev);

	mmc_add_host(mmc);

	return 0;

 irq0_free:
	free_irq(dev->irq[0], host);
 err_gpio_wp:
	if (host->gpio_wp != -ENOSYS)
		gpio_free(host->gpio_wp);
 err_gpio_cd:
	if (host->gpio_cd_irq >= 0)
		free_irq(host->gpio_cd_irq, host);
	if (host->gpio_cd != -ENOSYS)
		gpio_free(host->gpio_cd);
 unmap:
	iounmap(host->base);
 clk_disable:
	clk_disable(host->clk);
 clk_unprep:
	clk_unprepare(host->clk);
 clk_free:
	clk_put(host->clk);
 host_free:
	mmc_free_host(mmc);
 rel_regions:
	amba_release_regions(dev);
 out:
	return ret;
}

static int __devexit mmci_remove(struct amba_device *dev)
{
	struct mmc_host *mmc = amba_get_drvdata(dev);

	amba_set_drvdata(dev, NULL);

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		/*
		 * Undo pm_runtime_put() in probe. We use the _sync
		 * version here so that we can access the primecell.
		 */
		pm_runtime_get_sync(&dev->dev);

		mmc_remove_host(mmc);

		writel(0, host->base + MMCIMASK0);
		writel(0, host->base + MMCIMASK1);

		writel(0, host->base + MMCICOMMAND);
		writel(0, host->base + MMCIDATACTRL);

		mmci_dma_release(host);
		free_irq(dev->irq[0], host);
		if (!host->singleirq)
			free_irq(dev->irq[1], host);

		if (host->gpio_wp != -ENOSYS)
			gpio_free(host->gpio_wp);
		if (host->gpio_cd_irq >= 0)
			free_irq(host->gpio_cd_irq, host);
		if (host->gpio_cd != -ENOSYS)
			gpio_free(host->gpio_cd);

		iounmap(host->base);
		clk_disable(host->clk);
		clk_unprepare(host->clk);
		clk_put(host->clk);

		if (host->vcc)
			mmc_regulator_set_ocr(mmc, host->vcc, 0);
		regulator_put(host->vcc);

		mmc_free_host(mmc);

		amba_release_regions(dev);
	}

	return 0;
}

#ifdef CONFIG_SUSPEND
static int mmci_suspend(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		ret = mmc_suspend_host(mmc);
		if (ret == 0) {
			pm_runtime_get_sync(dev);
			writel(0, host->base + MMCIMASK0);
		}
	}

	return ret;
}

static int mmci_resume(struct device *dev)
{
	struct amba_device *adev = to_amba_device(dev);
	struct mmc_host *mmc = amba_get_drvdata(adev);
	int ret = 0;

	if (mmc) {
		struct mmci_host *host = mmc_priv(mmc);

		writel(MCI_IRQENABLE, host->base + MMCIMASK0);
		pm_runtime_put(dev);

		ret = mmc_resume_host(mmc);
	}

	return ret;
}
#endif

static const struct dev_pm_ops mmci_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
};

static struct amba_id mmci_ids[] = {
	{
		.data	= &variant_arm,
	},
	{
		.data	= &variant_arm_extended_fifo,
	},
	{
		.data	= &variant_arm,
	},
	/* ST Micro variants */
	{
		.data	= &variant_u300,
	},
	{
		.data	= &variant_u300,
	},
	{
		.data	= &variant_ux500,
	},
	{
		.data	= &variant_ux500v2,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, mmci_ids);

static struct amba_driver mmci_driver = {
	.drv		= {
		.name	= DRIVER_NAME,
		.pm	= &mmci_dev_pm_ops,
	},
	.probe		= mmci_probe,
	.remove		= __devexit_p(mmci_remove),
	.id_table	= mmci_ids,
};

module_amba_driver(mmci_driver);

module_param(fmax, uint, 0444);

MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
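
/*
 * Sketch of overriding the default maximum bus frequency at load time via
 * the fmax parameter declared above (assuming the driver is built as a
 * module named "mmci"; the exact name depends on the build configuration):
 *
 *	modprobe mmci fmax=26000000
 */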