2 * linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
4 * Copyright (C) 2007 Google Inc,
5 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
6 * Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
14 * Author: San Mehat (san@android.com)
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/init.h>
21 #include <linux/ioport.h>
22 #include <linux/device.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25 #include <linux/err.h>
26 #include <linux/highmem.h>
27 #include <linux/log2.h>
28 #include <linux/mmc/host.h>
29 #include <linux/mmc/card.h>
30 #include <linux/mmc/sdio.h>
31 #include <linux/clk.h>
32 #include <linux/scatterlist.h>
33 #include <linux/platform_device.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/debugfs.h>
37 #include <linux/memory.h>
39 #include <asm/cacheflush.h>
40 #include <asm/div64.h>
41 #include <asm/sizes.h>
44 #include <mach/msm_iomap.h>
/* Driver identity and compile-time/module tunables. */
49 #define DRIVER_NAME "msm-sdcc"
/* Bus-clock power save: gate clocks after BUSCLK_TIMEOUT jiffies of idle. */
51 #define BUSCLK_PWRSAVE 1
52 #define BUSCLK_TIMEOUT (HZ)
/* Controller clock range in Hz (fmin also used as fallback divisor). */
53 static unsigned int msmsdcc_fmin = 144000;
54 static unsigned int msmsdcc_fmax = 50000000;
/* Feature switches: 4-bit bus width, HW power save, polled PIO, SDIO IRQ
 * (sdioirq defaults to 0 — zero-initialized static). */
55 static unsigned int msmsdcc_4bit = 1;
56 static unsigned int msmsdcc_pwrsave = 1;
57 static unsigned int msmsdcc_piopoll = 1;
58 static unsigned int msmsdcc_sdioirq;
/* Spin budgets for busy-wait loops (PIO data / command completion). */
60 #define PIO_SPINMAX 30
61 #define CMD_SPINMAX 20
/*
 * Gate the SDCC interface clocks, either immediately or deferred.
 * NOTE(review): this chunk is missing lines — the if/else around the
 * deferred vs. immediate paths is not visible; presumably `deferr` selects
 * mod_timer() (deferred gate) vs. the del_timer_sync()+clk_disable() pair.
 * Must not be called with a request in flight (BUG_ON below).
 */
65 msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
67 WARN_ON(!host->clks_on);
69 BUG_ON(host->curr.mrq);
/* Deferred: (re)arm the bus-clock idle timer. */
72 mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
/* Immediate: cancel any pending timer, then disable both clocks. */
74 del_timer_sync(&host->busclk_timer);
75 clk_disable(host->clk);
76 clk_disable(host->pclk);
/*
 * Ungate the SDCC clocks: pclk first, then the controller clock; on
 * controller-clock failure the pclk is disabled again (error paths are
 * partially missing from this chunk). Ends with a >=3-controller-clock-cycle
 * settle delay before register access.
 */
82 msmsdcc_enable_clocks(struct msmsdcc_host *host)
86 WARN_ON(host->clks_on);
/* Cancel a pending deferred-disable so it cannot race us. */
88 del_timer_sync(&host->busclk_timer);
90 rc = clk_enable(host->pclk);
93 rc = clk_enable(host->clk);
/* Unwind pclk if the main clock failed to enable. */
95 clk_disable(host->pclk);
/* 3 clk cycles in usec, rounded up; fall back to fmin if rate unknown. */
98 udelay(1 + ((3 * USEC_PER_SEC) /
99 (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
/* Read a 32-bit SDCC register at byte offset @reg from the mapped base. */
104 static inline unsigned int
105 msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
107 return readl(host->base + reg);
/*
 * Write a 32-bit SDCC register, then wait at least 3 controller clock
 * cycles — the hardware requires this settle time between register writes.
 */
111 msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
113 writel(data, host->base + reg);
114 /* 3 clk delay required! */
115 udelay(1 + ((3 * USEC_PER_SEC) /
116 (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
/* Forward declaration — msmsdcc_start_command is defined further down. */
120 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
/*
 * Complete an MMC request: clear current-request state, propagate the byte
 * count, defer-gate the clocks, and notify the MMC core. Called with
 * host->lock held; the lock is dropped around mmc_request_done() because
 * the core may re-enter the driver.
 */
124 msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
126 BUG_ON(host->curr.data);
128 host->curr.mrq = NULL;
129 host->curr.cmd = NULL;
132 mrq->data->bytes_xfered = host->curr.data_xfered;
/* NOTE(review): the branch body for the -ETIMEDOUT case is missing here. */
133 if (mrq->cmd->error == -ETIMEDOUT)
/* Deferred clock gate (arms the busclk timer). */
137 msmsdcc_disable_clocks(host, 1);
140 * Need to drop the host lock here; mmc_request_done may call
141 * back into the driver...
143 spin_unlock(&host->lock);
144 mmc_request_done(host->mmc, mrq);
145 spin_lock(&host->lock);
/* Reset the per-transfer data-state tracking after a data phase ends. */
149 msmsdcc_stop_data(struct msmsdcc_host *host)
151 host->curr.data = NULL;
152 host->curr.got_dataend = host->curr.got_datablkend = 0;
/*
 * Physical address of this controller instance's data FIFO, used as the
 * DMA source/destination. NOTE(review): the `case N:` labels (pdev_id 1-4)
 * are missing from this chunk; each return maps one SDC instance.
 */
155 uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
157 switch (host->pdev_id) {
159 return MSM_SDC1_PHYS + MMCIFIFO;
161 return MSM_SDC2_PHYS + MMCIFIFO;
163 return MSM_SDC3_PHYS + MMCIFIFO;
165 return MSM_SDC4_PHYS + MMCIFIFO;
/* Issue a command: load the argument register, then the command register
 * (the MMCICOMMAND write is what actually starts the command). */
172 msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c) {
173 msmsdcc_writel(host, arg, MMCIARGUMENT);
174 msmsdcc_writel(host, c, MMCICOMMAND);
/*
 * Data-mover "execute" callback: program the deferred data-phase registers
 * (saved earlier in msmsdcc_start_data) and kick the command just as the
 * DMA channel starts, so the controller and DMA engine line up.
 * Uses raw writel (not msmsdcc_writel) — no per-write settle delay here.
 */
178 msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
180 struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
182 writel(host->cmd_timeout, host->base + MMCIDATATIMER);
183 writel((unsigned int)host->curr.xfer_size, host->base + MMCIDATALENGTH);
184 writel(host->cmd_pio_irqmask, host->base + MMCIMASK1);
185 writel(host->cmd_datactrl, host->base + MMCIDATACTRL);
/* Fire the daisy-chained command saved for this transfer. */
188 msmsdcc_start_command_exec(host,
189 (u32)host->cmd_cmd->arg, (u32)host->cmd_c);
191 host->dma.active = 1;
/*
 * Data-mover completion callback. Validates the DMA result, records errors,
 * unmaps the scatterlist, flushes dcache for user pages on reads, and — if
 * the controller already signalled DATAEND/DATABLKEND — completes the
 * request (or issues the stop command). Runs with host->lock taken here.
 * NOTE(review): several guard lines (early-out when !mrq, goto labels) are
 * missing from this chunk.
 */
195 msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
197 struct msm_dmov_errdata *err)
199 struct msmsdcc_dma_data *dma_data =
200 container_of(cmd, struct msmsdcc_dma_data, hdr);
201 struct msmsdcc_host *host = dma_data->host;
203 struct mmc_request *mrq;
205 spin_lock_irqsave(&host->lock, flags);
206 host->dma.active = 0;
208 mrq = host->curr.mrq;
/* A result without the VALID bit means the data mover misbehaved. */
212 if (!(result & DMOV_RSLT_VALID)) {
213 pr_err("msmsdcc: Invalid DataMover result\n");
217 if (result & DMOV_RSLT_DONE) {
/* Success: the full programmed transfer size moved. */
218 host->curr.data_xfered = host->curr.xfer_size;
221 if (result & DMOV_RSLT_ERROR)
222 pr_err("%s: DMA error (0x%.8x)\n",
223 mmc_hostname(host->mmc), result);
224 if (result & DMOV_RSLT_FLUSH)
225 pr_err("%s: DMA channel flushed (0x%.8x)\n",
226 mmc_hostname(host->mmc), result);
/* Dump the flush state words for diagnosis. */
228 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
229 err->flush[0], err->flush[1], err->flush[2],
230 err->flush[3], err->flush[4], err->flush[5]);
/* Record a generic I/O error unless a more specific one is already set. */
231 if (!mrq->data->error)
232 mrq->data->error = -EIO;
234 dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
/* Reads into user pages: flush dcache so userspace sees the DMA'd data. */
237 if (host->curr.user_pages) {
238 struct scatterlist *sg = host->dma.sg;
241 for (i = 0; i < host->dma.num_ents; i++)
242 flush_dcache_page(sg_page(sg++));
248 if ((host->curr.got_dataend && host->curr.got_datablkend)
249 || mrq->data->error) {
252 * If we've already gotten our DATAEND / DATABLKEND
253 * for this request, then complete it through here.
255 msmsdcc_stop_data(host);
257 if (!mrq->data->error)
258 host->curr.data_xfered = host->curr.xfer_size;
/* No stop command needed (or the command already failed): finish now. */
259 if (!mrq->data->stop || mrq->cmd->error) {
260 host->curr.mrq = NULL;
261 host->curr.cmd = NULL;
262 mrq->data->bytes_xfered = host->curr.data_xfered;
264 spin_unlock_irqrestore(&host->lock, flags);
266 msmsdcc_disable_clocks(host, 1);
268 mmc_request_done(host->mmc, mrq);
/* Otherwise issue the stop (CMD12) to close the transfer. */
271 msmsdcc_start_command(host, mrq->data->stop, 0);
275 spin_unlock_irqrestore(&host->lock, flags);
/*
 * Decide whether a transfer is DMA-eligible: requires a DMA channel and a
 * total size that is at least one FIFO and a whole multiple of the FIFO
 * size. NOTE(review): the return statements are missing from this chunk;
 * presumably non-zero means "fall back to PIO".
 */
279 static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
281 if (host->dma.channel == -1)
284 if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
286 if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
/*
 * Build the data-mover box-command list for a transfer and dma_map the
 * scatterlist. One box command per sg entry, CRCI-paced against this
 * controller's FIFO. Returns 0 on success (caller then sets DMAENABLE).
 * NOTE(review): chunk is missing lines — the `case` labels for pdev_id,
 * box-list advance (`box++`), and some cleanup are not visible.
 */
291 static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
293 struct msmsdcc_nc_dmadata *nc;
299 struct scatterlist *sg = data->sg;
301 rc = validate_dma(host, data);
305 host->dma.sg = data->sg;
306 host->dma.num_ents = data->sg_len;
308 BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
/* Pick the CRCI (flow-control ID) matching this SDC instance. */
312 switch (host->pdev_id) {
314 crci = MSMSDCC_CRCI_SDC1;
317 crci = MSMSDCC_CRCI_SDC2;
320 crci = MSMSDCC_CRCI_SDC3;
323 crci = MSMSDCC_CRCI_SDC4;
/* Unknown instance: abandon DMA setup. */
327 host->dma.num_ents = 0;
331 if (data->flags & MMC_DATA_READ)
332 host->dma.dir = DMA_FROM_DEVICE;
334 host->dma.dir = DMA_TO_DEVICE;
336 host->curr.user_pages = 0;
/* Emit one box-mode command per scatterlist entry. */
339 for (i = 0; i < host->dma.num_ents; i++) {
340 box->cmd = CMD_MODE_BOX;
342 /* Initialize sg dma address */
343 sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
/* Last entry: presumably terminates the command list (bit set missing). */
346 if (i == (host->dma.num_ents - 1))
/* Rows = FIFO-sized chunks in this sg entry, rounding partial chunks up. */
348 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
349 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
350 (sg_dma_len(sg) / MCI_FIFOSIZE) ;
/* Read: FIFO -> memory, source paced by the SDCC CRCI. */
352 if (data->flags & MMC_DATA_READ) {
353 box->src_row_addr = msmsdcc_fifo_addr(host);
354 box->dst_row_addr = sg_dma_address(sg);
356 box->src_dst_len = (MCI_FIFOSIZE << 16) |
358 box->row_offset = MCI_FIFOSIZE;
360 box->num_rows = rows * ((1 << 16) + 1);
361 box->cmd |= CMD_SRC_CRCI(crci);
/* Write: memory -> FIFO, destination paced by the SDCC CRCI. */
363 box->src_row_addr = sg_dma_address(sg);
364 box->dst_row_addr = msmsdcc_fifo_addr(host);
366 box->src_dst_len = (MCI_FIFOSIZE << 16) |
368 box->row_offset = (MCI_FIFOSIZE << 16);
370 box->num_rows = rows * ((1 << 16) + 1);
371 box->cmd |= CMD_DST_CRCI(crci);
377 /* location of command block must be 64 bit aligned */
378 BUG_ON(host->dma.cmd_busaddr & 0x07);
380 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
381 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
382 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
383 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
385 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
386 host->dma.num_ents, host->dma.dir);
387 /* dsb inside dma_map_sg will write nc out to mem as well */
/* Partial mapping is treated as failure; DMA state is torn down. */
389 if (n != host->dma.num_ents) {
390 printk(KERN_ERR "%s: Unable to map in all sg elements\n",
391 mmc_hostname(host->mmc));
393 host->dma.num_ents = 0;
/*
 * Detect a CMD52 (IO_RW_DIRECT) write to the SDIO CCCR abort register:
 * opcode 52, write bit (bit 31) set, register address bits [25:9] equal to
 * SDIO_CCCR_ABORT. Such a command must carry the MCIABORT flag.
 * NOTE(review): return lines are missing from this chunk.
 */
401 snoop_cccr_abort(struct mmc_command *cmd)
403 if ((cmd->opcode == 52) &&
404 (cmd->arg & 0x80000000) &&
405 (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
/*
 * Translate an mmc_command into the MMCICOMMAND register value (*c) and
 * record it as the in-flight command. Sets response, interrupt, data, and
 * abort flag bits based on the command's properties.
 */
411 msmsdcc_start_command_deferred(struct msmsdcc_host *host,
412 struct mmc_command *cmd, u32 *c)
414 *c |= (cmd->opcode | MCI_CPSM_ENABLE);
416 if (cmd->flags & MMC_RSP_PRESENT) {
417 if (cmd->flags & MMC_RSP_136)
418 *c |= MCI_CPSM_LONGRSP;
419 *c |= MCI_CPSM_RESPONSE;
423 *c |= MCI_CPSM_INTERRUPT;
/* Data-bearing commands: read/write single & multiple block (17/18/24/25). */
425 if ((((cmd->opcode == 17) || (cmd->opcode == 18)) ||
426 ((cmd->opcode == 24) || (cmd->opcode == 25))) ||
428 *c |= MCI_CSPM_DATCMD;
/* Stop commands and CCCR-abort writes need the abort bit. */
430 if (cmd == cmd->mrq->stop)
431 *c |= MCI_CSPM_MCIABORT;
433 if (snoop_cccr_abort(cmd))
434 *c |= MCI_CSPM_MCIABORT;
/* Should never happen: a previous command is still outstanding. */
436 if (host->curr.cmd != NULL) {
437 printk(KERN_ERR "%s: Overlapping command requests\n",
438 mmc_hostname(host->mmc));
440 host->curr.cmd = cmd;
/*
 * Set up and start a data transfer, daisy-chaining the associated command.
 * DMA path: parameters are stashed on the host and programmed later by the
 * data-mover exec callback. PIO path: registers are programmed directly
 * here and the command is started immediately.
 */
444 msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
445 struct mmc_command *cmd, u32 c)
447 unsigned int datactrl, timeout;
448 unsigned long long clks;
449 unsigned int pio_irqmask = 0;
451 host->curr.data = data;
452 host->curr.xfer_size = data->blksz * data->blocks;
453 host->curr.xfer_remain = host->curr.xfer_size;
454 host->curr.data_xfered = 0;
455 host->curr.got_dataend = 0;
456 host->curr.got_datablkend = 0;
458 memset(&host->pio, 0, sizeof(host->pio));
/* Block size lives in DATACTRL bits [7:4] (per this driver's encoding). */
460 datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
/* 0 from config_dma means DMA is set up; otherwise fall back to PIO. */
462 if (!msmsdcc_config_dma(host, data))
463 datactrl |= MCI_DPSM_DMAENABLE;
465 host->pio.sg = data->sg;
466 host->pio.sg_len = data->sg_len;
467 host->pio.sg_off = 0;
/* PIO IRQ mask: half-full for reads (plus data-available for short
 * transfers), half-empty for writes. */
469 if (data->flags & MMC_DATA_READ) {
470 pio_irqmask = MCI_RXFIFOHALFFULLMASK;
471 if (host->curr.xfer_remain < MCI_FIFOSIZE)
472 pio_irqmask |= MCI_RXDATAAVLBLMASK;
474 pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
477 if (data->flags & MMC_DATA_READ)
478 datactrl |= MCI_DPSM_DIRECTION;
/* Convert the ns timeout to controller clocks; doubled as slack. */
480 clks = (unsigned long long)data->timeout_ns * host->clk_rate;
481 do_div(clks, NSEC_PER_SEC);
482 timeout = data->timeout_clks + (unsigned int)clks*2 ;
484 if (datactrl & MCI_DPSM_DMAENABLE) {
485 /* Save parameters for the exec function */
486 host->cmd_timeout = timeout;
487 host->cmd_pio_irqmask = pio_irqmask;
488 host->cmd_datactrl = datactrl;
491 host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
492 host->dma.hdr.data = (void *)host;
/* Command bits computed now; actually issued from the DMA exec callback. */
496 msmsdcc_start_command_deferred(host, cmd, &c);
499 msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
/* PIO path: program the data registers directly. */
501 msmsdcc_writel(host, timeout, MMCIDATATIMER);
503 msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
505 msmsdcc_writel(host, pio_irqmask, MMCIMASK1);
506 msmsdcc_writel(host, datactrl, MMCIDATACTRL);
509 /* Daisy-chain the command if requested */
510 msmsdcc_start_command(host, cmd, c);
/*
 * Issue a command immediately: compute the MMCICOMMAND bits and write the
 * argument/command registers. Stop commands get the abort flag.
 */
516 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
518 if (cmd == cmd->mrq->stop)
519 c |= MCI_CSPM_MCIABORT;
523 msmsdcc_start_command_deferred(host, cmd, &c);
524 msmsdcc_start_command_exec(host, cmd->arg, c);
/*
 * Map a data-phase error status to an errno on the mmc_data and log it.
 * NOTE(review): error assignments for the overrun/underrun branches are
 * missing from this chunk.
 */
528 msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
531 if (status & MCI_DATACRCFAIL) {
532 pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
533 pr_err("%s: opcode 0x%.8x\n", __func__,
534 data->mrq->cmd->opcode);
535 pr_err("%s: blksz %d, blocks %d\n", __func__,
536 data->blksz, data->blocks);
537 data->error = -EILSEQ;
538 } else if (status & MCI_DATATIMEOUT) {
539 pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
540 data->error = -ETIMEDOUT;
541 } else if (status & MCI_RXOVERRUN) {
542 pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
544 } else if (status & MCI_TXUNDERRUN) {
545 pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
/* Fallback for unrecognized error bits. */
548 pr_err("%s: Unknown error (0x%.8x)\n",
549 mmc_hostname(host->mmc), status);
/*
 * PIO read: drain 32-bit words from the FIFO into `buffer` while data is
 * available. NOTE(review): loop-exit checks against `remain` and the
 * return of the byte count are missing from this chunk; presumably the
 * count of bytes read is returned.
 */
556 msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
558 uint32_t *ptr = (uint32_t *) buffer;
561 while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
562 *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
564 count += sizeof(uint32_t);
566 remain -= sizeof(uint32_t);
/*
 * PIO write: push words into the FIFO while it reports half-empty, writing
 * up to a full FIFO at once when it is completely empty. Returns the byte
 * count written (return lines missing from this chunk).
 */
574 msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
575 unsigned int remain, u32 status)
577 void __iomem *base = host->base;
581 unsigned int count, maxcnt;
/* Whole FIFO when empty; presumably half a FIFO otherwise (line missing). */
583 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
585 count = min(remain, maxcnt);
/* count >> 2: writesl moves 32-bit words. */
587 writesl(base + MMCIFIFO, ptr, count >> 2);
594 status = msmsdcc_readl(host, MMCISTATUS);
595 } while (status & MCI_TXFIFOHALFEMPTY);
/*
 * Busy-wait until any bit in `mask` appears in MMCISTATUS or the spin
 * budget `maxspin` is exhausted. NOTE(review): the loop, udelay, and
 * return lines are missing from this chunk; presumably returns 0 on
 * success and nonzero on timeout (see callers' usage).
 */
601 msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
604 if ((msmsdcc_readl(host, MMCISTATUS) & mask))
/*
 * PIO interrupt handler: moves data between the FIFO and the current
 * scatterlist entry, advancing sg bookkeeping, flushing dcache for user
 * pages on reads, and retuning MMCIMASK1 as the transfer nears completion.
 */
613 msmsdcc_pio_irq(int irq, void *dev_id)
615 struct msmsdcc_host *host = dev_id;
618 status = msmsdcc_readl(host, MMCISTATUS);
622 unsigned int remain, len;
/* Nothing to move right now: optionally spin for more data (piopoll). */
625 if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
626 if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
629 if (msmsdcc_spin_on_status(host,
630 (MCI_TXFIFOHALFEMPTY |
637 /* Map the current scatter buffer */
638 local_irq_save(flags);
639 buffer = kmap_atomic(sg_page(host->pio.sg),
640 KM_BIO_SRC_IRQ) + host->pio.sg->offset;
641 buffer += host->pio.sg_off;
642 remain = host->pio.sg->length - host->pio.sg_off;
644 if (status & MCI_RXACTIVE)
645 len = msmsdcc_pio_read(host, buffer, remain);
646 if (status & MCI_TXACTIVE)
647 len = msmsdcc_pio_write(host, buffer, remain, status);
649 /* Unmap the buffer */
650 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
651 local_irq_restore(flags);
653 host->pio.sg_off += len;
654 host->curr.xfer_remain -= len;
655 host->curr.data_xfered += len;
659 /* This sg page is full - do some housekeeping */
660 if (status & MCI_RXACTIVE && host->curr.user_pages)
661 flush_dcache_page(sg_page(host->pio.sg));
662
663 if (!--host->pio.sg_len) {
664 memset(&host->pio, 0, sizeof(host->pio));
668 /* Advance to next sg */
670 host->pio.sg_off = 0;
673 status = msmsdcc_readl(host, MMCISTATUS);
/* Short tail on reads: switch to data-available IRQ so the last words
 * (less than half a FIFO) still raise an interrupt. */
676 if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
677 msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1);
679 if (!host->curr.xfer_remain)
680 msmsdcc_writel(host, 0, MMCIMASK1);
/*
 * Handle command completion: capture the 4 response words, classify
 * timeout/CRC errors, then either tear down the request (error or no data
 * phase) or kick off the deferred write data phase.
 */
685 static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
687 struct mmc_command *cmd = host->curr.cmd;
689 host->curr.cmd = NULL;
690 cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
691 cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
692 cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
693 cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
695 if (status & MCI_CMDTIMEOUT) {
696 cmd->error = -ETIMEDOUT;
/* CRC failures only count when the response type carries a CRC. */
697 } else if (status & MCI_CMDCRCFAIL &&
698 cmd->flags & MMC_RSP_CRC) {
699 pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
700 cmd->error = -EILSEQ;
703 if (!cmd->data || cmd->error) {
/* Active DMA must be stopped; completion continues in its callback. */
704 if (host->curr.data && host->dma.sg)
705 msm_dmov_stop_cmd(host->dma.channel,
707 else if (host->curr.data) { /* Non DMA */
708 msmsdcc_stop_data(host);
709 msmsdcc_request_end(host, cmd->mrq);
710 } else /* host->data == NULL */
711 msmsdcc_request_end(host, cmd->mrq);
/* Command OK and a write data phase is pending: start it now (reads were
 * started before the command). */
712 } else if (cmd->data)
713 if (!(cmd->data->flags & MMC_DATA_READ))
714 msmsdcc_start_data(host, cmd->data,
/*
 * Data-phase interrupt processing: dispatch command completion, handle
 * data errors (aborting DMA if needed), track DATAEND/DATABLKEND, and
 * complete or chain the stop command when both have arrived and DMA is
 * not still in flight.
 */
719 msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
722 struct mmc_data *data = host->curr.data;
724 if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
725 MCI_CMDTIMEOUT) && host->curr.cmd) {
726 msmsdcc_do_cmdirq(host, status);
732 /* Check for data errors */
733 if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
734 MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
735 msmsdcc_data_err(host, data, status);
736 host->curr.data_xfered = 0;
/* Abort an in-flight DMA; otherwise finish or chain the stop here. */
738 msm_dmov_stop_cmd(host->dma.channel,
742 msmsdcc_stop_data(host);
744 msmsdcc_request_end(host, data->mrq);
746 msmsdcc_start_command(host, data->stop, 0);
750 /* Check for data done */
751 if (!host->curr.got_dataend && (status & MCI_DATAEND))
752 host->curr.got_dataend = 1;
754 if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
755 host->curr.got_datablkend = 1;
758 * If DMA is still in progress, we complete via the completion handler
760 if (host->curr.got_dataend && host->curr.got_datablkend &&
763 * There appears to be an issue in the controller where
764 * if you request a small block transfer (< fifo size),
765 * you may get your DATAEND/DATABLKEND irq without the
768 * Check to see if there is still data to be read,
769 * and simulate a PIO irq.
771 if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
772 msmsdcc_pio_irq(1, host);
774 msmsdcc_stop_data(host);
776 host->curr.data_xfered = host->curr.xfer_size;
779 msmsdcc_request_end(host, data->mrq);
781 msmsdcc_start_command(host, data->stop, 0);
/*
 * Main (command) interrupt handler: read-and-clear the masked status,
 * delegate to the data handler, and forward SDIO card interrupts to the
 * MMC core outside the host lock.
 */
786 msmsdcc_irq(int irq, void *dev_id)
788 struct msmsdcc_host *host = dev_id;
789 void __iomem *base = host->base;
794 spin_lock(&host->lock);
797 struct mmc_data *data;
/* Mask to enabled sources (DATABLOCKEND always observed), then ack. */
798 status = msmsdcc_readl(host, MMCISTATUS);
799 status &= (msmsdcc_readl(host, MMCIMASK0) |
800 MCI_DATABLOCKENDMASK);
801 msmsdcc_writel(host, status, MMCICLEAR);
803 if (status & MCI_SDIOINTR)
804 status &= ~MCI_SDIOINTR;
809 msmsdcc_handle_irq_data(host, status, base);
/* Note the SDIO card interrupt; signalled after dropping the lock. */
811 if (status & MCI_SDIOINTOPER) {
813 status &= ~MCI_SDIOINTOPER;
818 spin_unlock(&host->lock);
821 * We have to delay handling the card interrupt as it calls
822 * back into the driver.
825 mmc_signal_sdio_irq(host->mmc);
827 return IRQ_RETVAL(ret);
/*
 * mmc_host_ops.request: entry point for a new MMC request. Rejects writes
 * when the card is ejected, enables clocks, starts the data phase (reads)
 * or command, and opportunistically polls for fast command completion.
 */
831 msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
833 struct msmsdcc_host *host = mmc_priv(mmc);
836 WARN_ON(host->curr.mrq != NULL);
837 WARN_ON(host->pwr == 0);
839 spin_lock_irqsave(&host->lock, flags);
/* Ejected-card path (guard condition missing from this chunk): fail the
 * request with -ENOMEDIUM, faking the byte count for non-reads. */
844 if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
846 mrq->data->bytes_xfered = mrq->data->blksz *
849 mrq->cmd->error = -ENOMEDIUM;
851 spin_unlock_irqrestore(&host->lock, flags);
852 mmc_request_done(mmc, mrq);
856 host->curr.mrq = mrq;
858 /* Need to drop the host lock here in case
859 * the busclk wd fires
861 spin_unlock_irqrestore(&host->lock, flags);
863 msmsdcc_enable_clocks(host);
864 spin_lock_irqsave(&host->lock, flags);
866 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
867 /* Queue/read data, daisy-chain command when data starts */
868 msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
870 msmsdcc_start_command(host, mrq->cmd, 0);
/* Fast path: spin briefly for command completion to avoid IRQ latency. */
872 if (host->cmdpoll && !msmsdcc_spin_on_status(host,
873 MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
875 uint32_t status = msmsdcc_readl(host, MMCISTATUS);
876 msmsdcc_do_cmdirq(host, status);
878 MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
880 host->stats.cmdpoll_hits++;
882 host->stats.cmdpoll_misses++;
884 spin_unlock_irqrestore(&host->lock, flags);
/*
 * mmc_host_ops.set_ios: apply clock rate, bus width, power-save, and
 * power/VDD settings requested by the MMC core.
 * NOTE(review): the power_mode case bodies and parts of the open-drain
 * handling are missing from this chunk.
 */
888 msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
890 struct msmsdcc_host *host = mmc_priv(mmc);
891 u32 clk = 0, pwr = 0;
896 msmsdcc_enable_clocks(host);
898 spin_lock_irqsave(&host->lock, flags);
901 if (ios->clock != host->clk_rate) {
902 rc = clk_set_rate(host->clk, ios->clock);
904 pr_err("%s: Error setting clock rate (%d)\n",
905 mmc_hostname(host->mmc), rc);
907 host->clk_rate = ios->clock;
909 clk |= MCI_CLK_ENABLE;
912 if (ios->bus_width == MMC_BUS_WIDTH_4)
913 clk |= (2 << 10); /* Set WIDEBUS */
/* HW power save only above identification frequency. */
915 if (ios->clock > 400000 && msmsdcc_pwrsave)
916 clk |= (1 << 9); /* PWRSAVE */
918 clk |= (1 << 12); /* FLOW_ENA */
919 clk |= (1 << 15); /* feedback clock */
/* Board-specific VDD translation hook, when provided. */
921 if (host->plat->translate_vdd)
922 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
924 switch (ios->power_mode) {
935 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
938 msmsdcc_writel(host, clk, MMCICLOCK);
/* Only touch MMCIPOWER when the value actually changed. */
940 if (host->pwr != pwr) {
942 msmsdcc_writel(host, pwr, MMCIPOWER);
945 msmsdcc_disable_clocks(host, 1);
947 spin_unlock_irqrestore(&host->lock, flags);
/*
 * mmc_host_ops.enable_sdio_irq: mask or unmask the SDIO card interrupt in
 * MMCIMASK0 (only when the msmsdcc_sdioirq module switch is on), and save
 * the mask so resume can restore it.
 */
950 static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
952 struct msmsdcc_host *host = mmc_priv(mmc);
956 spin_lock_irqsave(&host->lock, flags);
957 if (msmsdcc_sdioirq == 1) {
958 status = msmsdcc_readl(host, MMCIMASK0);
960 status |= MCI_SDIOINTOPERMASK;
962 status &= ~MCI_SDIOINTOPERMASK;
963 host->saved_irq0mask = status;
964 msmsdcc_writel(host, status, MMCIMASK0);
966 spin_unlock_irqrestore(&host->lock, flags);
/* Host operations handed to the MMC core. */
969 static const struct mmc_host_ops msmsdcc_ops = {
970 .request = msmsdcc_request,
971 .set_ios = msmsdcc_set_ios,
972 .enable_sdio_irq = msmsdcc_enable_sdio_irq,
/*
 * Card-detect poll/timer callback: query the platform status hook, update
 * eject state, and notify the MMC core on slot status changes. Re-arms
 * the polling timer when it is in use.
 */
976 msmsdcc_check_status(unsigned long data)
978 struct msmsdcc_host *host = (struct msmsdcc_host *)data;
/* No status hook: just ask the core to rescan. */
981 if (!host->plat->status) {
982 mmc_detect_change(host->mmc, 0);
986 status = host->plat->status(mmc_dev(host->mmc));
987 host->eject = !status;
988 if (status ^ host->oldstat) {
989 pr_info("%s: Slot status change detected (%d -> %d)\n",
990 mmc_hostname(host->mmc), host->oldstat, status);
/* Longer debounce on one edge (insert vs. remove; guard line missing). */
992 mmc_detect_change(host->mmc, (5 * HZ) / 2);
994 mmc_detect_change(host->mmc, 0);
997 host->oldstat = status;
/* Re-arm polling only if the timer was set up in probe. */
1000 if (host->timer.function)
1001 mod_timer(&host->timer, jiffies + HZ);
/* Slot-detect IRQ handler: funnels into the common status-check path. */
1005 msmsdcc_platform_status_irq(int irq, void *dev_id)
1007 struct msmsdcc_host *host = dev_id;
1009 printk(KERN_DEBUG "%s: %d\n", __func__, irq);
1010 msmsdcc_check_status((unsigned long) host);
/* Platform status-notify callback: same path as the slot-detect IRQ. */
1015 msmsdcc_status_notify_cb(int card_present, void *dev_id)
1017 struct msmsdcc_host *host = dev_id;
1019 printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
1021 msmsdcc_check_status((unsigned long) host);
/*
 * Bus-clock idle timer callback: the deferred power-save window elapsed,
 * so gate the clocks immediately (deferr = 0).
 */
1025 msmsdcc_busclk_expired(unsigned long _data)
1027 struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
1028 unsigned long flags;
1030 spin_lock_irqsave(&host->lock, flags);
1031 dev_info(mmc_dev(host->mmc), "Bus clock timer expired\n");
1033 msmsdcc_disable_clocks(host, 0);
1034 spin_unlock_irqrestore(&host->lock, flags);
/*
 * Allocate and lay out the non-cached data-mover command area, and record
 * the platform-provided DMA channel. channel stays -1 (DMA disabled) on
 * allocation failure or when no DMA resource exists (guards partially
 * missing from this chunk).
 */
1038 msmsdcc_init_dma(struct msmsdcc_host *host)
1040 memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
1041 host->dma.host = host;
1042 host->dma.channel = -1;
1047 host->dma.nc = dma_alloc_coherent(NULL,
1048 sizeof(struct msmsdcc_nc_dmadata),
1049 &host->dma.nc_busaddr,
1051 if (host->dma.nc == NULL) {
1052 pr_err("Unable to allocate DMA buffer\n");
1055 memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
/* Bus addresses of the command list and its pointer word within `nc`. */
1056 host->dma.cmd_busaddr = host->dma.nc_busaddr;
1057 host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
1058 offsetof(struct msmsdcc_nc_dmadata, cmdptr);
1059 host->dma.channel = host->dmares->start;
/* Optional workqueue-based resume: resumes the MMC host and re-enables
 * the slot-detect IRQ outside the platform resume path. */
1064 #ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
1066 do_resume_work(struct work_struct *work)
1068 struct msmsdcc_host *host =
1069 container_of(work, struct msmsdcc_host, resume_task);
1070 struct mmc_host *mmc = host->mmc;
1073 mmc_resume_host(mmc);
1075 enable_irq(host->stat_irq);
/*
 * Platform probe: validate platform data and resources, allocate the MMC
 * host, map registers, set up DMA/clocks/capabilities, wire up card-detect
 * and the command/PIO IRQs, then log the configuration. The tail lines
 * here are the error-unwind ladder (labels missing from this chunk).
 */
1081 msmsdcc_probe(struct platform_device *pdev)
1083 struct mmc_platform_data *plat = pdev->dev.platform_data;
1084 struct msmsdcc_host *host;
1085 struct mmc_host *mmc;
1086 struct resource *cmd_irqres = NULL;
1087 struct resource *pio_irqres = NULL;
1088 struct resource *stat_irqres = NULL;
1089 struct resource *memres = NULL;
1090 struct resource *dmares = NULL;
1093 /* must have platform data */
1095 pr_err("%s: Platform data not available\n", __func__);
/* Only SDC instances 1..4 exist on this SoC. */
1100 if (pdev->id < 1 || pdev->id > 4)
1103 if (pdev->resource == NULL || pdev->num_resources < 2) {
1104 pr_err("%s: Invalid resource\n", __func__);
/* Collect MMIO, DMA channel, and the three named IRQ resources. */
1108 memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1109 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1110 cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1112 pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1114 stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1117 if (!cmd_irqres || !pio_irqres || !memres) {
1118 pr_err("%s: Invalid resource\n", __func__);
1123 * Setup our host structure
1126 mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
1132 host = mmc_priv(mmc);
1133 host->pdev_id = pdev->id;
1136 host->curr.cmd = NULL;
1140 host->base = ioremap(memres->start, PAGE_SIZE);
1146 host->cmd_irqres = cmd_irqres;
1147 host->pio_irqres = pio_irqres;
1148 host->memres = memres;
1149 host->dmares = dmares;
1150 spin_lock_init(&host->lock);
1155 msmsdcc_init_dma(host);
1157 /* Get our clocks */
1158 host->pclk = clk_get(&pdev->dev, "sdc_pclk");
1159 if (IS_ERR(host->pclk)) {
1160 ret = PTR_ERR(host->pclk);
1164 host->clk = clk_get(&pdev->dev, "sdc_clk");
1165 if (IS_ERR(host->clk)) {
1166 ret = PTR_ERR(host->clk);
/* Clocks on for the rest of probe; start at the minimum rate. */
1171 ret = msmsdcc_enable_clocks(host);
1175 ret = clk_set_rate(host->clk, msmsdcc_fmin);
1177 pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
1181 host->pclk_rate = clk_get_rate(host->pclk);
1182 host->clk_rate = clk_get_rate(host->clk);
1185 * Setup MMC host structure
1187 mmc->ops = &msmsdcc_ops;
1188 mmc->f_min = msmsdcc_fmin;
1189 mmc->f_max = msmsdcc_fmax;
1190 mmc->ocr_avail = plat->ocr_mask;
1193 mmc->caps |= MMC_CAP_4_BIT_DATA;
1194 if (msmsdcc_sdioirq)
1195 mmc->caps |= MMC_CAP_SDIO_IRQ;
1196 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
1198 mmc->max_phys_segs = NR_SG;
1199 mmc->max_hw_segs = NR_SG;
1200 mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
1201 mmc->max_blk_count = 65536;
1203 mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
1204 mmc->max_seg_size = mmc->max_req_size;
/* Quiesce and then enable the standard interrupt set. */
1206 msmsdcc_writel(host, 0, MMCIMASK0);
1207 msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
1209 msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
1210 host->saved_irq0mask = MCI_IRQENABLE;
1213 * Setup card detect change
1216 memset(&host->timer, 0, sizeof(host->timer));
/* Card detect: prefer a dedicated slot IRQ, then a platform notify hook,
 * then fall back to timer polling via msmsdcc_check_status. */
1218 if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
1219 unsigned long irqflags = IRQF_SHARED |
1220 (stat_irqres->flags & IRQF_TRIGGER_MASK);
1222 host->stat_irq = stat_irqres->start;
1223 ret = request_irq(host->stat_irq,
1224 msmsdcc_platform_status_irq,
1226 DRIVER_NAME " (slot)",
1229 pr_err("%s: Unable to get slot IRQ %d (%d)\n",
1230 mmc_hostname(mmc), host->stat_irq, ret);
1233 } else if (plat->register_status_notify) {
1234 plat->register_status_notify(msmsdcc_status_notify_cb, host);
1235 } else if (!plat->status)
1236 pr_err("%s: No card detect facilities available\n",
1239 init_timer(&host->timer);
1240 host->timer.data = (unsigned long)host;
1241 host->timer.function = msmsdcc_check_status;
1242 host->timer.expires = jiffies + HZ;
1243 add_timer(&host->timer);
1247 host->oldstat = host->plat->status(mmc_dev(host->mmc));
1248 host->eject = !host->oldstat;
/* Bus-clock power-save timer (fires msmsdcc_busclk_expired). */
1251 init_timer(&host->busclk_timer);
1252 host->busclk_timer.data = (unsigned long) host;
1253 host->busclk_timer.function = msmsdcc_busclk_expired;
1255 ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
1256 DRIVER_NAME " (cmd)", host);
1260 ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
1261 DRIVER_NAME " (pio)", host);
1265 mmc_set_drvdata(pdev, mmc);
1268 pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
1269 mmc_hostname(mmc), (unsigned long long)memres->start,
1270 (unsigned int) cmd_irqres->start,
1271 (unsigned int) host->stat_irq, host->dma.channel);
1272 pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
1273 (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
1274 pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
1275 mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
1276 pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
1277 pr_info("%s: Power save feature enable = %d\n",
1278 mmc_hostname(mmc), msmsdcc_pwrsave);
1280 if (host->dma.channel != -1) {
1281 pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
1282 mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
1283 pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
1284 mmc_hostname(mmc), host->dma.cmd_busaddr,
1285 host->dma.cmdptr_busaddr);
1287 pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
1288 if (host->timer.function)
1289 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
/* Success path defers clock gating; below are error-unwind steps. */
1292 msmsdcc_disable_clocks(host, 1);
1296 free_irq(cmd_irqres->start, host);
1299 free_irq(host->stat_irq, host);
1301 msmsdcc_disable_clocks(host, 0);
1305 clk_put(host->pclk);
/*
 * Platform suspend: quiet the slot-detect IRQ, suspend the MMC core for
 * non-SDIO cards, mask controller interrupts, and gate the clocks.
 */
1313 msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1315 struct mmc_host *mmc = mmc_get_drvdata(dev);
1319 struct msmsdcc_host *host = mmc_priv(mmc);
1322 disable_irq(host->stat_irq);
/* SDIO cards stay powered/configured across suspend. */
1324 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1325 rc = mmc_suspend_host(mmc, state);
1327 msmsdcc_writel(host, 0, MMCIMASK0);
1331 msmsdcc_disable_clocks(host, 0);
/*
 * Platform resume: mirror of suspend — re-enable clocks, restore the
 * saved interrupt mask, resume the MMC core for non-SDIO cards, re-enable
 * the slot IRQ, then defer-gate the clocks again.
 */
1337 msmsdcc_resume(struct platform_device *dev)
1339 struct mmc_host *mmc = mmc_get_drvdata(dev);
1342 struct msmsdcc_host *host = mmc_priv(mmc);
1344 msmsdcc_enable_clocks(host);
1346 msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
1348 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1349 mmc_resume_host(mmc);
1351 enable_irq(host->stat_irq);
1353 msmsdcc_disable_clocks(host, 1);
/* Platform driver registration record (name/owner fields not visible in
 * this chunk). */
1359 static struct platform_driver msmsdcc_driver = {
1360 .probe = msmsdcc_probe,
1361 .suspend = msmsdcc_suspend,
1362 .resume = msmsdcc_resume,
/* Module entry/exit: register/unregister the platform driver. */
1368 static int __init msmsdcc_init(void)
1370 return platform_driver_register(&msmsdcc_driver);
1373 static void __exit msmsdcc_exit(void)
1375 platform_driver_unregister(&msmsdcc_driver);
1378 module_init(msmsdcc_init);
1379 module_exit(msmsdcc_exit);
1381 MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
1382 MODULE_LICENSE("GPL");