/*
 *  linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
 *
 *  Copyright (C) 2007 Google Inc,
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *  Copyright (C) 2009, Code Aurora Forum. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Author: San Mehat (san@android.com)
 */
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/init.h>
21 #include <linux/ioport.h>
22 #include <linux/device.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25 #include <linux/err.h>
26 #include <linux/highmem.h>
27 #include <linux/log2.h>
28 #include <linux/mmc/host.h>
29 #include <linux/mmc/card.h>
30 #include <linux/mmc/sdio.h>
31 #include <linux/clk.h>
32 #include <linux/scatterlist.h>
33 #include <linux/platform_device.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/debugfs.h>
37 #include <linux/memory.h>
39 #include <asm/cacheflush.h>
40 #include <asm/div64.h>
41 #include <asm/sizes.h>
44 #include <mach/msm_iomap.h>
49 #define DRIVER_NAME "msm-sdcc"
51 #define BUSCLK_PWRSAVE 1
52 #define BUSCLK_TIMEOUT (HZ)
53 static unsigned int msmsdcc_fmin = 144000;
54 static unsigned int msmsdcc_fmax = 50000000;
55 static unsigned int msmsdcc_4bit = 1;
56 static unsigned int msmsdcc_pwrsave = 1;
57 static unsigned int msmsdcc_piopoll = 1;
58 static unsigned int msmsdcc_sdioirq;
60 #define PIO_SPINMAX 30
61 #define CMD_SPINMAX 20
65 msmsdcc_disable_clocks(struct msmsdcc_host *host, int deferr)
67 WARN_ON(!host->clks_on);
69 BUG_ON(host->curr.mrq);
72 mod_timer(&host->busclk_timer, jiffies + BUSCLK_TIMEOUT);
74 del_timer_sync(&host->busclk_timer);
75 clk_disable(host->clk);
76 clk_disable(host->pclk);
82 msmsdcc_enable_clocks(struct msmsdcc_host *host)
86 WARN_ON(host->clks_on);
88 del_timer_sync(&host->busclk_timer);
90 rc = clk_enable(host->pclk);
93 rc = clk_enable(host->clk);
95 clk_disable(host->pclk);
98 udelay(1 + ((3 * USEC_PER_SEC) /
99 (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
104 static inline unsigned int
105 msmsdcc_readl(struct msmsdcc_host *host, unsigned int reg)
107 return readl(host->base + reg);
111 msmsdcc_writel(struct msmsdcc_host *host, u32 data, unsigned int reg)
113 writel(data, host->base + reg);
114 /* 3 clk delay required! */
115 udelay(1 + ((3 * USEC_PER_SEC) /
116 (host->clk_rate ? host->clk_rate : msmsdcc_fmin)));
120 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
124 msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
126 BUG_ON(host->curr.data);
128 host->curr.mrq = NULL;
129 host->curr.cmd = NULL;
132 mrq->data->bytes_xfered = host->curr.data_xfered;
133 if (mrq->cmd->error == -ETIMEDOUT)
137 msmsdcc_disable_clocks(host, 1);
140 * Need to drop the host lock here; mmc_request_done may call
141 * back into the driver...
143 spin_unlock(&host->lock);
144 mmc_request_done(host->mmc, mrq);
145 spin_lock(&host->lock);
149 msmsdcc_stop_data(struct msmsdcc_host *host)
151 host->curr.data = NULL;
152 host->curr.got_dataend = host->curr.got_datablkend = 0;
155 uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
157 switch (host->pdev_id) {
159 return MSM_SDC1_PHYS + MMCIFIFO;
161 return MSM_SDC2_PHYS + MMCIFIFO;
163 return MSM_SDC3_PHYS + MMCIFIFO;
165 return MSM_SDC4_PHYS + MMCIFIFO;
172 msmsdcc_start_command_exec(struct msmsdcc_host *host, u32 arg, u32 c) {
173 msmsdcc_writel(host, arg, MMCIARGUMENT);
174 msmsdcc_writel(host, c, MMCICOMMAND);
178 msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
180 struct msmsdcc_host *host = (struct msmsdcc_host *)cmd->data;
182 msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);
183 msmsdcc_writel(host, (unsigned int)host->curr.xfer_size, MMCIDATALENGTH);
184 msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1);
185 msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);
188 msmsdcc_start_command_exec(host,
189 (u32) host->cmd_cmd->arg,
192 host->dma.active = 1;
196 msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
198 struct msm_dmov_errdata *err)
200 struct msmsdcc_dma_data *dma_data =
201 container_of(cmd, struct msmsdcc_dma_data, hdr);
202 struct msmsdcc_host *host = dma_data->host;
204 struct mmc_request *mrq;
206 spin_lock_irqsave(&host->lock, flags);
207 host->dma.active = 0;
209 mrq = host->curr.mrq;
213 if (!(result & DMOV_RSLT_VALID)) {
214 pr_err("msmsdcc: Invalid DataMover result\n");
218 if (result & DMOV_RSLT_DONE) {
219 host->curr.data_xfered = host->curr.xfer_size;
222 if (result & DMOV_RSLT_ERROR)
223 pr_err("%s: DMA error (0x%.8x)\n",
224 mmc_hostname(host->mmc), result);
225 if (result & DMOV_RSLT_FLUSH)
226 pr_err("%s: DMA channel flushed (0x%.8x)\n",
227 mmc_hostname(host->mmc), result);
229 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
230 err->flush[0], err->flush[1], err->flush[2],
231 err->flush[3], err->flush[4], err->flush[5]);
232 if (!mrq->data->error)
233 mrq->data->error = -EIO;
235 dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
238 if (host->curr.user_pages) {
239 struct scatterlist *sg = host->dma.sg;
242 for (i = 0; i < host->dma.num_ents; i++)
243 flush_dcache_page(sg_page(sg++));
249 if ((host->curr.got_dataend && host->curr.got_datablkend)
250 || mrq->data->error) {
253 * If we've already gotten our DATAEND / DATABLKEND
254 * for this request, then complete it through here.
256 msmsdcc_stop_data(host);
258 if (!mrq->data->error)
259 host->curr.data_xfered = host->curr.xfer_size;
260 if (!mrq->data->stop || mrq->cmd->error) {
261 host->curr.mrq = NULL;
262 host->curr.cmd = NULL;
263 mrq->data->bytes_xfered = host->curr.data_xfered;
265 spin_unlock_irqrestore(&host->lock, flags);
267 msmsdcc_disable_clocks(host, 1);
269 mmc_request_done(host->mmc, mrq);
272 msmsdcc_start_command(host, mrq->data->stop, 0);
276 spin_unlock_irqrestore(&host->lock, flags);
280 static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
282 if (host->dma.channel == -1)
285 if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
287 if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
292 static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
294 struct msmsdcc_nc_dmadata *nc;
300 struct scatterlist *sg = data->sg;
302 rc = validate_dma(host, data);
306 host->dma.sg = data->sg;
307 host->dma.num_ents = data->sg_len;
309 BUG_ON(host->dma.num_ents > NR_SG); /* Prevent memory corruption */
313 switch (host->pdev_id) {
315 crci = MSMSDCC_CRCI_SDC1;
318 crci = MSMSDCC_CRCI_SDC2;
321 crci = MSMSDCC_CRCI_SDC3;
324 crci = MSMSDCC_CRCI_SDC4;
328 host->dma.num_ents = 0;
332 if (data->flags & MMC_DATA_READ)
333 host->dma.dir = DMA_FROM_DEVICE;
335 host->dma.dir = DMA_TO_DEVICE;
337 host->curr.user_pages = 0;
340 for (i = 0; i < host->dma.num_ents; i++) {
341 box->cmd = CMD_MODE_BOX;
343 /* Initialize sg dma address */
344 sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
347 if (i == (host->dma.num_ents - 1))
349 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
350 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
351 (sg_dma_len(sg) / MCI_FIFOSIZE) ;
353 if (data->flags & MMC_DATA_READ) {
354 box->src_row_addr = msmsdcc_fifo_addr(host);
355 box->dst_row_addr = sg_dma_address(sg);
357 box->src_dst_len = (MCI_FIFOSIZE << 16) |
359 box->row_offset = MCI_FIFOSIZE;
361 box->num_rows = rows * ((1 << 16) + 1);
362 box->cmd |= CMD_SRC_CRCI(crci);
364 box->src_row_addr = sg_dma_address(sg);
365 box->dst_row_addr = msmsdcc_fifo_addr(host);
367 box->src_dst_len = (MCI_FIFOSIZE << 16) |
369 box->row_offset = (MCI_FIFOSIZE << 16);
371 box->num_rows = rows * ((1 << 16) + 1);
372 box->cmd |= CMD_DST_CRCI(crci);
378 /* location of command block must be 64 bit aligned */
379 BUG_ON(host->dma.cmd_busaddr & 0x07);
381 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
382 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
383 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
384 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
386 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
387 host->dma.num_ents, host->dma.dir);
388 /* dsb inside dma_map_sg will write nc out to mem as well */
390 if (n != host->dma.num_ents) {
391 printk(KERN_ERR "%s: Unable to map in all sg elements\n",
392 mmc_hostname(host->mmc));
394 host->dma.num_ents = 0;
402 snoop_cccr_abort(struct mmc_command *cmd)
404 if ((cmd->opcode == 52) &&
405 (cmd->arg & 0x80000000) &&
406 (((cmd->arg >> 9) & 0x1ffff) == SDIO_CCCR_ABORT))
412 msmsdcc_start_command_deferred(struct msmsdcc_host *host,
413 struct mmc_command *cmd, u32 *c)
415 *c |= (cmd->opcode | MCI_CPSM_ENABLE);
417 if (cmd->flags & MMC_RSP_PRESENT) {
418 if (cmd->flags & MMC_RSP_136)
419 *c |= MCI_CPSM_LONGRSP;
420 *c |= MCI_CPSM_RESPONSE;
424 *c |= MCI_CPSM_INTERRUPT;
426 if ((((cmd->opcode == 17) || (cmd->opcode == 18)) ||
427 ((cmd->opcode == 24) || (cmd->opcode == 25))) ||
429 *c |= MCI_CSPM_DATCMD;
431 if (cmd == cmd->mrq->stop)
432 *c |= MCI_CSPM_MCIABORT;
434 if (snoop_cccr_abort(cmd))
435 *c |= MCI_CSPM_MCIABORT;
437 if (host->curr.cmd != NULL) {
438 printk(KERN_ERR "%s: Overlapping command requests\n",
439 mmc_hostname(host->mmc));
441 host->curr.cmd = cmd;
445 msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
446 struct mmc_command *cmd, u32 c)
448 unsigned int datactrl, timeout;
449 unsigned long long clks;
450 unsigned int pio_irqmask = 0;
452 host->curr.data = data;
453 host->curr.xfer_size = data->blksz * data->blocks;
454 host->curr.xfer_remain = host->curr.xfer_size;
455 host->curr.data_xfered = 0;
456 host->curr.got_dataend = 0;
457 host->curr.got_datablkend = 0;
459 memset(&host->pio, 0, sizeof(host->pio));
461 datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
463 if (!msmsdcc_config_dma(host, data))
464 datactrl |= MCI_DPSM_DMAENABLE;
466 host->pio.sg = data->sg;
467 host->pio.sg_len = data->sg_len;
468 host->pio.sg_off = 0;
470 if (data->flags & MMC_DATA_READ) {
471 pio_irqmask = MCI_RXFIFOHALFFULLMASK;
472 if (host->curr.xfer_remain < MCI_FIFOSIZE)
473 pio_irqmask |= MCI_RXDATAAVLBLMASK;
475 pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
478 if (data->flags & MMC_DATA_READ)
479 datactrl |= MCI_DPSM_DIRECTION;
481 clks = (unsigned long long)data->timeout_ns * host->clk_rate;
482 do_div(clks, NSEC_PER_SEC);
483 timeout = data->timeout_clks + (unsigned int)clks*2 ;
485 if (datactrl & MCI_DPSM_DMAENABLE) {
486 /* Save parameters for the exec function */
487 host->cmd_timeout = timeout;
488 host->cmd_pio_irqmask = pio_irqmask;
489 host->cmd_datactrl = datactrl;
492 host->dma.hdr.execute_func = msmsdcc_dma_exec_func;
493 host->dma.hdr.data = (void *)host;
497 msmsdcc_start_command_deferred(host, cmd, &c);
500 msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
502 msmsdcc_writel(host, timeout, MMCIDATATIMER);
504 msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH);
506 msmsdcc_writel(host, pio_irqmask, MMCIMASK1);
507 msmsdcc_writel(host, datactrl, MMCIDATACTRL);
510 /* Daisy-chain the command if requested */
511 msmsdcc_start_command(host, cmd, c);
517 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
519 if (cmd == cmd->mrq->stop)
520 c |= MCI_CSPM_MCIABORT;
524 msmsdcc_start_command_deferred(host, cmd, &c);
525 msmsdcc_start_command_exec(host, cmd->arg, c);
529 msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
532 if (status & MCI_DATACRCFAIL) {
533 pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
534 pr_err("%s: opcode 0x%.8x\n", __func__,
535 data->mrq->cmd->opcode);
536 pr_err("%s: blksz %d, blocks %d\n", __func__,
537 data->blksz, data->blocks);
538 data->error = -EILSEQ;
539 } else if (status & MCI_DATATIMEOUT) {
540 pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
541 data->error = -ETIMEDOUT;
542 } else if (status & MCI_RXOVERRUN) {
543 pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
545 } else if (status & MCI_TXUNDERRUN) {
546 pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
549 pr_err("%s: Unknown error (0x%.8x)\n",
550 mmc_hostname(host->mmc), status);
557 msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
559 uint32_t *ptr = (uint32_t *) buffer;
562 while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
563 *ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
565 count += sizeof(uint32_t);
567 remain -= sizeof(uint32_t);
575 msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
576 unsigned int remain, u32 status)
578 void __iomem *base = host->base;
582 unsigned int count, maxcnt;
584 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
586 count = min(remain, maxcnt);
588 writesl(base + MMCIFIFO, ptr, count >> 2);
595 status = msmsdcc_readl(host, MMCISTATUS);
596 } while (status & MCI_TXFIFOHALFEMPTY);
602 msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
605 if ((msmsdcc_readl(host, MMCISTATUS) & mask))
614 msmsdcc_pio_irq(int irq, void *dev_id)
616 struct msmsdcc_host *host = dev_id;
619 status = msmsdcc_readl(host, MMCISTATUS);
623 unsigned int remain, len;
626 if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
627 if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
630 if (msmsdcc_spin_on_status(host,
631 (MCI_TXFIFOHALFEMPTY |
638 /* Map the current scatter buffer */
639 local_irq_save(flags);
640 buffer = kmap_atomic(sg_page(host->pio.sg),
641 KM_BIO_SRC_IRQ) + host->pio.sg->offset;
642 buffer += host->pio.sg_off;
643 remain = host->pio.sg->length - host->pio.sg_off;
645 if (status & MCI_RXACTIVE)
646 len = msmsdcc_pio_read(host, buffer, remain);
647 if (status & MCI_TXACTIVE)
648 len = msmsdcc_pio_write(host, buffer, remain, status);
650 /* Unmap the buffer */
651 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
652 local_irq_restore(flags);
654 host->pio.sg_off += len;
655 host->curr.xfer_remain -= len;
656 host->curr.data_xfered += len;
660 /* This sg page is full - do some housekeeping */
661 if (status & MCI_RXACTIVE && host->curr.user_pages)
662 flush_dcache_page(sg_page(host->pio.sg));
664 if (!--host->pio.sg_len) {
665 memset(&host->pio, 0, sizeof(host->pio));
669 /* Advance to next sg */
671 host->pio.sg_off = 0;
674 status = msmsdcc_readl(host, MMCISTATUS);
677 if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
678 msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1);
680 if (!host->curr.xfer_remain)
681 msmsdcc_writel(host, 0, MMCIMASK1);
686 static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
688 struct mmc_command *cmd = host->curr.cmd;
690 host->curr.cmd = NULL;
691 cmd->resp[0] = msmsdcc_readl(host, MMCIRESPONSE0);
692 cmd->resp[1] = msmsdcc_readl(host, MMCIRESPONSE1);
693 cmd->resp[2] = msmsdcc_readl(host, MMCIRESPONSE2);
694 cmd->resp[3] = msmsdcc_readl(host, MMCIRESPONSE3);
696 if (status & MCI_CMDTIMEOUT) {
697 cmd->error = -ETIMEDOUT;
698 } else if (status & MCI_CMDCRCFAIL &&
699 cmd->flags & MMC_RSP_CRC) {
700 pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
701 cmd->error = -EILSEQ;
704 if (!cmd->data || cmd->error) {
705 if (host->curr.data && host->dma.sg)
706 msm_dmov_stop_cmd(host->dma.channel,
708 else if (host->curr.data) { /* Non DMA */
709 msmsdcc_stop_data(host);
710 msmsdcc_request_end(host, cmd->mrq);
711 } else /* host->data == NULL */
712 msmsdcc_request_end(host, cmd->mrq);
713 } else if (cmd->data)
714 if (!(cmd->data->flags & MMC_DATA_READ))
715 msmsdcc_start_data(host, cmd->data,
720 msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
723 struct mmc_data *data = host->curr.data;
725 if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
726 MCI_CMDTIMEOUT) && host->curr.cmd) {
727 msmsdcc_do_cmdirq(host, status);
733 /* Check for data errors */
734 if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
735 MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
736 msmsdcc_data_err(host, data, status);
737 host->curr.data_xfered = 0;
739 msm_dmov_stop_cmd(host->dma.channel,
743 msmsdcc_stop_data(host);
745 msmsdcc_request_end(host, data->mrq);
747 msmsdcc_start_command(host, data->stop, 0);
751 /* Check for data done */
752 if (!host->curr.got_dataend && (status & MCI_DATAEND))
753 host->curr.got_dataend = 1;
755 if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
756 host->curr.got_datablkend = 1;
759 * If DMA is still in progress, we complete via the completion handler
761 if (host->curr.got_dataend && host->curr.got_datablkend &&
764 * There appears to be an issue in the controller where
765 * if you request a small block transfer (< fifo size),
766 * you may get your DATAEND/DATABLKEND irq without the
769 * Check to see if there is still data to be read,
770 * and simulate a PIO irq.
772 if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
773 msmsdcc_pio_irq(1, host);
775 msmsdcc_stop_data(host);
777 host->curr.data_xfered = host->curr.xfer_size;
780 msmsdcc_request_end(host, data->mrq);
782 msmsdcc_start_command(host, data->stop, 0);
787 msmsdcc_irq(int irq, void *dev_id)
789 struct msmsdcc_host *host = dev_id;
790 void __iomem *base = host->base;
795 spin_lock(&host->lock);
798 struct mmc_data *data;
799 status = msmsdcc_readl(host, MMCISTATUS);
800 status &= (msmsdcc_readl(host, MMCIMASK0) |
801 MCI_DATABLOCKENDMASK);
802 msmsdcc_writel(host, status, MMCICLEAR);
804 if (status & MCI_SDIOINTR)
805 status &= ~MCI_SDIOINTR;
810 msmsdcc_handle_irq_data(host, status, base);
812 if (status & MCI_SDIOINTOPER) {
814 status &= ~MCI_SDIOINTOPER;
819 spin_unlock(&host->lock);
822 * We have to delay handling the card interrupt as it calls
823 * back into the driver.
826 mmc_signal_sdio_irq(host->mmc);
828 return IRQ_RETVAL(ret);
832 msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
834 struct msmsdcc_host *host = mmc_priv(mmc);
837 WARN_ON(host->curr.mrq != NULL);
838 WARN_ON(host->pwr == 0);
840 spin_lock_irqsave(&host->lock, flags);
845 if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
847 mrq->data->bytes_xfered = mrq->data->blksz *
850 mrq->cmd->error = -ENOMEDIUM;
852 spin_unlock_irqrestore(&host->lock, flags);
853 mmc_request_done(mmc, mrq);
857 /* Need to drop the host lock here in case
858 * the busclk wd fires
860 spin_unlock_irqrestore(&host->lock, flags);
862 msmsdcc_enable_clocks(host);
863 spin_lock_irqsave(&host->lock, flags);
865 host->curr.mrq = mrq;
867 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
868 /* Queue/read data, daisy-chain command when data starts */
869 msmsdcc_start_data(host, mrq->data, mrq->cmd, 0);
871 msmsdcc_start_command(host, mrq->cmd, 0);
873 if (host->cmdpoll && !msmsdcc_spin_on_status(host,
874 MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
876 uint32_t status = msmsdcc_readl(host, MMCISTATUS);
877 msmsdcc_do_cmdirq(host, status);
879 MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
881 host->stats.cmdpoll_hits++;
883 host->stats.cmdpoll_misses++;
885 spin_unlock_irqrestore(&host->lock, flags);
889 msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
891 struct msmsdcc_host *host = mmc_priv(mmc);
892 u32 clk = 0, pwr = 0;
897 msmsdcc_enable_clocks(host);
899 spin_lock_irqsave(&host->lock, flags);
902 if (ios->clock != host->clk_rate) {
903 rc = clk_set_rate(host->clk, ios->clock);
905 pr_err("%s: Error setting clock rate (%d)\n",
906 mmc_hostname(host->mmc), rc);
908 host->clk_rate = ios->clock;
910 clk |= MCI_CLK_ENABLE;
913 if (ios->bus_width == MMC_BUS_WIDTH_4)
914 clk |= (2 << 10); /* Set WIDEBUS */
916 if (ios->clock > 400000 && msmsdcc_pwrsave)
917 clk |= (1 << 9); /* PWRSAVE */
919 clk |= (1 << 12); /* FLOW_ENA */
920 clk |= (1 << 15); /* feedback clock */
922 if (host->plat->translate_vdd)
923 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
925 switch (ios->power_mode) {
936 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
939 msmsdcc_writel(host, clk, MMCICLOCK);
941 if (host->pwr != pwr) {
943 msmsdcc_writel(host, pwr, MMCIPOWER);
946 msmsdcc_disable_clocks(host, 1);
948 spin_unlock_irqrestore(&host->lock, flags);
951 static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
953 struct msmsdcc_host *host = mmc_priv(mmc);
957 spin_lock_irqsave(&host->lock, flags);
958 if (msmsdcc_sdioirq == 1) {
959 status = msmsdcc_readl(host, MMCIMASK0);
961 status |= MCI_SDIOINTOPERMASK;
963 status &= ~MCI_SDIOINTOPERMASK;
964 host->saved_irq0mask = status;
965 msmsdcc_writel(host, status, MMCIMASK0);
967 spin_unlock_irqrestore(&host->lock, flags);
970 static const struct mmc_host_ops msmsdcc_ops = {
971 .request = msmsdcc_request,
972 .set_ios = msmsdcc_set_ios,
973 .enable_sdio_irq = msmsdcc_enable_sdio_irq,
977 msmsdcc_check_status(unsigned long data)
979 struct msmsdcc_host *host = (struct msmsdcc_host *)data;
982 if (!host->plat->status) {
983 mmc_detect_change(host->mmc, 0);
987 status = host->plat->status(mmc_dev(host->mmc));
988 host->eject = !status;
989 if (status ^ host->oldstat) {
990 pr_info("%s: Slot status change detected (%d -> %d)\n",
991 mmc_hostname(host->mmc), host->oldstat, status);
993 mmc_detect_change(host->mmc, (5 * HZ) / 2);
995 mmc_detect_change(host->mmc, 0);
998 host->oldstat = status;
1001 if (host->timer.function)
1002 mod_timer(&host->timer, jiffies + HZ);
1006 msmsdcc_platform_status_irq(int irq, void *dev_id)
1008 struct msmsdcc_host *host = dev_id;
1010 printk(KERN_DEBUG "%s: %d\n", __func__, irq);
1011 msmsdcc_check_status((unsigned long) host);
1016 msmsdcc_status_notify_cb(int card_present, void *dev_id)
1018 struct msmsdcc_host *host = dev_id;
1020 printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
1022 msmsdcc_check_status((unsigned long) host);
1026 msmsdcc_busclk_expired(unsigned long _data)
1028 struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
1029 unsigned long flags;
1031 spin_lock_irqsave(&host->lock, flags);
1032 dev_info(mmc_dev(host->mmc), "Bus clock timer expired\n");
1034 msmsdcc_disable_clocks(host, 0);
1035 spin_unlock_irqrestore(&host->lock, flags);
1039 msmsdcc_init_dma(struct msmsdcc_host *host)
1041 memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
1042 host->dma.host = host;
1043 host->dma.channel = -1;
1048 host->dma.nc = dma_alloc_coherent(NULL,
1049 sizeof(struct msmsdcc_nc_dmadata),
1050 &host->dma.nc_busaddr,
1052 if (host->dma.nc == NULL) {
1053 pr_err("Unable to allocate DMA buffer\n");
1056 memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
1057 host->dma.cmd_busaddr = host->dma.nc_busaddr;
1058 host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
1059 offsetof(struct msmsdcc_nc_dmadata, cmdptr);
1060 host->dma.channel = host->dmares->start;
1065 #ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
1067 do_resume_work(struct work_struct *work)
1069 struct msmsdcc_host *host =
1070 container_of(work, struct msmsdcc_host, resume_task);
1071 struct mmc_host *mmc = host->mmc;
1074 mmc_resume_host(mmc);
1076 enable_irq(host->stat_irq);
1082 msmsdcc_probe(struct platform_device *pdev)
1084 struct mmc_platform_data *plat = pdev->dev.platform_data;
1085 struct msmsdcc_host *host;
1086 struct mmc_host *mmc;
1087 struct resource *cmd_irqres = NULL;
1088 struct resource *pio_irqres = NULL;
1089 struct resource *stat_irqres = NULL;
1090 struct resource *memres = NULL;
1091 struct resource *dmares = NULL;
1094 /* must have platform data */
1096 pr_err("%s: Platform data not available\n", __func__);
1101 if (pdev->id < 1 || pdev->id > 4)
1104 if (pdev->resource == NULL || pdev->num_resources < 2) {
1105 pr_err("%s: Invalid resource\n", __func__);
1109 memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1110 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1111 cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1113 pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1115 stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1118 if (!cmd_irqres || !pio_irqres || !memres) {
1119 pr_err("%s: Invalid resource\n", __func__);
1124 * Setup our host structure
1127 mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
1133 host = mmc_priv(mmc);
1134 host->pdev_id = pdev->id;
1137 host->curr.cmd = NULL;
1141 host->base = ioremap(memres->start, PAGE_SIZE);
1147 host->cmd_irqres = cmd_irqres;
1148 host->pio_irqres = pio_irqres;
1149 host->memres = memres;
1150 host->dmares = dmares;
1151 spin_lock_init(&host->lock);
1156 msmsdcc_init_dma(host);
1158 /* Get our clocks */
1159 host->pclk = clk_get(&pdev->dev, "sdc_pclk");
1160 if (IS_ERR(host->pclk)) {
1161 ret = PTR_ERR(host->pclk);
1165 host->clk = clk_get(&pdev->dev, "sdc_clk");
1166 if (IS_ERR(host->clk)) {
1167 ret = PTR_ERR(host->clk);
1172 ret = msmsdcc_enable_clocks(host);
1176 ret = clk_set_rate(host->clk, msmsdcc_fmin);
1178 pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
1182 host->pclk_rate = clk_get_rate(host->pclk);
1183 host->clk_rate = clk_get_rate(host->clk);
1186 * Setup MMC host structure
1188 mmc->ops = &msmsdcc_ops;
1189 mmc->f_min = msmsdcc_fmin;
1190 mmc->f_max = msmsdcc_fmax;
1191 mmc->ocr_avail = plat->ocr_mask;
1194 mmc->caps |= MMC_CAP_4_BIT_DATA;
1195 if (msmsdcc_sdioirq)
1196 mmc->caps |= MMC_CAP_SDIO_IRQ;
1197 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
1199 mmc->max_phys_segs = NR_SG;
1200 mmc->max_hw_segs = NR_SG;
1201 mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
1202 mmc->max_blk_count = 65536;
1204 mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
1205 mmc->max_seg_size = mmc->max_req_size;
1207 msmsdcc_writel(host, 0, MMCIMASK0);
1208 msmsdcc_writel(host, 0x5e007ff, MMCICLEAR);
1210 msmsdcc_writel(host, MCI_IRQENABLE, MMCIMASK0);
1211 host->saved_irq0mask = MCI_IRQENABLE;
1214 * Setup card detect change
1217 memset(&host->timer, 0, sizeof(host->timer));
1219 if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
1220 unsigned long irqflags = IRQF_SHARED |
1221 (stat_irqres->flags & IRQF_TRIGGER_MASK);
1223 host->stat_irq = stat_irqres->start;
1224 ret = request_irq(host->stat_irq,
1225 msmsdcc_platform_status_irq,
1227 DRIVER_NAME " (slot)",
1230 pr_err("%s: Unable to get slot IRQ %d (%d)\n",
1231 mmc_hostname(mmc), host->stat_irq, ret);
1234 } else if (plat->register_status_notify) {
1235 plat->register_status_notify(msmsdcc_status_notify_cb, host);
1236 } else if (!plat->status)
1237 pr_err("%s: No card detect facilities available\n",
1240 init_timer(&host->timer);
1241 host->timer.data = (unsigned long)host;
1242 host->timer.function = msmsdcc_check_status;
1243 host->timer.expires = jiffies + HZ;
1244 add_timer(&host->timer);
1248 host->oldstat = host->plat->status(mmc_dev(host->mmc));
1249 host->eject = !host->oldstat;
1252 init_timer(&host->busclk_timer);
1253 host->busclk_timer.data = (unsigned long) host;
1254 host->busclk_timer.function = msmsdcc_busclk_expired;
1256 ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
1257 DRIVER_NAME " (cmd)", host);
1261 ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
1262 DRIVER_NAME " (pio)", host);
1266 mmc_set_drvdata(pdev, mmc);
1269 pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
1270 mmc_hostname(mmc), (unsigned long long)memres->start,
1271 (unsigned int) cmd_irqres->start,
1272 (unsigned int) host->stat_irq, host->dma.channel);
1273 pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
1274 (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
1275 pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
1276 mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
1277 pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
1278 pr_info("%s: Power save feature enable = %d\n",
1279 mmc_hostname(mmc), msmsdcc_pwrsave);
1281 if (host->dma.channel != -1) {
1282 pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
1283 mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
1284 pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
1285 mmc_hostname(mmc), host->dma.cmd_busaddr,
1286 host->dma.cmdptr_busaddr);
1288 pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
1289 if (host->timer.function)
1290 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
1293 msmsdcc_disable_clocks(host, 1);
1297 free_irq(cmd_irqres->start, host);
1300 free_irq(host->stat_irq, host);
1302 msmsdcc_disable_clocks(host, 0);
1306 clk_put(host->pclk);
1314 msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1316 struct mmc_host *mmc = mmc_get_drvdata(dev);
1320 struct msmsdcc_host *host = mmc_priv(mmc);
1323 disable_irq(host->stat_irq);
1325 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1326 rc = mmc_suspend_host(mmc, state);
1328 msmsdcc_writel(host, 0, MMCIMASK0);
1332 msmsdcc_disable_clocks(host, 0);
1338 msmsdcc_resume(struct platform_device *dev)
1340 struct mmc_host *mmc = mmc_get_drvdata(dev);
1343 struct msmsdcc_host *host = mmc_priv(mmc);
1345 msmsdcc_enable_clocks(host);
1347 msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
1349 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1350 mmc_resume_host(mmc);
1352 enable_irq(host->stat_irq);
1354 msmsdcc_disable_clocks(host, 1);
1360 static struct platform_driver msmsdcc_driver = {
1361 .probe = msmsdcc_probe,
1362 .suspend = msmsdcc_suspend,
1363 .resume = msmsdcc_resume,
1369 static int __init msmsdcc_init(void)
1371 return platform_driver_register(&msmsdcc_driver);
1374 static void __exit msmsdcc_exit(void)
1376 platform_driver_unregister(&msmsdcc_driver);
1379 module_init(msmsdcc_init);
1380 module_exit(msmsdcc_exit);
1382 MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
1383 MODULE_LICENSE("GPL");