2 * linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
4 * Copyright (C) 2007 Google Inc.
5 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
13 * Author: San Mehat (san@android.com)
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <linux/init.h>
20 #include <linux/ioport.h>
21 #include <linux/device.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/highmem.h>
26 #include <linux/log2.h>
27 #include <linux/mmc/host.h>
28 #include <linux/mmc/card.h>
29 #include <linux/clk.h>
30 #include <linux/scatterlist.h>
31 #include <linux/platform_device.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/debugfs.h>
35 #include <linux/memory.h>
37 #include <asm/cacheflush.h>
38 #include <asm/div64.h>
39 #include <asm/sizes.h>
41 #include <asm/mach/mmc.h>
42 #include <mach/msm_iomap.h>
44 #include <mach/htc_pwrsink.h>
/* Name used for the platform driver and IRQ registrations. */
48 #define DRIVER_NAME "msm-sdcc"
/*
 * Module tunables:
 *  - fmin/fmax: minimum/maximum controller clock rates in Hz
 *  - 4bit: advertise 4-bit bus width support
 *  - pwrsave: gate the card clock when idle (PWRSAVE bit in MMCICLOCK)
 *  - piopoll: busy-poll for command/PIO status instead of waiting for IRQ
 *  - sdioirq: advertise SDIO interrupt support (off by default)
 */
50 static unsigned int msmsdcc_fmin = 144000;
51 static unsigned int msmsdcc_fmax = 50000000;
52 static unsigned int msmsdcc_4bit = 1;
53 static unsigned int msmsdcc_pwrsave = 1;
54 static unsigned int msmsdcc_piopoll = 1;
55 static unsigned int msmsdcc_sdioirq;
/* Iteration caps for the status busy-wait loops (see msmsdcc_spin_on_status). */
57 #define PIO_SPINMAX 30
58 #define CMD_SPINMAX 20
/* Forward declaration; defined later in this file. */
61 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
/*
 * Complete an MMC request: disable the command state machine, clear the
 * driver's current-request bookkeeping, and hand the request back to the
 * MMC core.  Caller must hold host->lock.
 */
65 msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
67 writel(0, host->base + MMCICOMMAND);
/* Data phase must already have been torn down via msmsdcc_stop_data(). */
69 BUG_ON(host->curr.data);
71 host->curr.mrq = NULL;
72 host->curr.cmd = NULL;
/* Report how many bytes actually moved before any error/timeout. */
75 mrq->data->bytes_xfered = host->curr.data_xfered;
76 if (mrq->cmd->error == -ETIMEDOUT)
80 * Need to drop the host lock here; mmc_request_done may call
81 * back into the driver...
83 spin_unlock(&host->lock);
84 mmc_request_done(host->mmc, mrq);
85 spin_lock(&host->lock);
/*
 * Tear down the current data transfer: disable the data path state
 * machine and reset the per-transfer progress flags.
 */
89 msmsdcc_stop_data(struct msmsdcc_host *host)
91 writel(0, host->base + MMCIDATACTRL);
92 host->curr.data = NULL;
93 host->curr.got_dataend = host->curr.got_datablkend = 0;
/*
 * Return the physical address of this controller instance's data FIFO,
 * used as the fixed source/destination address for DataMover box-mode
 * transfers.  Selected by platform-device id (SDC1..SDC4).
 * NOTE(review): the case labels were lost in this listing — each return
 * presumably corresponds to pdev_id 1..4 in order.
 */
96 uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
98 switch (host->pdev_id) {
100 return MSM_SDC1_PHYS + MMCIFIFO;
102 return MSM_SDC2_PHYS + MMCIFIFO;
104 return MSM_SDC3_PHYS + MMCIFIFO;
106 return MSM_SDC4_PHYS + MMCIFIFO;
/*
 * DataMover completion callback.  Runs when the DMA engine finishes (or
 * flushes/errors) the transfer for the current request.  Validates the
 * result, unmaps the scatterlist, flushes the dcache for user pages on
 * reads, and — if the controller-side DATAEND/DATABLKEND interrupts have
 * already arrived — completes the request or issues the stop command.
 */
113 msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
115 struct msm_dmov_errdata *err)
117 struct msmsdcc_dma_data *dma_data =
118 container_of(cmd, struct msmsdcc_dma_data, hdr);
119 struct msmsdcc_host *host = dma_data->host;
121 struct mmc_request *mrq;
123 spin_lock_irqsave(&host->lock, flags);
124 mrq = host->curr.mrq;
/* The DMOV result word must have its VALID bit set to be meaningful. */
127 if (!(result & DMOV_RSLT_VALID)) {
128 pr_err("msmsdcc: Invalid DataMover result\n");
132 if (result & DMOV_RSLT_DONE) {
/* DMA completed fully; the whole transfer size was moved. */
133 host->curr.data_xfered = host->curr.xfer_size;
/* Error / flush paths: log diagnostics and mark the data as failed. */
136 if (result & DMOV_RSLT_ERROR)
137 pr_err("%s: DMA error (0x%.8x)\n",
138 mmc_hostname(host->mmc), result);
139 if (result & DMOV_RSLT_FLUSH)
140 pr_err("%s: DMA channel flushed (0x%.8x)\n",
141 mmc_hostname(host->mmc), result);
143 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
144 err->flush[0], err->flush[1], err->flush[2],
145 err->flush[3], err->flush[4], err->flush[5]);
/* Preserve any earlier, more specific error code. */
146 if (!mrq->data->error)
147 mrq->data->error = -EIO;
150 dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
/* Reads into user pages: make the dcache coherent with the DMA'd data. */
153 if (host->curr.user_pages) {
154 struct scatterlist *sg = host->dma.sg;
157 for (i = 0; i < host->dma.num_ents; i++)
158 flush_dcache_page(sg_page(sg++));
163 if ((host->curr.got_dataend && host->curr.got_datablkend)
164 || mrq->data->error) {
167 * If we've already gotten our DATAEND / DATABLKEND
168 * for this request, then complete it through here.
170 msmsdcc_stop_data(host);
172 if (!mrq->data->error)
173 host->curr.data_xfered = host->curr.xfer_size;
/* No stop command needed (or command already failed): finish inline. */
174 if (!mrq->data->stop || mrq->cmd->error) {
175 writel(0, host->base + MMCICOMMAND);
176 host->curr.mrq = NULL;
177 host->curr.cmd = NULL;
178 mrq->data->bytes_xfered = host->curr.data_xfered;
/* Drop the lock: mmc_request_done() may re-enter the driver. */
180 spin_unlock_irqrestore(&host->lock, flags);
181 mmc_request_done(host->mmc, mrq);
/* Otherwise send the stop (CMD12) to close out the transfer. */
184 msmsdcc_start_command(host, mrq->data->stop, 0);
188 spin_unlock_irqrestore(&host->lock, flags);
/*
 * Decide whether this data transfer may use DMA.  DMA is rejected when no
 * DataMover channel is configured, or when the total transfer size is
 * smaller than — or not a multiple of — the FIFO size (box-mode rows must
 * be whole FIFO-sized units).  Returns 0 if DMA is usable (non-zero
 * return paths are elided in this listing).
 */
192 static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
194 if (host->dma.channel == -1)
197 if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
199 if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
/*
 * Build the DataMover box-mode command list for a data transfer: map the
 * scatterlist for DMA, then emit one box command per sg entry moving
 * FIFO-sized rows between memory and the controller FIFO, with CRCI flow
 * control selected by controller instance.  Returns 0 on success.
 */
204 static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
206 struct msmsdcc_nc_dmadata *nc;
212 struct scatterlist *sg = data->sg;
214 rc = validate_dma(host, data);
218 host->dma.sg = data->sg;
219 host->dma.num_ents = data->sg_len;
/* Pick the CRCI (flow-control line) for this SDCC instance.
 * NOTE(review): case labels for pdev_id were lost in this listing. */
223 switch (host->pdev_id) {
225 crci = MSMSDCC_CRCI_SDC1;
228 crci = MSMSDCC_CRCI_SDC2;
231 crci = MSMSDCC_CRCI_SDC3;
234 crci = MSMSDCC_CRCI_SDC4;
238 host->dma.num_ents = 0;
242 if (data->flags & MMC_DATA_READ)
243 host->dma.dir = DMA_FROM_DEVICE;
245 host->dma.dir = DMA_TO_DEVICE;
247 host->curr.user_pages = 0;
249 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
250 host->dma.num_ents, host->dma.dir);
/* Partial mapping is treated as failure; bail out cleanly. */
252 if (n != host->dma.num_ents) {
253 pr_err("%s: Unable to map in all sg elements\n",
254 mmc_hostname(host->mmc));
256 host->dma.num_ents = 0;
/* One box command per scatterlist entry. */
261 for (i = 0; i < host->dma.num_ents; i++) {
262 box->cmd = CMD_MODE_BOX;
264 if (i == (host->dma.num_ents - 1))
/* Round the row count up so a trailing partial FIFO-chunk is covered. */
266 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
267 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
268 (sg_dma_len(sg) / MCI_FIFOSIZE) ;
270 if (data->flags & MMC_DATA_READ) {
/* Read: FIFO -> memory; source stays fixed at the FIFO address. */
271 box->src_row_addr = msmsdcc_fifo_addr(host);
272 box->dst_row_addr = sg_dma_address(sg);
274 box->src_dst_len = (MCI_FIFOSIZE << 16) |
276 box->row_offset = MCI_FIFOSIZE;
/* num_rows packs the same count into both 16-bit halves. */
278 box->num_rows = rows * ((1 << 16) + 1);
279 box->cmd |= CMD_SRC_CRCI(crci);
/* Write: memory -> FIFO; destination stays fixed at the FIFO address. */
281 box->src_row_addr = sg_dma_address(sg);
282 box->dst_row_addr = msmsdcc_fifo_addr(host);
284 box->src_dst_len = (MCI_FIFOSIZE << 16) |
286 box->row_offset = (MCI_FIFOSIZE << 16);
288 box->num_rows = rows * ((1 << 16) + 1);
289 box->cmd |= CMD_DST_CRCI(crci);
295 /* location of command block must be 64 bit aligned */
296 BUG_ON(host->dma.cmd_busaddr & 0x07);
298 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
299 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
300 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
301 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
/*
 * Kick off the data phase of a request: program the data timeout and
 * length registers, try to configure DMA (falling back to PIO), set up
 * the PIO interrupt mask for the transfer direction, and enable the data
 * path state machine.  If DMA was configured, enqueue the DataMover
 * command list.
 */
307 msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
309 unsigned int datactrl, timeout;
310 unsigned long long clks;
311 void __iomem *base = host->base;
312 unsigned int pio_irqmask = 0;
314 host->curr.data = data;
315 host->curr.xfer_size = data->blksz * data->blocks;
316 host->curr.xfer_remain = host->curr.xfer_size;
317 host->curr.data_xfered = 0;
318 host->curr.got_dataend = 0;
319 host->curr.got_datablkend = 0;
321 memset(&host->pio, 0, sizeof(host->pio));
/* Convert the ns timeout into controller clock ticks (64-bit math). */
323 clks = (unsigned long long)data->timeout_ns * host->clk_rate;
324 do_div(clks, NSEC_PER_SEC);
325 timeout = data->timeout_clks + (unsigned int)clks;
326 writel(timeout, base + MMCIDATATIMER);
328 writel(host->curr.xfer_size, base + MMCIDATALENGTH);
/* Block size is encoded in bits [7:4] of MMCIDATACTRL. */
330 datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
/* Prefer DMA; msmsdcc_config_dma() returns 0 when DMA is usable. */
332 if (!msmsdcc_config_dma(host, data))
333 datactrl |= MCI_DPSM_DMAENABLE;
335 host->pio.sg = data->sg;
336 host->pio.sg_len = data->sg_len;
337 host->pio.sg_off = 0;
339 if (data->flags & MMC_DATA_READ) {
340 pio_irqmask = MCI_RXFIFOHALFFULLMASK;
/* Small reads never half-fill the FIFO; also watch data-available. */
341 if (host->curr.xfer_remain < MCI_FIFOSIZE)
342 pio_irqmask |= MCI_RXDATAAVLBLMASK;
344 pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
347 if (data->flags & MMC_DATA_READ)
348 datactrl |= MCI_DPSM_DIRECTION;
350 writel(pio_irqmask, base + MMCIMASK1);
351 writel(datactrl, base + MMCIDATACTRL);
353 if (datactrl & MCI_DPSM_DMAENABLE) {
355 msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
/*
 * Issue a command to the card: build the MMCICOMMAND control word from
 * the command opcode, response flags and caller-supplied bits 'c', then
 * write argument and command registers.  If the command state machine is
 * still enabled from a previous command, disable it and wait ~5 clock
 * cycles before reprogramming.
 */
360 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
362 void __iomem *base = host->base;
364 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
365 writel(0, base + MMCICOMMAND);
/* Delay at least 5 controller clock periods (plus 2us slack). */
366 udelay(2 + ((5 * 1000000) / host->clk_rate));
369 c |= cmd->opcode | MCI_CPSM_ENABLE;
371 if (cmd->flags & MMC_RSP_PRESENT) {
372 if (cmd->flags & MMC_RSP_136)
373 c |= MCI_CPSM_LONGRSP;
374 c |= MCI_CPSM_RESPONSE;
/* Data-bearing opcodes (single/multi block read/write, ...) set DATCMD.
 * NOTE(review): part of this condition was lost in the listing. */
377 if (cmd->opcode == 17 || cmd->opcode == 18 ||
378 cmd->opcode == 24 || cmd->opcode == 25 ||
380 c |= MCI_CSPM_DATCMD;
/* A stop command must abort the in-progress data transfer. */
382 if (cmd == cmd->mrq->stop)
383 c |= MCI_CSPM_MCIABORT;
385 host->curr.cmd = cmd;
389 writel(cmd->arg, base + MMCIARGUMENT);
390 writel(c, base + MMCICOMMAND);
/*
 * Translate a data-phase error status into a data->error code and log a
 * diagnostic: CRC failure -> -EILSEQ, timeout -> -ETIMEDOUT; RX overrun
 * and TX underrun are logged (their error assignments are elided in this
 * listing); anything else is reported as unknown.
 */
394 msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
397 if (status & MCI_DATACRCFAIL) {
398 pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
399 pr_err("%s: opcode 0x%.8x\n", __func__,
400 data->mrq->cmd->opcode);
401 pr_err("%s: blksz %d, blocks %d\n", __func__,
402 data->blksz, data->blocks);
403 data->error = -EILSEQ;
404 } else if (status & MCI_DATATIMEOUT) {
405 pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
406 data->error = -ETIMEDOUT;
407 } else if (status & MCI_RXOVERRUN) {
408 pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
410 } else if (status & MCI_TXUNDERRUN) {
411 pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
414 pr_err("%s: Unknown error (0x%.8x)\n",
415 mmc_hostname(host->mmc), status);
/*
 * PIO read: drain 32-bit words from the controller FIFO into 'buffer'
 * while the RX-data-available flag is set and bytes remain.  Presumably
 * returns the number of bytes read (return statement elided in this
 * listing).
 */
422 msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
424 void __iomem *base = host->base;
425 uint32_t *ptr = (uint32_t *) buffer;
428 while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
/* Read rotates through the FIFO window (offset wraps at FIFO size). */
430 *ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
432 count += sizeof(uint32_t);
434 remain -= sizeof(uint32_t);
/*
 * PIO write: push words from 'buffer' into the controller FIFO, writing
 * up to a full FIFO when it is empty (half a FIFO otherwise), looping
 * while the TX-FIFO-half-empty flag stays set.  Presumably returns the
 * number of bytes written (return elided in this listing).
 */
442 msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
443 unsigned int remain, u32 status)
445 void __iomem *base = host->base;
449 unsigned int count, maxcnt;
451 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
453 count = min(remain, maxcnt);
/* writesl() moves 32-bit words; count is in bytes, hence >> 2. */
455 writesl(base + MMCIFIFO, ptr, count >> 2);
462 status = readl(base + MMCISTATUS);
463 } while (status & MCI_TXFIFOHALFEMPTY);
/*
 * Busy-wait until any bit in 'mask' appears in MMCISTATUS, bounded by
 * 'maxspin' iterations.  Presumably returns 0 on success and non-zero on
 * timeout (loop body and returns elided in this listing).
 */
469 msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
472 if ((readl(host->base + MMCISTATUS) & mask))
/*
 * PIO interrupt handler: moves data between the current scatterlist
 * entry and the controller FIFO (read or write depending on RX/TX
 * active status), advancing through sg entries as each page fills.
 * Adjusts MMCIMASK1 near the end of transfer so small residues still
 * raise an interrupt, and masks PIO IRQs once nothing remains.
 */
481 msmsdcc_pio_irq(int irq, void *dev_id)
483 struct msmsdcc_host *host = dev_id;
484 void __iomem *base = host->base;
487 status = readl(base + MMCISTATUS);
491 unsigned int remain, len;
/* If neither FIFO condition is set, optionally spin-poll for it. */
494 if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
495 if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
498 if (msmsdcc_spin_on_status(host,
499 (MCI_TXFIFOHALFEMPTY |
506 /* Map the current scatter buffer */
507 local_irq_save(flags);
508 buffer = kmap_atomic(sg_page(host->pio.sg),
509 KM_BIO_SRC_IRQ) + host->pio.sg->offset;
510 buffer += host->pio.sg_off;
511 remain = host->pio.sg->length - host->pio.sg_off;
513 if (status & MCI_RXACTIVE)
514 len = msmsdcc_pio_read(host, buffer, remain);
515 if (status & MCI_TXACTIVE)
516 len = msmsdcc_pio_write(host, buffer, remain, status);
518 /* Unmap the buffer */
519 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
520 local_irq_restore(flags);
522 host->pio.sg_off += len;
523 host->curr.xfer_remain -= len;
524 host->curr.data_xfered += len;
528 /* This sg page is full - do some housekeeping */
/* Reads into user pages need a dcache flush before the page is reused. */
529 if (status & MCI_RXACTIVE && host->curr.user_pages)
530 flush_dcache_page(sg_page(host->pio.sg));
532 if (!--host->pio.sg_len) {
533 memset(&host->pio, 0, sizeof(host->pio));
537 /* Advance to next sg */
539 host->pio.sg_off = 0;
542 status = readl(base + MMCISTATUS);
/* Tail of a read: switch to data-available IRQ for the final words. */
545 if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
546 writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
548 if (!host->curr.xfer_remain)
549 writel(0, base + MMCIMASK1);
/*
 * Handle command completion: latch the four response registers, cancel
 * the watchdog command timer, convert timeout/CRC status into an error
 * code, then either tear down the request (aborting DMA or PIO data as
 * needed) or start the write data phase for a data command.
 */
554 static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
556 struct mmc_command *cmd = host->curr.cmd;
557 void __iomem *base = host->base;
559 host->curr.cmd = NULL;
560 cmd->resp[0] = readl(base + MMCIRESPONSE0);
561 cmd->resp[1] = readl(base + MMCIRESPONSE1);
562 cmd->resp[2] = readl(base + MMCIRESPONSE2);
563 cmd->resp[3] = readl(base + MMCIRESPONSE3);
565 del_timer(&host->command_timer);
566 if (status & MCI_CMDTIMEOUT) {
567 cmd->error = -ETIMEDOUT;
/* CRC errors only matter for commands whose response carries a CRC. */
568 } else if (status & MCI_CMDCRCFAIL &&
569 cmd->flags & MMC_RSP_CRC) {
570 pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
571 cmd->error = -EILSEQ;
574 if (!cmd->data || cmd->error) {
/* In-flight DMA: flush it; completion callback finishes the request. */
575 if (host->curr.data && host->dma.sg)
576 msm_dmov_stop_cmd(host->dma.channel,
578 else if (host->curr.data) { /* Non DMA */
579 msmsdcc_stop_data(host);
580 msmsdcc_request_end(host, cmd->mrq);
581 } else /* host->data == NULL */
582 msmsdcc_request_end(host, cmd->mrq);
/* Write commands start their data phase only after the command ends. */
583 } else if (!(cmd->data->flags & MMC_DATA_READ))
584 msmsdcc_start_data(host, cmd->data);
/*
 * Data-phase portion of the main IRQ handler: report data errors, track
 * the DATAEND/DATABLKEND completion pair, and — when both have arrived
 * and DMA is not still in flight — finish the transfer, working around a
 * controller quirk where small transfers can signal completion while
 * data still sits in the FIFO.
 */
588 msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
591 struct mmc_data *data = host->curr.data;
596 /* Check for data errors */
597 if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
598 MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
599 msmsdcc_data_err(host, data, status);
600 host->curr.data_xfered = 0;
/* Abort any DMA in progress before tearing down the request. */
602 msm_dmov_stop_cmd(host->dma.channel,
605 msmsdcc_stop_data(host);
607 msmsdcc_request_end(host, data->mrq);
609 msmsdcc_start_command(host, data->stop, 0);
613 /* Check for data done */
614 if (!host->curr.got_dataend && (status & MCI_DATAEND))
615 host->curr.got_dataend = 1;
617 if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
618 host->curr.got_datablkend = 1;
621 * If DMA is still in progress, we complete via the completion handler
623 if (host->curr.got_dataend && host->curr.got_datablkend &&
626 * There appears to be an issue in the controller where
627 * if you request a small block transfer (< fifo size),
628 * you may get your DATAEND/DATABLKEND irq without the
631 * Check to see if there is still data to be read,
632 * and simulate a PIO irq.
634 if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
635 msmsdcc_pio_irq(1, host);
637 msmsdcc_stop_data(host);
639 host->curr.data_xfered = host->curr.xfer_size;
642 msmsdcc_request_end(host, data->mrq);
644 msmsdcc_start_command(host, data->stop, 0);
/*
 * Main controller interrupt handler: reads and acknowledges the masked
 * status bits, dispatches data-phase events, handles command completion,
 * and signals SDIO card interrupts to the MMC core (after dropping the
 * host lock, since the SDIO handler may call back into the driver).
 */
649 msmsdcc_irq(int irq, void *dev_id)
651 struct msmsdcc_host *host = dev_id;
652 void __iomem *base = host->base;
657 spin_lock(&host->lock);
660 status = readl(base + MMCISTATUS);
/* Only consider enabled sources; DATABLOCKEND is always of interest. */
662 status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
663 writel(status, base + MMCICLEAR);
665 msmsdcc_handle_irq_data(host, status, base);
667 if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
668 MCI_CMDTIMEOUT) && host->curr.cmd) {
669 msmsdcc_do_cmdirq(host, status);
672 if (status & MCI_SDIOINTOPER) {
674 status &= ~MCI_SDIOINTOPER;
679 spin_unlock(&host->lock);
682 * We have to delay handling the card interrupt as it calls
683 * back into the driver.
686 mmc_signal_sdio_irq(host->mmc);
688 return IRQ_RETVAL(ret);
/*
 * mmc_host_ops.request: entry point for a new MMC request.  Rejects the
 * request with -ENOMEDIUM when no card is present (reporting a fake
 * byte count for writes — card-removal path context is elided here),
 * otherwise starts the read data phase and/or the command, optionally
 * busy-polling for fast command completion before arming the command
 * watchdog timer.
 */
692 msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
694 struct msmsdcc_host *host = mmc_priv(mmc);
697 WARN_ON(host->curr.mrq != NULL);
698 WARN_ON(host->pwr == 0);
700 spin_lock_irqsave(&host->lock, flags);
705 if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
707 mrq->data->bytes_xfered = mrq->data->blksz *
710 mrq->cmd->error = -ENOMEDIUM;
712 spin_unlock_irqrestore(&host->lock, flags);
713 mmc_request_done(mmc, mrq);
717 host->curr.mrq = mrq;
/* Reads: the data path must be armed before the command is sent. */
719 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
720 msmsdcc_start_data(host, mrq->data);
722 msmsdcc_start_command(host, mrq->cmd, 0);
/* Optional fast path: spin briefly for command completion. */
724 if (host->cmdpoll && !msmsdcc_spin_on_status(host,
725 MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
727 uint32_t status = readl(host->base + MMCISTATUS);
728 msmsdcc_do_cmdirq(host, status);
729 writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
730 host->base + MMCICLEAR);
731 host->stats.cmdpoll_hits++;
733 host->stats.cmdpoll_misses++;
/* Poll missed: fall back to IRQ completion with a 1s watchdog. */
734 mod_timer(&host->command_timer, jiffies + HZ);
736 spin_unlock_irqrestore(&host->lock, flags);
/*
 * mmc_host_ops.set_ios: apply bus settings requested by the MMC core —
 * enable clocks, set the clock rate and bus width, program power-save /
 * flow-control / feedback-clock bits, translate the VDD selection via
 * the platform hook, and update the power register.  Clocks are gated
 * again when the core requests the clock off.
 */
740 msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
742 struct msmsdcc_host *host = mmc_priv(mmc);
743 u32 clk = 0, pwr = 0;
748 if (!host->clks_on) {
749 clk_enable(host->pclk);
750 clk_enable(host->clk);
753 if (ios->clock != host->clk_rate) {
754 rc = clk_set_rate(host->clk, ios->clock);
756 pr_err("%s: Error setting clock rate (%d)\n",
757 mmc_hostname(host->mmc), rc);
759 host->clk_rate = ios->clock;
761 clk |= MCI_CLK_ENABLE;
764 if (ios->bus_width == MMC_BUS_WIDTH_4)
765 clk |= (2 << 10); /* Set WIDEBUS */
/* Power-save clock gating only above the 400 kHz identification rate. */
767 if (ios->clock > 400000 && msmsdcc_pwrsave)
768 clk |= (1 << 9); /* PWRSAVE */
770 clk |= (1 << 12); /* FLOW_ENA */
771 clk |= (1 << 15); /* feedback clock */
/* Board-specific VDD-to-power-register translation, if provided. */
773 if (host->plat->translate_vdd)
774 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
/* NOTE(review): case labels (MMC_POWER_OFF/UP/ON) were lost here. */
776 switch (ios->power_mode) {
778 htc_pwrsink_set(PWRSINK_SDCARD, 0);
784 htc_pwrsink_set(PWRSINK_SDCARD, 100);
789 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
792 writel(clk, host->base + MMCICLOCK);
794 if (host->pwr != pwr) {
796 writel(pwr, host->base + MMCIPOWER);
799 if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
800 clk_disable(host->clk);
801 clk_disable(host->pclk);
/*
 * mmc_host_ops.enable_sdio_irq: mask/unmask the SDIO card interrupt in
 * MMCIMASK0 (only when the module-level sdioirq option is set), and
 * remember the resulting mask so resume can restore it.
 */
806 static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
808 struct msmsdcc_host *host = mmc_priv(mmc);
812 spin_lock_irqsave(&host->lock, flags);
813 if (msmsdcc_sdioirq == 1) {
814 status = readl(host->base + MMCIMASK0);
816 status |= MCI_SDIOINTOPERMASK;
818 status &= ~MCI_SDIOINTOPERMASK;
819 host->saved_irq0mask = status;
820 writel(status, host->base + MMCIMASK0);
822 spin_unlock_irqrestore(&host->lock, flags);
/* Host operations handed to the MMC core at mmc_add_host() time. */
825 static const struct mmc_host_ops msmsdcc_ops = {
826 .request = msmsdcc_request,
827 .set_ios = msmsdcc_set_ios,
828 .enable_sdio_irq = msmsdcc_enable_sdio_irq,
/*
 * Card-detect poll / status-change handler (also used as a timer
 * callback).  Without a platform status hook it simply reports a
 * possible change; with one, it compares against the last known state,
 * updates the eject flag, and notifies the MMC core with a debounce
 * delay on what is presumably card insertion.  Re-arms itself every
 * second when running in timer-polling mode.
 */
832 msmsdcc_check_status(unsigned long data)
834 struct msmsdcc_host *host = (struct msmsdcc_host *)data;
837 if (!host->plat->status) {
838 mmc_detect_change(host->mmc, 0);
842 status = host->plat->status(mmc_dev(host->mmc));
843 host->eject = !status;
844 if (status ^ host->oldstat) {
845 pr_info("%s: Slot status change detected (%d -> %d)\n",
846 mmc_hostname(host->mmc), host->oldstat, status);
848 mmc_detect_change(host->mmc, (5 * HZ) / 2);
850 mmc_detect_change(host->mmc, 0);
853 host->oldstat = status;
/* Only re-arm when set up as a polling timer (function installed). */
856 if (host->timer.function)
857 mod_timer(&host->timer, jiffies + HZ);
/* Slot (card-detect) IRQ handler: delegate to the shared status check. */
861 msmsdcc_platform_status_irq(int irq, void *dev_id)
863 struct msmsdcc_host *host = dev_id;
865 printk(KERN_DEBUG "%s: %d\n", __func__, irq);
866 msmsdcc_check_status((unsigned long) host);
/* Platform status-notify callback: delegate to the shared status check. */
871 msmsdcc_status_notify_cb(int card_present, void *dev_id)
873 struct msmsdcc_host *host = dev_id;
875 printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
877 msmsdcc_check_status((unsigned long) host);
881 * called when a command expires.
882 * Dump some debugging, and then error
883 * out the transaction.
886 msmsdcc_command_expired(unsigned long _data)
888 struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
889 struct mmc_request *mrq;
892 spin_lock_irqsave(&host->lock, flags);
893 mrq = host->curr.mrq;
/* Timer fired after the request already completed: nothing to do. */
896 pr_info("%s: Command expiry misfire\n",
897 mmc_hostname(host->mmc));
898 spin_unlock_irqrestore(&host->lock, flags);
902 pr_err("%s: Command timeout (%p %p %p %p)\n",
903 mmc_hostname(host->mmc), mrq, mrq->cmd,
904 mrq->data, host->dma.sg);
/* Fail the command and fully tear down the controller state. */
906 mrq->cmd->error = -ETIMEDOUT;
907 msmsdcc_stop_data(host);
909 writel(0, host->base + MMCICOMMAND);
911 host->curr.mrq = NULL;
912 host->curr.cmd = NULL;
/* Complete outside the lock; mmc_request_done may re-enter the driver. */
914 spin_unlock_irqrestore(&host->lock, flags);
915 mmc_request_done(host->mmc, mrq);
/*
 * One-time DMA setup at probe: allocate the non-cached (coherent) region
 * holding the DataMover command block and command pointer, record their
 * bus addresses, and take the DMA channel number from the platform DMA
 * resource.  channel stays -1 (DMA disabled) on allocation failure.
 */
919 msmsdcc_init_dma(struct msmsdcc_host *host)
921 memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
922 host->dma.host = host;
923 host->dma.channel = -1;
928 host->dma.nc = dma_alloc_coherent(NULL,
929 sizeof(struct msmsdcc_nc_dmadata),
930 &host->dma.nc_busaddr,
932 if (host->dma.nc == NULL) {
933 pr_err("Unable to allocate DMA buffer\n");
936 memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
937 host->dma.cmd_busaddr = host->dma.nc_busaddr;
938 host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
939 offsetof(struct msmsdcc_nc_dmadata, cmdptr);
940 host->dma.channel = host->dmares->start;
945 #ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
/*
 * Deferred resume: when configured, host resume runs from a workqueue
 * so the (possibly slow) mmc_resume_host() does not block the resume
 * path; the slot-status IRQ is re-enabled afterwards.
 */
947 do_resume_work(struct work_struct *work)
949 struct msmsdcc_host *host =
950 container_of(work, struct msmsdcc_host, resume_task)
951 struct mmc_host *mmc = host->mmc;
954 mmc_resume_host(mmc);
956 enable_irq(host->stat_irq);
/*
 * Platform-driver probe: validate platform data and resources, allocate
 * and fill the mmc_host/msmsdcc_host pair, map registers, set up DMA,
 * enable the bus (pclk) and controller (clk) clocks, describe host
 * capabilities to the MMC core, wire up card-detect (IRQ, notify
 * callback, or 1 Hz polling timer), arm the command watchdog, and
 * request the command and PIO interrupts.  Error paths (labels elided
 * in this listing) unwind in reverse order.
 */
962 msmsdcc_probe(struct platform_device *pdev)
964 struct mmc_platform_data *plat = pdev->dev.platform_data;
965 struct msmsdcc_host *host;
966 struct mmc_host *mmc;
967 struct resource *cmd_irqres = NULL;
968 struct resource *pio_irqres = NULL;
969 struct resource *stat_irqres = NULL;
970 struct resource *memres = NULL;
971 struct resource *dmares = NULL;
974 /* must have platform data */
976 pr_err("%s: Platform data not available\n", __func__);
/* Only controller instances 1..4 exist on this SoC. */
981 if (pdev->id < 1 || pdev->id > 4)
984 if (pdev->resource == NULL || pdev->num_resources < 2) {
985 pr_err("%s: Invalid resource\n", __func__);
989 memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
990 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
991 cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
993 pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
995 stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
/* stat (card-detect) IRQ is optional; cmd/pio/mem are mandatory. */
998 if (!cmd_irqres || !pio_irqres || !memres) {
999 pr_err("%s: Invalid resource\n", __func__);
1004 * Setup our host structure
1007 mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
1013 host = mmc_priv(mmc);
1014 host->pdev_id = pdev->id;
1020 host->base = ioremap(memres->start, PAGE_SIZE);
1026 host->cmd_irqres = cmd_irqres;
1027 host->pio_irqres = pio_irqres;
1028 host->memres = memres;
1029 host->dmares = dmares;
1030 spin_lock_init(&host->lock);
1035 msmsdcc_init_dma(host);
1038 * Setup main peripheral bus clock
1040 host->pclk = clk_get(&pdev->dev, "sdc_pclk");
1041 if (IS_ERR(host->pclk)) {
1042 ret = PTR_ERR(host->pclk);
1046 ret = clk_enable(host->pclk);
1050 host->pclk_rate = clk_get_rate(host->pclk);
1053 * Setup SDC MMC clock
1055 host->clk = clk_get(&pdev->dev, "sdc_clk");
1056 if (IS_ERR(host->clk)) {
1057 ret = PTR_ERR(host->clk);
1061 ret = clk_enable(host->clk);
1065 ret = clk_set_rate(host->clk, msmsdcc_fmin);
1067 pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
1071 host->clk_rate = clk_get_rate(host->clk);
1076 * Setup MMC host structure
1078 mmc->ops = &msmsdcc_ops;
1079 mmc->f_min = msmsdcc_fmin;
1080 mmc->f_max = msmsdcc_fmax;
1081 mmc->ocr_avail = plat->ocr_mask;
1084 mmc->caps |= MMC_CAP_4_BIT_DATA;
1085 if (msmsdcc_sdioirq)
1086 mmc->caps |= MMC_CAP_SDIO_IRQ;
1087 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
1089 mmc->max_phys_segs = NR_SG;
1090 mmc->max_hw_segs = NR_SG;
1091 mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
1092 mmc->max_blk_count = 65536;
1094 mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
1095 mmc->max_seg_size = mmc->max_req_size;
/* Quiesce the controller, then enable the standard IRQ set. */
1097 writel(0, host->base + MMCIMASK0);
1098 writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */
1100 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
1101 host->saved_irq0mask = MCI_IRQENABLE;
1104 * Setup card detect change
1107 memset(&host->timer, 0, sizeof(host->timer));
/* Preferred: dedicated slot-status IRQ from the board resources. */
1109 if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
1110 unsigned long irqflags = IRQF_SHARED |
1111 (stat_irqres->flags & IRQF_TRIGGER_MASK);
1113 host->stat_irq = stat_irqres->start;
1114 ret = request_irq(host->stat_irq,
1115 msmsdcc_platform_status_irq,
1117 DRIVER_NAME " (slot)",
1120 pr_err("%s: Unable to get slot IRQ %d (%d)\n",
1121 mmc_hostname(mmc), host->stat_irq, ret);
/* Fallbacks: platform notify callback, else warn if nothing at all. */
1124 } else if (plat->register_status_notify) {
1125 plat->register_status_notify(msmsdcc_status_notify_cb, host);
1126 } else if (!plat->status)
1127 pr_err("%s: No card detect facilities available\n",
/* Polling timer path: check the slot status once a second. */
1130 init_timer(&host->timer);
1131 host->timer.data = (unsigned long)host;
1132 host->timer.function = msmsdcc_check_status;
1133 host->timer.expires = jiffies + HZ;
1134 add_timer(&host->timer);
1138 host->oldstat = host->plat->status(mmc_dev(host->mmc));
1139 host->eject = !host->oldstat;
1143 * Setup a command timer. We currently need this due to
1144 * some 'strange' timeout / error handling situations.
1146 init_timer(&host->command_timer);
1147 host->command_timer.data = (unsigned long) host;
1148 host->command_timer.function = msmsdcc_command_expired;
1150 ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
1151 DRIVER_NAME " (cmd)", host);
1155 ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
1156 DRIVER_NAME " (pio)", host);
1160 mmc_set_drvdata(pdev, mmc);
1163 pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
1164 mmc_hostname(mmc), (unsigned long long)memres->start,
1165 (unsigned int) cmd_irqres->start,
1166 (unsigned int) host->stat_irq, host->dma.channel);
1167 pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
1168 (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
1169 pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
1170 mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
1171 pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
1172 pr_info("%s: Power save feature enable = %d\n",
1173 mmc_hostname(mmc), msmsdcc_pwrsave);
1175 if (host->dma.channel != -1) {
1176 pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
1177 mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
1178 pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
1179 mmc_hostname(mmc), host->dma.cmd_busaddr,
1180 host->dma.cmdptr_busaddr);
1182 pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
1183 if (host->timer.function)
1184 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
/* Error unwind (labels elided): release in reverse acquisition order. */
1188 free_irq(cmd_irqres->start, host);
1191 free_irq(host->stat_irq, host);
1193 clk_disable(host->clk);
1197 clk_disable(host->pclk);
1199 clk_put(host->pclk);
/*
 * Platform suspend: disable the slot-status IRQ, suspend the MMC core
 * (skipped for SDIO cards, which may need to stay functional), mask all
 * controller interrupts, and gate the clocks if they are running.
 */
1207 msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1209 struct mmc_host *mmc = mmc_get_drvdata(dev);
1213 struct msmsdcc_host *host = mmc_priv(mmc);
1216 disable_irq(host->stat_irq);
1218 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1219 rc = mmc_suspend_host(mmc, state);
1221 writel(0, host->base + MMCIMASK0);
1223 if (host->clks_on) {
1224 clk_disable(host->clk);
1225 clk_disable(host->pclk);
/*
 * Platform resume: re-enable the clocks, restore the interrupt mask
 * saved at suspend/IRQ-enable time, resume the MMC core (again skipping
 * SDIO cards), and re-enable the slot-status IRQ.
 * NOTE(review): the enable_irq()/else-if sequence at the end looks
 * garbled in this listing — presumably one branch is conditional on the
 * workqueue-resume config; verify against the full source.
 */
1234 msmsdcc_resume(struct platform_device *dev)
1236 struct mmc_host *mmc = mmc_get_drvdata(dev);
1237 unsigned long flags;
1240 struct msmsdcc_host *host = mmc_priv(mmc);
1242 spin_lock_irqsave(&host->lock, flags);
1244 if (!host->clks_on) {
1245 clk_enable(host->pclk);
1246 clk_enable(host->clk);
1250 writel(host->saved_irq0mask, host->base + MMCIMASK0);
1252 spin_unlock_irqrestore(&host->lock, flags);
1254 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1255 mmc_resume_host(mmc);
1257 enable_irq(host->stat_irq);
1258 else if (host->stat_irq)
1259 enable_irq(host->stat_irq);
/* Platform driver glue: probe plus legacy suspend/resume callbacks. */
1264 static struct platform_driver msmsdcc_driver = {
1265 .probe = msmsdcc_probe,
1266 .suspend = msmsdcc_suspend,
1267 .resume = msmsdcc_resume,
/* Module entry/exit: register/unregister the platform driver. */
1273 static int __init msmsdcc_init(void)
1275 return platform_driver_register(&msmsdcc_driver);
1278 static void __exit msmsdcc_exit(void)
1280 platform_driver_unregister(&msmsdcc_driver);
/* Module registration and metadata. */
1283 module_init(msmsdcc_init);
1284 module_exit(msmsdcc_exit);
1286 MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
1287 MODULE_LICENSE("GPL");