2 * linux/drivers/mmc/host/msm_sdcc.c - Qualcomm MSM 7X00A SDCC Driver
4 * Copyright (C) 2007 Google Inc,
5 * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
13 * Author: San Mehat (san@android.com)
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <linux/init.h>
20 #include <linux/ioport.h>
21 #include <linux/device.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/highmem.h>
26 #include <linux/log2.h>
27 #include <linux/mmc/host.h>
28 #include <linux/mmc/card.h>
29 #include <linux/clk.h>
30 #include <linux/scatterlist.h>
31 #include <linux/platform_device.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/debugfs.h>
35 #include <linux/memory.h>
37 #include <asm/cacheflush.h>
38 #include <asm/div64.h>
39 #include <asm/sizes.h>
42 #include <mach/msm_iomap.h>
/*
 * Driver name and module tunables.
 * NOTE(review): every line in this file carries a stray leading number
 * (leftover line numbering from an extraction pass) and many interior
 * lines are missing; the file will not compile as-is.  Verify against
 * the pristine source tree before making functional edits.
 */
47 #define DRIVER_NAME "msm-sdcc"
/* Controller clock bounds in Hz (fmin used as the probe/init rate). */
49 static unsigned int msmsdcc_fmin = 144000;
50 static unsigned int msmsdcc_fmax = 50000000;
/* Advertise 4-bit bus width (adds MMC_CAP_4_BIT_DATA in probe). */
51 static unsigned int msmsdcc_4bit = 1;
/* Set the PWRSAVE clock-gating bit when the bus clock is > 400 kHz. */
52 static unsigned int msmsdcc_pwrsave = 1;
/* Briefly spin-poll for command/PIO completion before relying on IRQs. */
53 static unsigned int msmsdcc_piopoll = 1;
/* SDIO card-interrupt support; disabled (0) by default. */
54 static unsigned int msmsdcc_sdioirq;
/* Upper bounds for the status spin loops in msmsdcc_spin_on_status(). */
56 #define PIO_SPINMAX 30
57 #define CMD_SPINMAX 20
/*
 * Forward declaration of the command-start helper (defined later in
 * the file).  NOTE(review): the declaration is truncated here —
 * remaining parameters/terminator elided in this excerpt.
 */
60 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,
/*
 * Finish an MMC request: disable the command state machine, clear the
 * host's current-request bookkeeping and hand the request back to the
 * MMC core.  Caller holds host->lock (see the unlock/relock below).
 * NOTE(review): several interior lines are elided in this excerpt.
 */
64 msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)
66 writel(0, host->base + MMCICOMMAND);
/* The data phase must already be torn down (msmsdcc_stop_data()). */
68 BUG_ON(host->curr.data);
70 host->curr.mrq = NULL;
71 host->curr.cmd = NULL;
/* Report how many bytes actually moved before any error. */
74 mrq->data->bytes_xfered = host->curr.data_xfered;
75 if (mrq->cmd->error == -ETIMEDOUT)
79 * Need to drop the host lock here; mmc_request_done may call
80 * back into the driver...
82 spin_unlock(&host->lock);
83 mmc_request_done(host->mmc, mrq);
84 spin_lock(&host->lock);
/*
 * Tear down the current data transfer: disable the data path state
 * machine and reset the per-transfer completion flags.
 */
88 msmsdcc_stop_data(struct msmsdcc_host *host)
90 writel(0, host->base + MMCIDATACTRL);
91 host->curr.data = NULL;
92 host->curr.got_dataend = host->curr.got_datablkend = 0;
/*
 * Return the physical address of the controller FIFO for this slot,
 * used as the fixed source/destination of DataMover box transfers.
 * NOTE(review): the case labels (and presumably a default/BUG path)
 * are elided in this excerpt.
 */
95 uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host)
97 switch (host->pdev_id) {
99 return MSM_SDC1_PHYS + MMCIFIFO;
101 return MSM_SDC2_PHYS + MMCIFIFO;
103 return MSM_SDC3_PHYS + MMCIFIFO;
105 return MSM_SDC4_PHYS + MMCIFIFO;
/*
 * DataMover completion callback.  Validates the DMA result, unmaps the
 * scatterlist, flushes the D-cache for user pages on reads, and — if
 * the controller's DATAEND/DATABLKEND interrupts have already fired —
 * completes (or issues the stop command for) the current request.
 * NOTE(review): interior lines (locals, else branches, closing braces)
 * are elided in this excerpt.
 */
112 msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
114 struct msm_dmov_errdata *err)
116 struct msmsdcc_dma_data *dma_data =
117 container_of(cmd, struct msmsdcc_dma_data, hdr);
118 struct msmsdcc_host *host = dma_data->host;
120 struct mmc_request *mrq;
122 spin_lock_irqsave(&host->lock, flags);
123 mrq = host->curr.mrq;
126 if (!(result & DMOV_RSLT_VALID)) {
127 pr_err("msmsdcc: Invalid DataMover result\n");
/* Successful DMA: the whole programmed transfer size moved. */
131 if (result & DMOV_RSLT_DONE) {
132 host->curr.data_xfered = host->curr.xfer_size;
/* Error / flush paths: log diagnostics and mark the data phase -EIO. */
135 if (result & DMOV_RSLT_ERROR)
136 pr_err("%s: DMA error (0x%.8x)\n",
137 mmc_hostname(host->mmc), result);
138 if (result & DMOV_RSLT_FLUSH)
139 pr_err("%s: DMA channel flushed (0x%.8x)\n",
140 mmc_hostname(host->mmc), result);
142 pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
143 err->flush[0], err->flush[1], err->flush[2],
144 err->flush[3], err->flush[4], err->flush[5]);
145 if (!mrq->data->error)
146 mrq->data->error = -EIO;
149 dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,
/* Reads into user pages: make the CPU view coherent after DMA. */
152 if (host->curr.user_pages) {
153 struct scatterlist *sg = host->dma.sg;
156 for (i = 0; i < host->dma.num_ents; i++)
157 flush_dcache_page(sg_page(sg++));
/* Only complete here if the controller-side end IRQs already arrived
 * (or the transfer errored); otherwise the data IRQ path finishes it. */
162 if ((host->curr.got_dataend && host->curr.got_datablkend)
163 || mrq->data->error) {
166 * If we've already gotten our DATAEND / DATABLKEND
167 * for this request, then complete it through here.
169 msmsdcc_stop_data(host);
171 if (!mrq->data->error)
172 host->curr.data_xfered = host->curr.xfer_size;
173 if (!mrq->data->stop || mrq->cmd->error) {
174 writel(0, host->base + MMCICOMMAND);
175 host->curr.mrq = NULL;
176 host->curr.cmd = NULL;
177 mrq->data->bytes_xfered = host->curr.data_xfered;
/* Drop the lock before calling back into the MMC core. */
179 spin_unlock_irqrestore(&host->lock, flags);
180 mmc_request_done(host->mmc, mrq);
/* else: a stop command is pending — issue it now. */
183 msmsdcc_start_command(host, mrq->data->stop, 0);
187 spin_unlock_irqrestore(&host->lock, flags);
/*
 * Decide whether this data transfer can use the DataMover: requires an
 * assigned DMA channel and a total length that is at least one FIFO
 * and a whole multiple of the FIFO size.  Return values elided in this
 * excerpt (presumably 0 = DMA ok, non-zero = fall back to PIO — TODO
 * confirm against pristine source).
 */
191 static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
193 if (host->dma.channel == -1)
196 if ((data->blksz * data->blocks) < MCI_FIFOSIZE)
198 if ((data->blksz * data->blocks) % MCI_FIFOSIZE)
/*
 * Prepare a DataMover box-mode command list for the given transfer:
 * map the scatterlist, build one box command per sg entry moving
 * FIFO-sized rows between memory and the controller FIFO, and point
 * the DMA header at the command list.
 * NOTE(review): numerous interior lines (locals, case labels, error
 * returns, closing braces) are elided in this excerpt.
 */
203 static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
205 struct msmsdcc_nc_dmadata *nc;
211 struct scatterlist *sg = data->sg;
213 rc = validate_dma(host, data);
217 host->dma.sg = data->sg;
218 host->dma.num_ents = data->sg_len;
/* Select the flow-control CRCI matching this SDCC instance. */
222 switch (host->pdev_id) {
224 crci = MSMSDCC_CRCI_SDC1;
227 crci = MSMSDCC_CRCI_SDC2;
230 crci = MSMSDCC_CRCI_SDC3;
233 crci = MSMSDCC_CRCI_SDC4;
237 host->dma.num_ents = 0;
241 if (data->flags & MMC_DATA_READ)
242 host->dma.dir = DMA_FROM_DEVICE;
244 host->dma.dir = DMA_TO_DEVICE;
246 host->curr.user_pages = 0;
248 n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
249 host->dma.num_ents, host->dma.dir);
251 if (n != host->dma.num_ents) {
252 pr_err("%s: Unable to map in all sg elements\n",
253 mmc_hostname(host->mmc));
255 host->dma.num_ents = 0;
/* One box command per mapped sg entry; rows = len / FIFOSIZE
 * (rounded up for a partial trailing row). */
260 for (i = 0; i < host->dma.num_ents; i++) {
261 box->cmd = CMD_MODE_BOX;
263 if (i == (host->dma.num_ents - 1))
265 rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
266 (sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
267 (sg_dma_len(sg) / MCI_FIFOSIZE) ;
/* Read: FIFO is the fixed source, memory advances row by row. */
269 if (data->flags & MMC_DATA_READ) {
270 box->src_row_addr = msmsdcc_fifo_addr(host);
271 box->dst_row_addr = sg_dma_address(sg);
273 box->src_dst_len = (MCI_FIFOSIZE << 16) |
275 box->row_offset = MCI_FIFOSIZE;
277 box->num_rows = rows * ((1 << 16) + 1);
278 box->cmd |= CMD_SRC_CRCI(crci);
/* Write: memory is the source, FIFO is the fixed destination. */
280 box->src_row_addr = sg_dma_address(sg);
281 box->dst_row_addr = msmsdcc_fifo_addr(host);
283 box->src_dst_len = (MCI_FIFOSIZE << 16) |
285 box->row_offset = (MCI_FIFOSIZE << 16);
287 box->num_rows = rows * ((1 << 16) + 1);
288 box->cmd |= CMD_DST_CRCI(crci);
294 /* location of command block must be 64 bit aligned */
295 BUG_ON(host->dma.cmd_busaddr & 0x07);
297 nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
298 host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
299 DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
300 host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
/*
 * Program and start a data transfer: set the data timeout and length
 * registers, enable DMA if msmsdcc_config_dma() succeeds (otherwise
 * set up PIO state and unmask the appropriate FIFO interrupts), then
 * enable the data path state machine.
 * NOTE(review): some interior lines (else branches, closing braces)
 * are elided in this excerpt.
 */
306 msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data)
308 unsigned int datactrl, timeout;
309 unsigned long long clks;
310 void __iomem *base = host->base;
311 unsigned int pio_irqmask = 0;
313 host->curr.data = data;
314 host->curr.xfer_size = data->blksz * data->blocks;
315 host->curr.xfer_remain = host->curr.xfer_size;
316 host->curr.data_xfered = 0;
317 host->curr.got_dataend = 0;
318 host->curr.got_datablkend = 0;
320 memset(&host->pio, 0, sizeof(host->pio));
/* Convert the ns timeout to controller clocks, add the clk component. */
322 clks = (unsigned long long)data->timeout_ns * host->clk_rate;
323 do_div(clks, NSEC_PER_SEC);
324 timeout = data->timeout_clks + (unsigned int)clks;
325 writel(timeout, base + MMCIDATATIMER);
327 writel(host->curr.xfer_size, base + MMCIDATALENGTH);
329 datactrl = MCI_DPSM_ENABLE | (data->blksz << 4);
/* Zero return from config_dma means DMA is set up and usable. */
331 if (!msmsdcc_config_dma(host, data))
332 datactrl |= MCI_DPSM_DMAENABLE;
334 host->pio.sg = data->sg;
335 host->pio.sg_len = data->sg_len;
336 host->pio.sg_off = 0;
/* PIO IRQ selection: half-full for reads (plus data-available for
 * sub-FIFO transfers), half-empty for writes. */
338 if (data->flags & MMC_DATA_READ) {
339 pio_irqmask = MCI_RXFIFOHALFFULLMASK;
340 if (host->curr.xfer_remain < MCI_FIFOSIZE)
341 pio_irqmask |= MCI_RXDATAAVLBLMASK;
343 pio_irqmask = MCI_TXFIFOHALFEMPTYMASK;
346 if (data->flags & MMC_DATA_READ)
347 datactrl |= MCI_DPSM_DIRECTION;
349 writel(pio_irqmask, base + MMCIMASK1);
350 writel(datactrl, base + MMCIDATACTRL);
352 if (datactrl & MCI_DPSM_DMAENABLE) {
354 msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr);
/*
 * Issue a command on the bus: if the CPSM is still enabled, disable it
 * and wait ~5 controller clocks, then build the command word (response
 * type, data command, abort) and write argument + command registers.
 * NOTE(review): a few interior lines are elided in this excerpt.
 */
359 msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd, u32 c)
361 void __iomem *base = host->base;
363 if (readl(base + MMCICOMMAND) & MCI_CPSM_ENABLE) {
364 writel(0, base + MMCICOMMAND);
/* Delay >= 5 MCLK cycles (expressed in microseconds) before re-enable. */
365 udelay(2 + ((5 * 1000000) / host->clk_rate));
368 c |= cmd->opcode | MCI_CPSM_ENABLE;
370 if (cmd->flags & MMC_RSP_PRESENT) {
371 if (cmd->flags & MMC_RSP_136)
372 c |= MCI_CPSM_LONGRSP;
373 c |= MCI_CPSM_RESPONSE;
/* Opcodes 17/18/24/25 (single/multi block read/write) carry data. */
376 if (cmd->opcode == 17 || cmd->opcode == 18 ||
377 cmd->opcode == 24 || cmd->opcode == 25 ||
379 c |= MCI_CSPM_DATCMD;
381 if (cmd == cmd->mrq->stop)
382 c |= MCI_CSPM_MCIABORT;
384 host->curr.cmd = cmd;
388 writel(cmd->arg, base + MMCIARGUMENT);
389 writel(c, base + MMCICOMMAND);
/*
 * Translate a data-phase error status into data->error and log it:
 * CRC failure -> -EILSEQ, timeout -> -ETIMEDOUT; over/underrun and
 * unknown statuses are logged (their error assignments are elided in
 * this excerpt — TODO confirm, likely -EIO in pristine source).
 */
393 msmsdcc_data_err(struct msmsdcc_host *host, struct mmc_data *data,
396 if (status & MCI_DATACRCFAIL) {
397 pr_err("%s: Data CRC error\n", mmc_hostname(host->mmc));
398 pr_err("%s: opcode 0x%.8x\n", __func__,
399 data->mrq->cmd->opcode);
400 pr_err("%s: blksz %d, blocks %d\n", __func__,
401 data->blksz, data->blocks);
402 data->error = -EILSEQ;
403 } else if (status & MCI_DATATIMEOUT) {
404 pr_err("%s: Data timeout\n", mmc_hostname(host->mmc));
405 data->error = -ETIMEDOUT;
406 } else if (status & MCI_RXOVERRUN) {
407 pr_err("%s: RX overrun\n", mmc_hostname(host->mmc));
409 } else if (status & MCI_TXUNDERRUN) {
410 pr_err("%s: TX underrun\n", mmc_hostname(host->mmc));
413 pr_err("%s: Unknown error (0x%.8x)\n",
414 mmc_hostname(host->mmc), status);
/*
 * PIO read: drain 32-bit words from the FIFO into `buffer` while the
 * controller reports data available.  Presumably returns the byte
 * count read (return statement elided in this excerpt — confirm).
 */
421 msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
423 void __iomem *base = host->base;
424 uint32_t *ptr = (uint32_t *) buffer;
427 while (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL) {
/* FIFO reads wrap within the FIFO window (count % MCI_FIFOSIZE). */
429 *ptr = readl(base + MMCIFIFO + (count % MCI_FIFOSIZE));
431 count += sizeof(uint32_t);
433 remain -= sizeof(uint32_t);
/*
 * PIO write: push up to a FIFO's worth (full FIFO if empty, otherwise
 * elided half-FIFO amount) of words per iteration while the FIFO stays
 * at least half empty.  Presumably returns bytes written (elided).
 */
441 msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
442 unsigned int remain, u32 status)
444 void __iomem *base = host->base;
448 unsigned int count, maxcnt;
450 maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
452 count = min(remain, maxcnt);
/* Word-granular burst into the FIFO (count >> 2 = number of u32s). */
454 writesl(base + MMCIFIFO, ptr, count >> 2);
461 status = readl(base + MMCISTATUS);
462 } while (status & MCI_TXFIFOHALFEMPTY);
/*
 * Busy-poll the status register for any bit in `mask`, up to `maxspin`
 * iterations.  Loop body/return values elided in this excerpt
 * (presumably 0 on success, non-zero on spin exhaustion — confirm).
 */
468 msmsdcc_spin_on_status(struct msmsdcc_host *host, uint32_t mask, int maxspin)
471 if ((readl(host->base + MMCISTATUS) & mask))
/*
 * PIO interrupt handler: moves data between the current scatterlist
 * page (mapped with kmap_atomic) and the FIFO, advances sg bookkeeping,
 * and retunes MASK1 as the transfer approaches completion.
 * NOTE(review): loop structure and several branches are elided in this
 * excerpt; treat the control flow here as indicative only.
 */
480 msmsdcc_pio_irq(int irq, void *dev_id)
482 struct msmsdcc_host *host = dev_id;
483 void __iomem *base = host->base;
486 status = readl(base + MMCISTATUS);
490 unsigned int remain, len;
/* If neither FIFO condition is asserted, optionally spin-poll for one
 * (only when data remains and polling is enabled). */
493 if (!(status & (MCI_TXFIFOHALFEMPTY | MCI_RXDATAAVLBL))) {
494 if (host->curr.xfer_remain == 0 || !msmsdcc_piopoll)
497 if (msmsdcc_spin_on_status(host,
498 (MCI_TXFIFOHALFEMPTY |
505 /* Map the current scatter buffer */
506 local_irq_save(flags);
507 buffer = kmap_atomic(sg_page(host->pio.sg),
508 KM_BIO_SRC_IRQ) + host->pio.sg->offset;
509 buffer += host->pio.sg_off;
510 remain = host->pio.sg->length - host->pio.sg_off;
512 if (status & MCI_RXACTIVE)
513 len = msmsdcc_pio_read(host, buffer, remain);
514 if (status & MCI_TXACTIVE)
515 len = msmsdcc_pio_write(host, buffer, remain, status);
517 /* Unmap the buffer */
518 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
519 local_irq_restore(flags);
521 host->pio.sg_off += len;
522 host->curr.xfer_remain -= len;
523 host->curr.data_xfered += len;
527 /* This sg page is full - do some housekeeping */
/* Reads into user pages need a D-cache flush for coherency. */
528 if (status & MCI_RXACTIVE && host->curr.user_pages)
529 flush_dcache_page(sg_page(host->pio.sg));
531 if (!--host->pio.sg_len) {
532 memset(&host->pio, 0, sizeof(host->pio));
536 /* Advance to next sg */
538 host->pio.sg_off = 0;
541 status = readl(base + MMCISTATUS);
/* Sub-FIFO tail on a read: switch to the data-available interrupt. */
544 if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE)
545 writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
547 if (!host->curr.xfer_remain)
548 writel(0, base + MMCIMASK1);
/*
 * Handle command completion: latch the four response registers, cancel
 * the watchdog command timer, translate timeout/CRC statuses into
 * cmd->error, then either finish the request, tear down a pending data
 * phase, or kick off the data phase for write commands.
 * NOTE(review): some interior lines are elided in this excerpt.
 */
553 static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
555 struct mmc_command *cmd = host->curr.cmd;
556 void __iomem *base = host->base;
558 host->curr.cmd = NULL;
559 cmd->resp[0] = readl(base + MMCIRESPONSE0);
560 cmd->resp[1] = readl(base + MMCIRESPONSE1);
561 cmd->resp[2] = readl(base + MMCIRESPONSE2);
562 cmd->resp[3] = readl(base + MMCIRESPONSE3);
564 del_timer(&host->command_timer);
565 if (status & MCI_CMDTIMEOUT) {
566 cmd->error = -ETIMEDOUT;
/* CRC errors only matter for commands whose response carries a CRC. */
567 } else if (status & MCI_CMDCRCFAIL &&
568 cmd->flags & MMC_RSP_CRC) {
569 pr_err("%s: Command CRC error\n", mmc_hostname(host->mmc));
570 cmd->error = -EILSEQ;
573 if (!cmd->data || cmd->error) {
/* In-flight DMA must be stopped before completing the request. */
574 if (host->curr.data && host->dma.sg)
575 msm_dmov_stop_cmd(host->dma.channel,
577 else if (host->curr.data) { /* Non DMA */
578 msmsdcc_stop_data(host);
579 msmsdcc_request_end(host, cmd->mrq);
580 } else /* host->data == NULL */
581 msmsdcc_request_end(host, cmd->mrq);
/* Writes start their data phase only after the command completes. */
582 } else if (!(cmd->data->flags & MMC_DATA_READ))
583 msmsdcc_start_data(host, cmd->data);
/*
 * Data-phase portion of the main IRQ handler: route data errors to
 * msmsdcc_data_err(), record DATAEND/DATABLKEND arrival, and when both
 * have arrived (and DMA is not still pending — condition partly elided)
 * finish the transfer, working around a controller quirk for
 * sub-FIFO-size reads.
 * NOTE(review): several interior lines are elided in this excerpt.
 */
587 msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
590 struct mmc_data *data = host->curr.data;
595 /* Check for data errors */
596 if (status & (MCI_DATACRCFAIL | MCI_DATATIMEOUT |
597 MCI_TXUNDERRUN | MCI_RXOVERRUN)) {
598 msmsdcc_data_err(host, data, status);
599 host->curr.data_xfered = 0;
601 msm_dmov_stop_cmd(host->dma.channel,
604 msmsdcc_stop_data(host);
606 msmsdcc_request_end(host, data->mrq);
608 msmsdcc_start_command(host, data->stop, 0);
612 /* Check for data done */
613 if (!host->curr.got_dataend && (status & MCI_DATAEND))
614 host->curr.got_dataend = 1;
616 if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND))
617 host->curr.got_datablkend = 1;
620 * If DMA is still in progress, we complete via the completion handler
622 if (host->curr.got_dataend && host->curr.got_datablkend &&
625 * There appears to be an issue in the controller where
626 * if you request a small block transfer (< fifo size),
627 * you may get your DATAEND/DATABLKEND irq without the
630 * Check to see if there is still data to be read,
631 * and simulate a PIO irq.
633 if (readl(base + MMCISTATUS) & MCI_RXDATAAVLBL)
634 msmsdcc_pio_irq(1, host);
636 msmsdcc_stop_data(host);
638 host->curr.data_xfered = host->curr.xfer_size;
641 msmsdcc_request_end(host, data->mrq);
643 msmsdcc_start_command(host, data->stop, 0);
/*
 * Main (command) interrupt handler: read and acknowledge the masked
 * status, dispatch data-phase handling, then command-phase handling,
 * then SDIO card interrupts.  The SDIO signal is delivered after the
 * lock is dropped because mmc_signal_sdio_irq() re-enters the driver.
 * NOTE(review): the surrounding loop/locals are elided in this excerpt.
 */
648 msmsdcc_irq(int irq, void *dev_id)
650 struct msmsdcc_host *host = dev_id;
651 void __iomem *base = host->base;
656 spin_lock(&host->lock);
659 status = readl(base + MMCISTATUS);
/* Only consider enabled sources (plus DATABLOCKEND), then ack them. */
661 status &= (readl(base + MMCIMASK0) | MCI_DATABLOCKENDMASK);
662 writel(status, base + MMCICLEAR);
664 msmsdcc_handle_irq_data(host, status, base);
666 if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
667 MCI_CMDTIMEOUT) && host->curr.cmd) {
668 msmsdcc_do_cmdirq(host, status);
671 if (status & MCI_SDIOINTOPER) {
673 status &= ~MCI_SDIOINTOPER;
678 spin_unlock(&host->lock);
681 * We have to delay handling the card interrupt as it calls
682 * back into the driver.
685 mmc_signal_sdio_irq(host->mmc);
687 return IRQ_RETVAL(ret);
/*
 * mmc_host_ops.request: submit a request.  If the slot is ejected
 * (check elided), fail writes with -ENOMEDIUM immediately; otherwise
 * start the data phase first for reads, issue the command, and either
 * poll briefly for completion (cmdpoll) or arm the 1 s command timer.
 * NOTE(review): several interior lines are elided in this excerpt.
 */
691 msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)
693 struct msmsdcc_host *host = mmc_priv(mmc);
696 WARN_ON(host->curr.mrq != NULL);
697 WARN_ON(host->pwr == 0);
699 spin_lock_irqsave(&host->lock, flags);
/* Ejected-card path: pretend writes succeeded, flag no medium. */
704 if (mrq->data && !(mrq->data->flags & MMC_DATA_READ)) {
706 mrq->data->bytes_xfered = mrq->data->blksz *
709 mrq->cmd->error = -ENOMEDIUM;
711 spin_unlock_irqrestore(&host->lock, flags);
712 mmc_request_done(mmc, mrq);
716 host->curr.mrq = mrq;
/* Reads: program the data path before sending the command. */
718 if (mrq->data && mrq->data->flags & MMC_DATA_READ)
719 msmsdcc_start_data(host, mrq->data);
721 msmsdcc_start_command(host, mrq->cmd, 0);
723 if (host->cmdpoll && !msmsdcc_spin_on_status(host,
724 MCI_CMDRESPEND|MCI_CMDCRCFAIL|MCI_CMDTIMEOUT,
726 uint32_t status = readl(host->base + MMCISTATUS);
727 msmsdcc_do_cmdirq(host, status);
728 writel(MCI_CMDRESPEND | MCI_CMDCRCFAIL | MCI_CMDTIMEOUT,
729 host->base + MMCICLEAR);
730 host->stats.cmdpoll_hits++;
732 host->stats.cmdpoll_misses++;
/* Fallback watchdog: expire the command after ~1 second. */
733 mod_timer(&host->command_timer, jiffies + HZ);
735 spin_unlock_irqrestore(&host->lock, flags);
/*
 * mmc_host_ops.set_ios: apply bus settings — enable clocks, set the
 * MMC clock rate, bus width, power-save/flow-control/feedback bits,
 * and the power register; gate the clocks off when the core asks for
 * the bus clock to stop.
 * NOTE(review): power_mode case labels and some branches are elided.
 */
739 msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
741 struct msmsdcc_host *host = mmc_priv(mmc);
742 u32 clk = 0, pwr = 0;
747 if (!host->clks_on) {
748 clk_enable(host->pclk);
749 clk_enable(host->clk);
752 if (ios->clock != host->clk_rate) {
753 rc = clk_set_rate(host->clk, ios->clock);
755 pr_err("%s: Error setting clock rate (%d)\n",
756 mmc_hostname(host->mmc), rc);
758 host->clk_rate = ios->clock;
760 clk |= MCI_CLK_ENABLE;
763 if (ios->bus_width == MMC_BUS_WIDTH_4)
764 clk |= (2 << 10); /* Set WIDEBUS */
/* Gate the card clock when idle, but never below 400 kHz (init speed). */
766 if (ios->clock > 400000 && msmsdcc_pwrsave)
767 clk |= (1 << 9); /* PWRSAVE */
769 clk |= (1 << 12); /* FLOW_ENA */
770 clk |= (1 << 15); /* feedback clock */
/* Let the board code translate the OCR vdd bits to regulator settings. */
772 if (host->plat->translate_vdd)
773 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
775 switch (ios->power_mode) {
786 if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
789 writel(clk, host->base + MMCICLOCK);
791 if (host->pwr != pwr) {
793 writel(pwr, host->base + MMCIPOWER);
796 if (!(clk & MCI_CLK_ENABLE) && host->clks_on) {
797 clk_disable(host->clk);
798 clk_disable(host->pclk);
/*
 * mmc_host_ops.enable_sdio_irq: mask/unmask the SDIO card interrupt in
 * MMCIMASK0 (only when the module-level SDIO IRQ support is enabled),
 * mirroring the mask into saved_irq0mask for restore on resume.
 */
803 static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)
805 struct msmsdcc_host *host = mmc_priv(mmc);
809 spin_lock_irqsave(&host->lock, flags);
810 if (msmsdcc_sdioirq == 1) {
811 status = readl(host->base + MMCIMASK0);
813 status |= MCI_SDIOINTOPERMASK;
815 status &= ~MCI_SDIOINTOPERMASK;
816 host->saved_irq0mask = status;
817 writel(status, host->base + MMCIMASK0);
819 spin_unlock_irqrestore(&host->lock, flags);
/* Host operations handed to the MMC core (terminator elided here). */
822 static const struct mmc_host_ops msmsdcc_ops = {
823 .request = msmsdcc_request,
824 .set_ios = msmsdcc_set_ios,
825 .enable_sdio_irq = msmsdcc_enable_sdio_irq,
/*
 * Card-detect poll/timer callback: read the platform status hook (or
 * blindly signal a change if there is none), update the eject state,
 * and notify the MMC core on transitions — with a longer debounce on
 * insertion than on removal.  Re-arms itself every second when used
 * as a timer.
 */
829 msmsdcc_check_status(unsigned long data)
831 struct msmsdcc_host *host = (struct msmsdcc_host *)data;
834 if (!host->plat->status) {
835 mmc_detect_change(host->mmc, 0);
839 status = host->plat->status(mmc_dev(host->mmc));
840 host->eject = !status;
841 if (status ^ host->oldstat) {
842 pr_info("%s: Slot status change detected (%d -> %d)\n",
843 mmc_hostname(host->mmc), host->oldstat, status);
/* Insertion debounced ~2.5 s; removal reported immediately. */
845 mmc_detect_change(host->mmc, (5 * HZ) / 2);
847 mmc_detect_change(host->mmc, 0);
850 host->oldstat = status;
853 if (host->timer.function)
854 mod_timer(&host->timer, jiffies + HZ);
/* Slot status-change IRQ: delegate to the common status checker. */
858 msmsdcc_platform_status_irq(int irq, void *dev_id)
860 struct msmsdcc_host *host = dev_id;
862 printk(KERN_DEBUG "%s: %d\n", __func__, irq);
863 msmsdcc_check_status((unsigned long) host);
/* Platform status-notify callback: delegate to the status checker. */
868 msmsdcc_status_notify_cb(int card_present, void *dev_id)
870 struct msmsdcc_host *host = dev_id;
872 printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc),
874 msmsdcc_check_status((unsigned long) host);
878 * called when a command expires.
879 * Dump some debugging, and then error
880 * out the transaction.
/*
 * Timer callback armed by msmsdcc_request(): if the request is still
 * outstanding after ~1 s, log diagnostics, fail it with -ETIMEDOUT,
 * tear down the data path and complete the request.
 */
883 msmsdcc_command_expired(unsigned long _data)
885 struct msmsdcc_host *host = (struct msmsdcc_host *) _data;
886 struct mmc_request *mrq;
889 spin_lock_irqsave(&host->lock, flags);
890 mrq = host->curr.mrq;
/* Misfire: the request completed between timer fire and lock acquire. */
893 pr_info("%s: Command expiry misfire\n",
894 mmc_hostname(host->mmc));
895 spin_unlock_irqrestore(&host->lock, flags);
899 pr_err("%s: Command timeout (%p %p %p %p)\n",
900 mmc_hostname(host->mmc), mrq, mrq->cmd,
901 mrq->data, host->dma.sg);
903 mrq->cmd->error = -ETIMEDOUT;
904 msmsdcc_stop_data(host);
906 writel(0, host->base + MMCICOMMAND);
908 host->curr.mrq = NULL;
909 host->curr.cmd = NULL;
911 spin_unlock_irqrestore(&host->lock, flags);
912 mmc_request_done(host->mmc, mrq);
/*
 * Allocate the non-cached DMA command buffer and record the bus
 * addresses of the command block and its command pointer; the channel
 * comes from the platform DMA resource.  channel stays -1 (DMA
 * disabled) on allocation failure.
 * NOTE(review): early-return / guard lines are elided in this excerpt.
 */
916 msmsdcc_init_dma(struct msmsdcc_host *host)
918 memset(&host->dma, 0, sizeof(struct msmsdcc_dma_data));
919 host->dma.host = host;
920 host->dma.channel = -1;
925 host->dma.nc = dma_alloc_coherent(NULL,
926 sizeof(struct msmsdcc_nc_dmadata),
927 &host->dma.nc_busaddr,
929 if (host->dma.nc == NULL) {
930 pr_err("Unable to allocate DMA buffer\n");
933 memset(host->dma.nc, 0x00, sizeof(struct msmsdcc_nc_dmadata));
934 host->dma.cmd_busaddr = host->dma.nc_busaddr;
935 host->dma.cmdptr_busaddr = host->dma.nc_busaddr +
936 offsetof(struct msmsdcc_nc_dmadata, cmdptr);
937 host->dma.channel = host->dmares->start;
942 #ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
/*
 * Deferred resume worker (workqueue variant of resume): resume the MMC
 * host, then re-enable the slot-status IRQ.
 */
944 do_resume_work(struct work_struct *work)
946 struct msmsdcc_host *host =
947 container_of(work, struct msmsdcc_host, resume_task);
948 struct mmc_host *mmc = host->mmc;
951 mmc_resume_host(mmc);
953 enable_irq(host->stat_irq);
/*
 * Platform probe: validate platform data and resources, allocate the
 * mmc_host, map registers, set up DMA, clocks, host capabilities and
 * limits, card-detect (IRQ, notify callback, or 1 s polling timer),
 * the command watchdog timer, and the command/PIO IRQs; then add the
 * host and print a summary.
 * NOTE(review): many interior lines — error-handling gotos, labels
 * between the cleanup calls at the end, mmc_add_host() itself — are
 * elided in this excerpt; treat control flow as indicative only.
 */
959 msmsdcc_probe(struct platform_device *pdev)
961 struct mmc_platform_data *plat = pdev->dev.platform_data;
962 struct msmsdcc_host *host;
963 struct mmc_host *mmc;
964 struct resource *cmd_irqres = NULL;
965 struct resource *pio_irqres = NULL;
966 struct resource *stat_irqres = NULL;
967 struct resource *memres = NULL;
968 struct resource *dmares = NULL;
971 /* must have platform data */
973 pr_err("%s: Platform data not available\n", __func__);
/* Only SDCC instances 1..4 exist on this SoC. */
978 if (pdev->id < 1 || pdev->id > 4)
981 if (pdev->resource == NULL || pdev->num_resources < 2) {
982 pr_err("%s: Invalid resource\n", __func__);
986 memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
987 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
988 cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
990 pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
992 stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
/* stat IRQ is optional (polling fallback); the others are mandatory. */
995 if (!cmd_irqres || !pio_irqres || !memres) {
996 pr_err("%s: Invalid resource\n", __func__);
1001 * Setup our host structure
1004 mmc = mmc_alloc_host(sizeof(struct msmsdcc_host), &pdev->dev);
1010 host = mmc_priv(mmc);
1011 host->pdev_id = pdev->id;
1017 host->base = ioremap(memres->start, PAGE_SIZE);
1023 host->cmd_irqres = cmd_irqres;
1024 host->pio_irqres = pio_irqres;
1025 host->memres = memres;
1026 host->dmares = dmares;
1027 spin_lock_init(&host->lock);
1032 msmsdcc_init_dma(host);
1035 * Setup main peripheral bus clock
1037 host->pclk = clk_get(&pdev->dev, "sdc_pclk");
1038 if (IS_ERR(host->pclk)) {
1039 ret = PTR_ERR(host->pclk);
1043 ret = clk_enable(host->pclk);
1047 host->pclk_rate = clk_get_rate(host->pclk);
1050 * Setup SDC MMC clock
1052 host->clk = clk_get(&pdev->dev, "sdc_clk");
1053 if (IS_ERR(host->clk)) {
1054 ret = PTR_ERR(host->clk);
1058 ret = clk_enable(host->clk);
1062 ret = clk_set_rate(host->clk, msmsdcc_fmin);
1064 pr_err("%s: Clock rate set failed (%d)\n", __func__, ret);
1068 host->clk_rate = clk_get_rate(host->clk);
1073 * Setup MMC host structure
1075 mmc->ops = &msmsdcc_ops;
1076 mmc->f_min = msmsdcc_fmin;
1077 mmc->f_max = msmsdcc_fmax;
1078 mmc->ocr_avail = plat->ocr_mask;
1081 mmc->caps |= MMC_CAP_4_BIT_DATA;
1082 if (msmsdcc_sdioirq)
1083 mmc->caps |= MMC_CAP_SDIO_IRQ;
1084 mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
1086 mmc->max_phys_segs = NR_SG;
1087 mmc->max_hw_segs = NR_SG;
1088 mmc->max_blk_size = 4096; /* MCI_DATA_CTL BLOCKSIZE up to 4096 */
1089 mmc->max_blk_count = 65536;
1091 mmc->max_req_size = 33554432; /* MCI_DATA_LENGTH is 25 bits */
1092 mmc->max_seg_size = mmc->max_req_size;
/* Quiesce interrupts, clear stale status, then enable our IRQ set. */
1094 writel(0, host->base + MMCIMASK0);
1095 writel(0x5e007ff, host->base + MMCICLEAR); /* Add: 1 << 25 */
1097 writel(MCI_IRQENABLE, host->base + MMCIMASK0);
1098 host->saved_irq0mask = MCI_IRQENABLE;
1101 * Setup card detect change
1104 memset(&host->timer, 0, sizeof(host->timer));
/* Preferred: a dedicated slot-status IRQ from the board resources. */
1106 if (stat_irqres && !(stat_irqres->flags & IORESOURCE_DISABLED)) {
1107 unsigned long irqflags = IRQF_SHARED |
1108 (stat_irqres->flags & IRQF_TRIGGER_MASK);
1110 host->stat_irq = stat_irqres->start;
1111 ret = request_irq(host->stat_irq,
1112 msmsdcc_platform_status_irq,
1114 DRIVER_NAME " (slot)",
1117 pr_err("%s: Unable to get slot IRQ %d (%d)\n",
1118 mmc_hostname(mmc), host->stat_irq, ret);
1121 } else if (plat->register_status_notify) {
1122 plat->register_status_notify(msmsdcc_status_notify_cb, host);
1123 } else if (!plat->status)
1124 pr_err("%s: No card detect facilities available\n",
/* Fallback: poll the status hook once per second. */
1127 init_timer(&host->timer);
1128 host->timer.data = (unsigned long)host;
1129 host->timer.function = msmsdcc_check_status;
1130 host->timer.expires = jiffies + HZ;
1131 add_timer(&host->timer);
1135 host->oldstat = host->plat->status(mmc_dev(host->mmc));
1136 host->eject = !host->oldstat;
1140 * Setup a command timer. We currently need this due to
1141 * some 'strange' timeout / error handling situations.
1143 init_timer(&host->command_timer);
1144 host->command_timer.data = (unsigned long) host;
1145 host->command_timer.function = msmsdcc_command_expired;
1147 ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,
1148 DRIVER_NAME " (cmd)", host);
1152 ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,
1153 DRIVER_NAME " (pio)", host);
1157 mmc_set_drvdata(pdev, mmc);
1160 pr_info("%s: Qualcomm MSM SDCC at 0x%016llx irq %d,%d dma %d\n",
1161 mmc_hostname(mmc), (unsigned long long)memres->start,
1162 (unsigned int) cmd_irqres->start,
1163 (unsigned int) host->stat_irq, host->dma.channel);
1164 pr_info("%s: 4 bit data mode %s\n", mmc_hostname(mmc),
1165 (mmc->caps & MMC_CAP_4_BIT_DATA ? "enabled" : "disabled"));
1166 pr_info("%s: MMC clock %u -> %u Hz, PCLK %u Hz\n",
1167 mmc_hostname(mmc), msmsdcc_fmin, msmsdcc_fmax, host->pclk_rate);
1168 pr_info("%s: Slot eject status = %d\n", mmc_hostname(mmc), host->eject);
1169 pr_info("%s: Power save feature enable = %d\n",
1170 mmc_hostname(mmc), msmsdcc_pwrsave);
1172 if (host->dma.channel != -1) {
1173 pr_info("%s: DM non-cached buffer at %p, dma_addr 0x%.8x\n",
1174 mmc_hostname(mmc), host->dma.nc, host->dma.nc_busaddr);
1175 pr_info("%s: DM cmd busaddr 0x%.8x, cmdptr busaddr 0x%.8x\n",
1176 mmc_hostname(mmc), host->dma.cmd_busaddr,
1177 host->dma.cmdptr_busaddr);
1179 pr_info("%s: PIO transfer enabled\n", mmc_hostname(mmc));
1180 if (host->timer.function)
1181 pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc));
/* Error unwind (labels elided): release in reverse acquisition order. */
1185 free_irq(cmd_irqres->start, host);
1188 free_irq(host->stat_irq, host);
1190 clk_disable(host->clk);
1194 clk_disable(host->pclk);
1196 clk_put(host->pclk);
/*
 * Platform suspend: disable the slot-status IRQ, suspend the MMC host
 * (except SDIO cards, which stay powered), mask controller interrupts,
 * and gate the clocks.
 * NOTE(review): guard conditions / return lines are elided here.
 */
1204 msmsdcc_suspend(struct platform_device *dev, pm_message_t state)
1206 struct mmc_host *mmc = mmc_get_drvdata(dev);
1210 struct msmsdcc_host *host = mmc_priv(mmc);
1213 disable_irq(host->stat_irq);
/* SDIO cards are left alone so their function drivers keep working. */
1215 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1216 rc = mmc_suspend_host(mmc, state);
1218 writel(0, host->base + MMCIMASK0);
1220 if (host->clks_on) {
1221 clk_disable(host->clk);
1222 clk_disable(host->pclk);
/*
 * Platform resume: ungate clocks, restore the saved interrupt mask,
 * resume the MMC host (non-SDIO cards), and re-enable the slot-status
 * IRQ.  NOTE(review): the branch structure around the two
 * enable_irq(host->stat_irq) calls is elided/ambiguous in this
 * excerpt — confirm against pristine source.
 */
1231 msmsdcc_resume(struct platform_device *dev)
1233 struct mmc_host *mmc = mmc_get_drvdata(dev);
1234 unsigned long flags;
1237 struct msmsdcc_host *host = mmc_priv(mmc);
1239 spin_lock_irqsave(&host->lock, flags);
1241 if (!host->clks_on) {
1242 clk_enable(host->pclk);
1243 clk_enable(host->clk);
1247 writel(host->saved_irq0mask, host->base + MMCIMASK0);
1249 spin_unlock_irqrestore(&host->lock, flags);
1251 if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
1252 mmc_resume_host(mmc);
1254 enable_irq(host->stat_irq);
1255 else if (host->stat_irq)
1256 enable_irq(host->stat_irq);
/* Platform driver glue (driver-name member and terminator elided). */
1261 static struct platform_driver msmsdcc_driver = {
1262 .probe = msmsdcc_probe,
1263 .suspend = msmsdcc_suspend,
1264 .resume = msmsdcc_resume,
/* Module init: register the platform driver (braces elided here). */
1270 static int __init msmsdcc_init(void)
1272 return platform_driver_register(&msmsdcc_driver);
/* Module exit: unregister the platform driver. */
1275 static void __exit msmsdcc_exit(void)
1277 platform_driver_unregister(&msmsdcc_driver);
/* Standard module entry/exit hookup and metadata. */
1280 module_init(msmsdcc_init);
1281 module_exit(msmsdcc_exit);
1283 MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
1284 MODULE_LICENSE("GPL");