/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"
38 #define DRIVER_NAME "sdhci"
40 #define DBG(f, x...) \
41 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
43 #define SDHCI_DUMP(f, x...) \
44 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
46 #define MAX_TUNING_LOOP 40
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}
static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}
static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/
static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
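/*
 * Illustrative note on the PIO unpacking above: each 32-bit read from
 * SDHCI_BUFFER yields four data bytes, consumed LSB first. For example, if
 * the register returns 0x44332211, the bytes 0x11, 0x22, 0x33, 0x44 are
 * stored in that order, with 'scratch' shifted right by 8 after each byte
 * and 'chunk' counting the bytes still buffered.
 */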
static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}
static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			      mmc_get_dma_dir(data));

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}
static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}
static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * in this function.
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}
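/*
 * Illustrative example of the alignment fix-up above (hypothetical
 * addresses): for a buffer mapped at DMA address 0x10000002 with
 * SDHCI_ADMA2_ALIGN == 4, offset = (4 - (0x10000002 & 3)) & 3 = 2, so the
 * first two bytes go through the bounce buffer in their own descriptor and
 * the remaining, now 4-byte-aligned, region at 0x10000004 is described
 * directly.
 */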
static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	if (!data)
		target_timeout = cmd->busy_timeout * 1000;
	else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		DBG("Too large timeout 0x%x requested for CMD%d!\n",
		    count, cmd->opcode);
		count = 0xE;
	}

	return count;
}
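/*
 * Worked example for the loop above (illustrative numbers): with
 * host->timeout_clk = 1000 (kHz, i.e. a 1 MHz timeout clock), the minimum
 * timeout is (1 << 13) * 1000 / 1000 = 8192 us. A target_timeout of
 * 100000 us (100 ms) then needs four doublings (8192 -> 16384 -> 32768 ->
 * 65536 -> 131072), so count = 4, i.e. the controller counts 2^(13 + 4)
 * timeout-clock cycles before signalling a data timeout.
 */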
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		count = sdhci_calc_timeout(host, cmd);
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sg_dma_address(data->sg),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
		data->blksz), SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}
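/*
 * Note on SDHCI_MAKE_BLKSZ above: the Block Size register packs the SDMA
 * buffer boundary in bits 14:12 and the transfer block size in bits 11:0.
 * SDHCI_DEFAULT_BOUNDARY_ARG selects the largest boundary (512 KiB) so
 * that SDMA boundary interrupts are kept to a minimum.
 */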
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}
static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}
static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}
static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}
static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}
void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	timeout = jiffies;
	if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	int i;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need to do some shifting. */
			for (i = 0; i < 4; i++) {
				cmd->resp[i] = sdhci_readl(host,
					SDHCI_RESPONSE + (3-i)*4) << 8;
				if (i != 3)
					cmd->resp[i] |= sdhci_readb(host,
						SDHCI_RESPONSE + (3-i)*4-1);
			}
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}
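/*
 * Note on the 136-bit response handling above: the controller strips the
 * CRC byte, so the 120 response bits sit shifted by one byte relative to
 * what the core expects. Each 32-bit word is therefore shifted left by 8
 * and its low byte refilled from the neighbouring register byte, e.g.
 * resp[0] is the word at SDHCI_RESPONSE + 12 shifted left by 8, OR'ed with
 * the byte at SDHCI_RESPONSE + 11.
 */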
static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);
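/*
 * Illustrative example (hypothetical host): for a v3.00 controller with
 * host->max_clk = 200 MHz, no clk_mul, and a requested clock of 50 MHz,
 * the divisor loop stops at div = 4 (200 / 4 = 50 MHz), so real_div = 4
 * and the register value is div >> 1 = 2. The 10-bit divisor is then
 * split: its low 8 bits land in bits 15:8 of the Clock Control register
 * (SDHCI_DIVIDER_SHIFT) and its top 2 bits in bits 7:6
 * (SDHCI_DIVIDER_HI_SHIFT).
 */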
void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);
void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);
static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and set turn on power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10ms delay before
		 * they can apply clock after applying power.
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);
/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		if (host->version >= SDHCI_SPEC_300)
			ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->version >= SDHCI_SPEC_300)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if ((ios->timing == MMC_TIMING_SD_HS ||
	     ios->timing == MMC_TIMING_MMC_HS ||
	     ios->timing == MMC_TIMING_MMC_HS400 ||
	     ios->timing == MMC_TIMING_MMC_HS200 ||
	     ios->timing == MMC_TIMING_MMC_DDR52 ||
	     ios->timing == MMC_TIMING_UHS_SDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR104 ||
	     ios->timing == MMC_TIMING_UHS_DDR50 ||
	     ios->timing == MMC_TIMING_UHS_SDR25)
	    && !(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT))
		ctrl |= SDHCI_CTRL_HISPD;
	else
		ctrl &= ~SDHCI_CTRL_HISPD;

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
			 * Speed Enable to avoid generating clock glitches.
			 */

			/* Reset SD Clock Enable */
			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			clk &= ~SDHCI_CLOCK_CARD_EN;
			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

			/* Re-enable SD Clock */
			host->ops->set_clock(host, host->clock);
		}

		/* Reset SD Clock Enable */
		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
		clk &= ~SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

		host->ops->set_uhs_signaling(host, ios->timing);
		host->timing = ios->timing;

		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
				((ios->timing == MMC_TIMING_UHS_SDR12) ||
				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
			u16 preset;

			sdhci_enable_preset_value(host, true);
			preset = sdhci_get_preset_value(host);
			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
				>> SDHCI_PRESET_DRV_SHIFT;
		}

		/* Re-enable SD Clock */
		host->ops->set_clock(host, host->clock);
	} else
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	/*
	 * Some (ENE) controllers go apeshit on some ios operation,
	 * signalling timeout and CRC errors even on CMD0. Resetting
	 * it on each ios seems to solve the problem.
	 */
	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);

	mmiowb();
}
EXPORT_SYMBOL_GPL(sdhci_set_ios);
static int sdhci_get_cd(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int gpio_cd = mmc_gpio_get_cd(mmc);

	if (host->flags & SDHCI_DEVICE_DEAD)
		return 0;

	/* If nonremovable, assume that the card is always present. */
	if (!mmc_card_is_removable(host->mmc))
		return 1;

	/*
	 * Try slot gpio detect; if defined, it takes precedence
	 * over the built-in controller functionality.
	 */
	if (gpio_cd >= 0)
		return !!gpio_cd;

	/* If polling, assume that the card is always present. */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
		return 1;

	/* Host native card detect */
	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
static int sdhci_check_ro(struct sdhci_host *host)
{
	unsigned long flags;
	int is_readonly;

	spin_lock_irqsave(&host->lock, flags);

	if (host->flags & SDHCI_DEVICE_DEAD)
		is_readonly = 0;
	else if (host->ops->get_ro)
		is_readonly = host->ops->get_ro(host);
	else
		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
				& SDHCI_WRITE_PROTECT);

	spin_unlock_irqrestore(&host->lock, flags);

	/* This quirk needs to be replaced by a callback-function later */
	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
		!is_readonly : is_readonly;
}
#define SAMPLE_COUNT	5

static int sdhci_get_ro(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int i, ro_count;

	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
		return sdhci_check_ro(host);

	ro_count = 0;
	for (i = 0; i < SAMPLE_COUNT; i++) {
		if (sdhci_check_ro(host)) {
			if (++ro_count > SAMPLE_COUNT / 2)
				return 1;
		}
		msleep(30);
	}
	return 0;
}
static void sdhci_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);

	if (host->ops && host->ops->hw_reset)
		host->ops->hw_reset(host);
}
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
		if (enable)
			host->ier |= SDHCI_INT_CARD_INT;
		else
			host->ier &= ~SDHCI_INT_CARD_INT;

		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
		mmiowb();
	}
}
void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	if (enable)
		pm_runtime_get_noresume(host->mmc->parent);

	spin_lock_irqsave(&host->lock, flags);
	if (enable)
		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
	else
		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;

	sdhci_enable_sdio_irq_nolock(host, enable);
	spin_unlock_irqrestore(&host->lock, flags);

	if (!enable)
		pm_runtime_put_noidle(host->mmc->parent);
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
				      struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl;
	int ret;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;
		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		/* Wait for 5ms */
		usleep_range(5000, 5500);

		/* 3.3V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_VDD_180))
			return 0;

		pr_warn("%s: 3.3V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}

		/*
		 * Enable 1.8V Signal Enable in the Host Control2
		 * register
		 */
		ctrl |= SDHCI_CTRL_VDD_180;
		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		/* Some controllers need to do more when switching */
		if (host->ops->voltage_switch)
			host->ops->voltage_switch(host);

		/* 1.8V regulator output should be stable within 5 ms */
		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (ctrl & SDHCI_CTRL_VDD_180)
			return 0;

		pr_warn("%s: 1.8V regulator output did not become stable\n",
			mmc_hostname(mmc));

		return -EAGAIN;
	case MMC_SIGNAL_VOLTAGE_120:
		if (!(host->flags & SDHCI_SIGNALING_120))
			return -EINVAL;
		if (!IS_ERR(mmc->supply.vqmmc)) {
			ret = mmc_regulator_set_vqmmc(mmc, ios);
			if (ret) {
				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
					mmc_hostname(mmc));
				return -EIO;
			}
		}
		return 0;
	default:
		/* No signal voltage switch required */
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
static int sdhci_card_busy(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u32 present_state;

	/* Check whether DAT[0] is 0 */
	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);

	return !(present_state & SDHCI_DATA_0_LVL_MASK);
}
static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->flags |= SDHCI_HS400_TUNING;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
static void sdhci_start_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl |= SDHCI_CTRL_EXEC_TUNING;
	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
		ctrl |= SDHCI_CTRL_TUNED_CLK;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/*
	 * As per the Host Controller spec v3.00, tuning command
	 * generates Buffer Read Ready interrupt, so enable that.
	 *
	 * Note: The spec clearly says that when tuning sequence
	 * is being performed, the controller does not generate
	 * interrupts other than Buffer Read Ready interrupt. But
	 * to make sure we don't hit a controller bug, we _only_
	 * enable Buffer Read Ready interrupt here.
	 */
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
}
static void sdhci_end_tuning(struct sdhci_host *host)
{
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_reset_tuning(struct sdhci_host *host)
{
	u16 ctrl;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}

static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
{
	sdhci_reset_tuning(host);

	sdhci_do_reset(host, SDHCI_RESET_CMD);
	sdhci_do_reset(host, SDHCI_RESET_DATA);

	sdhci_end_tuning(host);

	mmc_abort_tuning(host->mmc, opcode);
}
/*
 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
 * tuning command does not have a data payload (or rather the hardware does it
 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
 * interrupt setup is different to other commands and there is no timeout
 * interrupt so special handling is needed.
 */
static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
{
	struct mmc_host *mmc = host->mmc;
	struct mmc_command cmd = {};
	struct mmc_request mrq = {};
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	cmd.mrq = &mrq;

	mrq.cmd = &cmd;
	/*
	 * In response to CMD19, the card sends 64 bytes of tuning
	 * block to the Host Controller. So we set the block size
	 * to 64 here.
	 */
	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), SDHCI_BLOCK_SIZE);
	else
		sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);

	/*
	 * The tuning block is sent by the card to the host controller.
	 * So we set the TRNS_READ bit in the Transfer Mode register.
	 * This also takes care of setting DMA Enable and Multi Block
	 * Select in the same register to 0.
	 */
	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);

	sdhci_send_command(host, &cmd);

	host->cmd = NULL;

	sdhci_del_timer(host, &mrq);

	host->tuning_done = 0;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	/* Wait for Buffer Read Ready interrupt */
	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
			   msecs_to_jiffies(50));

}
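/*
 * Note on the block sizes above: SDHCI_MAKE_BLKSZ(7, 64) programs a
 * 64-byte transfer (the CMD19 tuning block), while HS200 tuning on an
 * 8-bit bus uses a 128-byte tuning block, hence SDHCI_MAKE_BLKSZ(7, 128).
 * The boundary argument 7 is the same maximum-boundary value used for
 * normal transfers and is irrelevant here since no DMA is performed.
 */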
static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int i;

	/*
	 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 */
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
		u16 ctrl;

		sdhci_send_tuning(host, opcode);

		if (!host->tuning_done) {
			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
				mmc_hostname(host->mmc));
			sdhci_abort_tuning(host, opcode);
			return;
		}

		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
			if (ctrl & SDHCI_CTRL_TUNED_CLK)
				return; /* Success! */
			break;
		}

		/* Spec does not require a delay between tuning cycles */
		if (host->tuning_delay > 0)
			mdelay(host->tuning_delay);
	}

	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
		mmc_hostname(host->mmc));
	sdhci_reset_tuning(host);
}
int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err = 0;
	unsigned int tuning_count = 0;
	bool hs400_tuning;

	hs400_tuning = host->flags & SDHCI_HS400_TUNING;

	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
		tuning_count = host->tuning_count;

	/*
	 * The Host Controller needs tuning in case of SDR104 and DDR50
	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
	 * the Capabilities register.
	 * If the Host Controller supports the HS200 mode then the
	 * tuning function has to be executed.
	 */
	switch (host->timing) {
	/* HS400 tuning is done in HS200 mode */
	case MMC_TIMING_MMC_HS400:
		err = -EINVAL;
		goto out;

	case MMC_TIMING_MMC_HS200:
		/*
		 * Periodic re-tuning for HS400 is not expected to be needed, so
		 * disable it here.
		 */
		if (hs400_tuning)
			tuning_count = 0;
		break;

	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_UHS_DDR50:
		break;

	case MMC_TIMING_UHS_SDR50:
		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
			break;
		/* FALLTHROUGH */

	default:
		goto out;
	}

	if (host->ops->platform_execute_tuning) {
		err = host->ops->platform_execute_tuning(host, opcode);
		goto out;
	}

	host->mmc->retune_period = tuning_count;

	if (host->tuning_delay < 0)
		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;

	sdhci_start_tuning(host);

	__sdhci_execute_tuning(host, opcode);

	sdhci_end_tuning(host);
out:
	host->flags &= ~SDHCI_HS400_TUNING;

	return err;
}
EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
{
	/* Host Controller v3.00 defines preset value registers */
	if (host->version < SDHCI_SPEC_300)
		return;

	/*
	 * We only enable or disable Preset Value if they are not already
	 * enabled or disabled respectively. Otherwise, we bail out.
	 */
	if (host->preset_enabled != enable) {
		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

		if (enable)
			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
		else
			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;

		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

		if (enable)
			host->flags |= SDHCI_PV_ENABLED;
		else
			host->flags &= ~SDHCI_PV_ENABLED;

		host->preset_enabled = enable;
	}
}
static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			   int err)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct mmc_data *data = mrq->data;

	if (data->host_cookie != COOKIE_UNMAPPED)
		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));

	data->host_cookie = COOKIE_UNMAPPED;
}

static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host = mmc_priv(mmc);

	mrq->data->host_cookie = COOKIE_UNMAPPED;

	if (host->flags & SDHCI_REQ_USE_DMA)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}
static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {
		host->data_cmd->error = err;
		sdhci_finish_mrq(host, host->data_cmd->mrq);
	}

	if (host->cmd) {
		host->cmd->error = err;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}
}
static void sdhci_card_event(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	int present;

	/* First check if client has provided their own card event */
	if (host->ops->card_event)
		host->ops->card_event(host);

	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	/* Check sdhci_has_requests() first in case we are runtime suspended */
	if (sdhci_has_requests(host) && !present) {
		pr_err("%s: Card removed during transfer!\n",
			mmc_hostname(host->mmc));
		pr_err("%s: Resetting controller.\n",
			mmc_hostname(host->mmc));

		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		sdhci_error_out_mrqs(host, -ENOMEDIUM);
	}

	spin_unlock_irqrestore(&host->lock, flags);
}
static const struct mmc_host_ops sdhci_ops = {
	.request	= sdhci_request,
	.post_req	= sdhci_post_req,
	.pre_req	= sdhci_pre_req,
	.set_ios	= sdhci_set_ios,
	.get_cd		= sdhci_get_cd,
	.get_ro		= sdhci_get_ro,
	.hw_reset	= sdhci_hw_reset,
	.enable_sdio_irq = sdhci_enable_sdio_irq,
	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
	.execute_tuning			= sdhci_execute_tuning,
	.card_event			= sdhci_card_event,
	.card_busy	= sdhci_card_busy,
};
/*****************************************************************************\
 *                                                                           *
 * Tasklets                                                                  *
 *                                                                           *
\*****************************************************************************/
static bool sdhci_request_done(struct sdhci_host *host)
{
	unsigned long flags;
	struct mmc_request *mrq;
	int i;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		mrq = host->mrqs_done[i];
		if (mrq)
			break;
	}

	if (!mrq) {
		spin_unlock_irqrestore(&host->lock, flags);
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.
	 * This avoids leaking DMA mappings on error.
	 */
	if (host->flags & SDHCI_REQ_USE_DMA) {
		struct mmc_data *data = mrq->data;

		if (data && data->host_cookie == COOKIE_MAPPED) {
			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
				     mmc_get_dma_dir(data));
			data->host_cookie = COOKIE_UNMAPPED;
		}
	}

	/*
	 * The controller needs a reset of internal state machines
	 * upon error conditions.
	 */
	if (sdhci_needs_reset(host, mrq)) {
		/*
		 * Do not finish until command and data lines are available for
		 * reset. Note there can only be one other mrq, so it cannot
		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
		 * would both be null.
		 */
		if (host->cmd || host->data_cmd) {
			spin_unlock_irqrestore(&host->lock, flags);
			return true;
		}

		/* Some controllers need this kick or reset won't work here */
		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
			/* This is to force an update */
			host->ops->set_clock(host, host->clock);

		/* Spec says we should do both at the same time, but Ricoh
		   controllers do not like that. */
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);

		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	host->mrqs_done[i] = NULL;

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);

	mmc_request_done(host->mmc, mrq);

	return false;
}
static void sdhci_tasklet_finish(unsigned long param)
{
	struct sdhci_host *host = (struct sdhci_host *)param;

	while (!sdhci_request_done(host))
		;
}
static void sdhci_timeout_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		host->cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->cmd->mrq);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
static void sdhci_timeout_data_timer(unsigned long data)
{
	struct sdhci_host *host;
	unsigned long flags;

	host = (struct sdhci_host *)data;

	spin_lock_irqsave(&host->lock, flags);

	if (host->data || host->data_cmd ||
	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
		pr_err("%s: Timeout waiting for hardware interrupt.\n",
		       mmc_hostname(host->mmc));
		sdhci_dumpregs(host);

		if (host->data) {
			host->data->error = -ETIMEDOUT;
			sdhci_finish_data(host);
		} else if (host->data_cmd) {
			host->data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->data_cmd->mrq);
		} else {
			host->cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, host->cmd->mrq);
		}
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
/*****************************************************************************\
 *                                                                           *
 *                           Interrupt handling                              *
 *                                                                           *
\*****************************************************************************/
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
{
	if (!host->cmd) {
		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;
		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);
		return;
	}

	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
		if (intmask & SDHCI_INT_TIMEOUT)
			host->cmd->error = -ETIMEDOUT;
		else
			host->cmd->error = -EILSEQ;

		/*
		 * If this command initiates a data phase and a response
		 * CRC error is signalled, the card can start transferring
		 * data - the card may have received the command without
		 * error. We must not terminate the mmc_request early.
		 *
		 * If the card did not receive the command or returned an
		 * error which prevented it sending data, the data phase
		 * will time out.
		 */
		if (host->cmd->data &&
		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
		     SDHCI_INT_CRC) {
			host->cmd = NULL;
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}

	if (intmask & SDHCI_INT_RESPONSE)
		sdhci_finish_command(host);
}
#ifdef CONFIG_MMC_DEBUG
static void sdhci_adma_show_error(struct sdhci_host *host)
{
	void *desc = host->adma_table;

	sdhci_dumpregs(host);

	while (true) {
		struct sdhci_adma2_64_desc *dma_desc = desc;

		if (host->flags & SDHCI_USE_64_BIT_DMA)
			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_hi),
			    le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));
		else
			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
			    desc, le32_to_cpu(dma_desc->addr_lo),
			    le16_to_cpu(dma_desc->len),
			    le16_to_cpu(dma_desc->cmd));

		desc += host->desc_sz;

		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
			break;
	}
}
#else
static void sdhci_adma_show_error(struct sdhci_host *host) { }
#endif
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
	u32 command;

	/* CMD19 generates _only_ Buffer Read Ready interrupt */
	if (intmask & SDHCI_INT_DATA_AVAIL) {
		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
		if (command == MMC_SEND_TUNING_BLOCK ||
		    command == MMC_SEND_TUNING_BLOCK_HS200) {
			host->tuning_done = 1;
			wake_up(&host->buf_ready_int);
			return;
		}
	}

	if (!host->data) {
		struct mmc_command *data_cmd = host->data_cmd;

		/*
		 * The "data complete" interrupt is also used to
		 * indicate that a busy state has ended. See comment
		 * above in sdhci_cmd_irq().
		 */
		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
				host->data_cmd = NULL;
				data_cmd->error = -ETIMEDOUT;
				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
			if (intmask & SDHCI_INT_DATA_END) {
				host->data_cmd = NULL;
				/*
				 * Some cards handle busy-end interrupt
				 * before the command completed, so make
				 * sure we do things in the proper order.
				 */
				if (host->cmd == data_cmd)
					return;

				sdhci_finish_mrq(host, data_cmd->mrq);
				return;
			}
		}

		/*
		 * SDHCI recovers from errors by resetting the cmd and data
		 * circuits. Until that is done, there very well might be more
		 * interrupts, so ignore them in that case.
		 */
		if (host->pending_reset)
			return;

		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
		       mmc_hostname(host->mmc), (unsigned)intmask);
		sdhci_dumpregs(host);

		return;
	}

	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		host->data->error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_DATA_END_BIT)
		host->data->error = -EILSEQ;
	else if ((intmask & SDHCI_INT_DATA_CRC) &&
		 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
			!= MMC_BUS_TEST_R)
		host->data->error = -EILSEQ;
	else if (intmask & SDHCI_INT_ADMA_ERROR) {
		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
		sdhci_adma_show_error(host);
		host->data->error = -EIO;
		if (host->ops->adma_workaround)
			host->ops->adma_workaround(host, intmask);
	}

	if (host->data->error)
		sdhci_finish_data(host);
	else {
		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
			sdhci_transfer_pio(host);

		/*
		 * We currently don't do anything fancy with DMA
		 * boundaries, but as we can't disable the feature
		 * we need to at least restart the transfer.
		 *
		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
		 * should return a valid address to continue from, but as
		 * some controllers are faulty, don't trust them.
		 */
		if (intmask & SDHCI_INT_DMA_END) {
			u32 dmastart, dmanow;

			dmastart = sg_dma_address(host->data->sg);
			dmanow = dmastart + host->data->bytes_xfered;
			/*
			 * Force update to the next DMA block boundary.
			 */
			dmanow = (dmanow &
				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
				SDHCI_DEFAULT_BOUNDARY_SIZE;
			host->data->bytes_xfered = dmanow - dmastart;
			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
			    dmastart, host->data->bytes_xfered, dmanow);
			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
		}

		if (intmask & SDHCI_INT_DATA_END) {
			if (host->cmd == host->data_cmd) {
				/*
				 * Data managed to finish before the
				 * command completed. Make sure we do
				 * things in the proper order.
				 */
				host->data_early = 1;
			} else {
				sdhci_finish_data(host);
			}
		}
	}
}
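/*
 * Worked example for the SDHCI_INT_DMA_END handling above (illustrative
 * numbers only): with SDHCI_DEFAULT_BOUNDARY_SIZE == 512 KiB (0x80000), a
 * transfer whose buffer starts at dmastart == 0x10003800 stops at the first
 * boundary, and the rounding
 *
 *	(0x10003800 & ~0x7ffff) + 0x80000 == 0x10080000
 *
 * yields bytes_xfered == 0x7c800; the write to SDHCI_DMA_ADDRESS then
 * restarts the transfer at that boundary.
 */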
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;

	spin_lock(&host->lock);

	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
		spin_unlock(&host->lock);
		return IRQ_NONE;
	}

	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	if (!intmask || intmask == 0xffffffff) {
		result = IRQ_NONE;
		goto out;
	}

	do {
		DBG("IRQ status 0x%08x\n", intmask);

		if (host->ops->irq) {
			intmask = host->ops->irq(host, intmask);
			if (!intmask)
				goto cont;
		}

		/* Clear selected interrupts. */
		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
				  SDHCI_INT_BUS_POWER);
		sdhci_writel(host, mask, SDHCI_INT_STATUS);

		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

			/*
			 * There is an observation on i.mx esdhc. INSERT
			 * bit will be immediately set again when it gets
			 * cleared, if a card is inserted. We have to mask
			 * the irq to prevent interrupt storm which will
			 * freeze the system. And the REMOVE gets the
			 * same situation.
			 *
			 * More testing is needed here to ensure it works
			 * for other platforms though.
			 */
			host->ier &= ~(SDHCI_INT_CARD_INSERT |
				       SDHCI_INT_CARD_REMOVE);
			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
					       SDHCI_INT_CARD_INSERT;
			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);

			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
						       SDHCI_INT_CARD_REMOVE);
			result = IRQ_WAKE_THREAD;
		}

		if (intmask & SDHCI_INT_CMD_MASK)
			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);

		if (intmask & SDHCI_INT_DATA_MASK)
			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);

		if (intmask & SDHCI_INT_BUS_POWER)
			pr_err("%s: Card is consuming too much power!\n",
			       mmc_hostname(host->mmc));

		if (intmask & SDHCI_INT_RETUNE)
			mmc_retune_needed(host->mmc);

		if ((intmask & SDHCI_INT_CARD_INT) &&
		    (host->ier & SDHCI_INT_CARD_INT)) {
			sdhci_enable_sdio_irq_nolock(host, false);
			host->thread_isr |= SDHCI_INT_CARD_INT;
			result = IRQ_WAKE_THREAD;
		}

		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);

		if (intmask) {
			unexpected |= intmask;
			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		}
cont:
		if (result == IRQ_NONE)
			result = IRQ_HANDLED;

		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);
out:
	spin_unlock(&host->lock);

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), unexpected);
		sdhci_dumpregs(host);
	}

	return result;
}
static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
{
	struct sdhci_host *host = dev_id;
	unsigned long flags;
	u32 isr;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;
	spin_unlock_irqrestore(&host->lock, flags);

	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		struct mmc_host *mmc = host->mmc;

		mmc->ops->card_event(mmc);
		mmc_detect_change(mmc, msecs_to_jiffies(200));
	}

	if (isr & SDHCI_INT_CARD_INT) {
		sdio_run_irqs(host->mmc);

		spin_lock_irqsave(&host->lock, flags);
		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
			sdhci_enable_sdio_irq_nolock(host, true);
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
}
/*****************************************************************************\
 *                                                                           *
 *                             Suspend/resume                                *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_PM

/*
 * To enable wakeup events, the corresponding events have to be enabled in
 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
 * Table' in the SD Host Controller Standard Specification.
 * It is useless to restore SDHCI_INT_ENABLE state in
 * sdhci_disable_irq_wakeups() since it will be set by
 * sdhci_enable_card_detection() or sdhci_init().
 */
void sdhci_enable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;
	u32 irq_val = SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
		      SDHCI_INT_CARD_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val |= mask;
	/* Avoid fake wake up */
	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) {
		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE);
		irq_val &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
	}
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
{
	u8 val;
	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
			| SDHCI_WAKE_ON_INT;

	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
	val &= ~mask;
	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
}
int sdhci_suspend_host(struct sdhci_host *host)
{
	sdhci_disable_card_detection(host);

	mmc_retune_timer_stop(host->mmc);

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		host->ier = 0;
		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		free_irq(host->irq, host);
	} else {
		sdhci_enable_irq_wakeups(host);
		enable_irq_wake(host->irq);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_suspend_host);
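/*
 * Illustrative sketch (hypothetical foo_* names, not part of this file): a
 * glue driver normally calls the suspend/resume pair from its own
 * dev_pm_ops:
 *
 *	static int foo_sdhci_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 *	static int foo_sdhci_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_resume_host(host);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_sdhci_pm_ops, foo_sdhci_suspend,
 *				 foo_sdhci_resume);
 */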
int sdhci_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
		/* Card keeps power but host controller does not */
		sdhci_init(host, 0);
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	} else {
		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
		mmiowb();
	}

	if (!device_may_wakeup(mmc_dev(host->mmc))) {
		ret = request_threaded_irq(host->irq, sdhci_irq,
					   sdhci_thread_irq, IRQF_SHARED,
					   mmc_hostname(host->mmc), host);
		if (ret)
			return ret;
	} else {
		sdhci_disable_irq_wakeups(host);
		disable_irq_wake(host->irq);
	}

	sdhci_enable_card_detection(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
int sdhci_runtime_suspend_host(struct sdhci_host *host)
{
	unsigned long flags;

	mmc_retune_timer_stop(host->mmc);

	spin_lock_irqsave(&host->lock, flags);
	host->ier &= SDHCI_INT_CARD_INT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	spin_unlock_irqrestore(&host->lock, flags);

	synchronize_hardirq(host->irq);

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
int sdhci_runtime_resume_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;
	int host_flags = host->flags;

	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		if (host->ops->enable_dma)
			host->ops->enable_dma(host);
	}

	sdhci_init(host, 0);

	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED) {
		/* Force clock and power re-program */
		host->pwr = 0;
		host->clock = 0;
		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
		mmc->ops->set_ios(mmc, &mmc->ios);

		if ((host_flags & SDHCI_PV_ENABLED) &&
		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
			spin_lock_irqsave(&host->lock, flags);
			sdhci_enable_preset_value(host, true);
			spin_unlock_irqrestore(&host->lock, flags);
		}

		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
		    mmc->ops->hs400_enhanced_strobe)
			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
	}

	spin_lock_irqsave(&host->lock, flags);

	host->runtime_suspended = false;

	/* Enable SDIO IRQ */
	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
		sdhci_enable_sdio_irq_nolock(host, true);

	/* Enable Card Detection */
	sdhci_enable_card_detection(host);

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
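/*
 * Illustrative sketch (hypothetical foo_* names): the runtime PM hooks pair
 * up the same way, typically gating the controller clock in between:
 *
 *	static int foo_sdhci_runtime_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *		struct foo_priv *priv = sdhci_priv(host);
 *		int ret;
 *
 *		ret = sdhci_runtime_suspend_host(host);
 *		if (ret)
 *			return ret;
 *		clk_disable_unprepare(priv->clk);
 *
 *		return 0;
 *	}
 *
 *	static int foo_sdhci_runtime_resume(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *		struct foo_priv *priv = sdhci_priv(host);
 *
 *		clk_prepare_enable(priv->clk);
 *		return sdhci_runtime_resume_host(host);
 *	}
 */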
#endif /* CONFIG_PM */
/*****************************************************************************\
 *                                                                           *
 *                   Command Queue Engine (CQE) helpers                      *
 *                                                                           *
\*****************************************************************************/
void sdhci_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u8 ctrl;

	spin_lock_irqsave(&host->lock, flags);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		ctrl |= SDHCI_CTRL_ADMA64;
	else
		ctrl |= SDHCI_CTRL_ADMA32;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);

	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, 512),
		     SDHCI_BLOCK_SIZE);

	/* Set maximum timeout */
	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);

	host->ier = host->cqe_ier;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);

	host->cqe_on = true;

	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (recovery) {
		sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
		 mmc_hostname(mmc), host->ier,
		 sdhci_readl(host, SDHCI_INT_STATUS));

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}
EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
		   int *data_error)
{
	u32 mask;

	if (!host->cqe_on)
		return false;

	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
		*cmd_error = -EILSEQ;
	else if (intmask & SDHCI_INT_TIMEOUT)
		*cmd_error = -ETIMEDOUT;
	else
		*cmd_error = 0;

	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
		*data_error = -EILSEQ;
	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
		*data_error = -ETIMEDOUT;
	else if (intmask & SDHCI_INT_ADMA_ERROR)
		*data_error = -EIO;
	else
		*data_error = 0;

	/* Clear selected interrupts. */
	mask = intmask & host->cqe_ier;
	sdhci_writel(host, mask, SDHCI_INT_STATUS);

	if (intmask & SDHCI_INT_BUS_POWER)
		pr_err("%s: Card is consuming too much power!\n",
		       mmc_hostname(host->mmc));

	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
	if (intmask) {
		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), intmask);
		sdhci_dumpregs(host);
	}

	return true;
}
EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
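/*
 * Illustrative sketch (hypothetical foo_* names): a glue driver pairing this
 * core with a command queue engine would typically call sdhci_cqe_irq() from
 * its ->irq host op and forward CQE events to the engine:
 *
 *	static u32 foo_sdhci_irq(struct sdhci_host *host, u32 intmask)
 *	{
 *		int cmd_error = 0, data_error = 0;
 *
 *		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *			return intmask;	// not CQE: normal sdhci handling
 *
 *		foo_cqe_engine_irq(host->mmc, intmask, cmd_error, data_error);
 *
 *		return 0;	// fully handled here
 *	}
 */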
/*****************************************************************************\
 *                                                                           *
 *                        Device allocation/registration                     *
 *                                                                           *
\*****************************************************************************/
struct sdhci_host *sdhci_alloc_host(struct device *dev,
				    size_t priv_size)
{
	struct mmc_host *mmc;
	struct sdhci_host *host;

	WARN_ON(dev == NULL);

	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
	if (!mmc)
		return ERR_PTR(-ENOMEM);

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->mmc_host_ops = sdhci_ops;
	mmc->ops = &host->mmc_host_ops;

	host->flags = SDHCI_SIGNALING_330;

	host->cqe_ier     = SDHCI_CQE_INT_MASK;
	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

	host->tuning_delay = -1;

	return host;
}
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
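/*
 * Illustrative sketch (hypothetical foo_* names): the usual allocation path
 * in a glue driver's probe function:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct foo_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *
 *	host->hw_name = "foo";
 *	host->ops = &foo_sdhci_ops;
 *	host->irq = platform_get_irq(pdev, 0);
 *	host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 *	platform_set_drvdata(pdev, host);
 *
 *	ret = sdhci_add_host(host);
 *	if (ret)
 *		sdhci_free_host(host);
 */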
static int sdhci_set_dma_mask(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct device *dev = mmc_dev(mmc);
	int ret = -EINVAL;

	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_64_BIT_DMA;

	/* Try 64-bit mask if hardware is capable of it */
	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
		if (ret) {
			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_64_BIT_DMA;
		}
	}

	/* 32-bit mask as default & fallback */
	if (ret) {
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
		if (ret)
			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
				mmc_hostname(mmc));
	}

	return ret;
}
void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
{
	u16 v;
	u64 dt_caps_mask = 0;
	u64 dt_caps = 0;

	if (host->read_caps)
		return;

	host->read_caps = true;

	if (debug_quirks)
		host->quirks = debug_quirks;

	if (debug_quirks2)
		host->quirks2 = debug_quirks2;

	sdhci_do_reset(host, SDHCI_RESET_ALL);

	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps-mask", &dt_caps_mask);
	of_property_read_u64(mmc_dev(host->mmc)->of_node,
			     "sdhci-caps", &dt_caps);

	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;

	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
		return;

	if (caps) {
		host->caps = *caps;
	} else {
		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
		host->caps &= ~lower_32_bits(dt_caps_mask);
		host->caps |= lower_32_bits(dt_caps);
	}

	if (host->version < SDHCI_SPEC_300)
		return;

	if (caps1) {
		host->caps1 = *caps1;
	} else {
		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
		host->caps1 &= ~upper_32_bits(dt_caps_mask);
		host->caps1 |= upper_32_bits(dt_caps);
	}
}
EXPORT_SYMBOL_GPL(__sdhci_read_caps);
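/*
 * The sdhci-caps/sdhci-caps-mask properties read above let a device tree
 * correct capability bits that a controller misreports.  Illustrative sketch
 * only (node name and values are made up; the mask is a 64-bit value split
 * <hi lo>, with the low word applied to SDHCI_CAPABILITIES):
 *
 *	mmc@5000 {
 *		compatible = "foo,foo-sdhci";
 *		sdhci-caps-mask = <0x0 0x04000000>;  // e.g. clear CAN_VDD_180
 *	};
 */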
int sdhci_setup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc;
	u32 max_current_caps;
	unsigned int ocr_avail;
	unsigned int override_timeout_clk;
	u32 max_clk;
	int ret;

	WARN_ON(host == NULL);
	if (host == NULL)
		return -EINVAL;

	mmc = host->mmc;

	/*
	 * If there are external regulators, get them. Note this must be done
	 * early before resetting the host and reading the capabilities so that
	 * the host can take the appropriate action if regulators are not
	 * available.
	 */
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		return ret;

	sdhci_read_caps(host);

	override_timeout_clk = host->timeout_clk;

	if (host->version > SDHCI_SPEC_300) {
		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
		       mmc_hostname(mmc), host->version);
	}

	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
		host->flags |= SDHCI_USE_SDMA;
	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
		DBG("Controller doesn't have SDMA capability\n");
	else
		host->flags |= SDHCI_USE_SDMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
	    (host->flags & SDHCI_USE_SDMA)) {
		DBG("Disabling DMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_SDMA;
	}

	if ((host->version >= SDHCI_SPEC_200) &&
	    (host->caps & SDHCI_CAN_DO_ADMA2))
		host->flags |= SDHCI_USE_ADMA;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
	    (host->flags & SDHCI_USE_ADMA)) {
		DBG("Disabling ADMA as it is marked broken\n");
		host->flags &= ~SDHCI_USE_ADMA;
	}

	/*
	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
	 * and *must* do 64-bit DMA. A driver has the opportunity to change
	 * that during the first call to ->enable_dma(). Similarly
	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
	 * implement.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->flags |= SDHCI_USE_64_BIT_DMA;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}
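
		/*
		 * Worked example (assuming SDHCI_MAX_SEGS == 128 and the
		 * 12-byte/8-byte descriptor sizes from sdhci.h): the table
		 * is (128 * 2 + 1) * 12 = 3084 bytes for 64-bit ADMA and
		 * (128 * 2 + 1) * 8 = 2056 bytes for 32-bit ADMA.
		 */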
		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * In case of Host Controller v3.00, find out whether clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * In case the value in Clock Multiplier is 0, then programmable
	 * clock mode is not supported, otherwise the actual clock
	 * multiplier is one more than the value of Clock Multiplier
	 * in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;
	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
				       mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out. Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;
	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);
		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * In case Re-tuning Timer is not disabled, the actual value of
	 * re-tuning timer will be 2 ^ (n - 1).
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			    SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;
	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);

		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000; /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_330_MASK) >>
					 SDHCI_MAX_CURRENT_330_SHIFT) *
					 SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_300_MASK) >>
					 SDHCI_MAX_CURRENT_300_SHIFT) *
					 SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
					 SDHCI_MAX_CURRENT_180_MASK) >>
					 SDHCI_MAX_CURRENT_180_SHIFT) *
					 SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}
	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of segments. Depends on if the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA)
		mmc->max_segs = SDHCI_MAX_SEGS;
	else if (host->flags & SDHCI_USE_SDMA)
		mmc->max_segs = 1;
	else /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;

	/*
	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
	 * is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);
void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		     sdhci_tasklet_finish, (unsigned long)host);

	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host);
	setup_timer(&host->data_timer, sdhci_timeout_data_timer,
		    (unsigned long)host);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

#ifdef CONFIG_MMC_DEBUG
	sdhci_dumpregs(host);
#endif

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	mmiowb();

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);
int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
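/*
 * Illustrative sketch: a glue driver that must adjust mmc->caps between the
 * two phases can open-code sdhci_add_host(), exactly as the function above
 * does (foo_fixup_caps() is hypothetical):
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *
 *	foo_fixup_caps(host->mmc);
 *
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *
 *	return ret;
 */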
void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);
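/*
 * Illustrative sketch (hypothetical foo_* names): teardown mirrors probe;
 * the 'dead' argument says whether the device is still reachable, and a
 * common way to detect a vanished controller is an all-ones register read:
 *
 *	static int foo_sdhci_remove(struct platform_device *pdev)
 *	{
 *		struct sdhci_host *host = platform_get_drvdata(pdev);
 *		int dead = readl(host->ioaddr + SDHCI_INT_STATUS) ==
 *			   0xffffffff;
 *
 *		sdhci_remove_host(host, dead);
 *		sdhci_free_host(host);
 *
 *		return 0;
 *	}
 */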
void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);
/*****************************************************************************\
 *                                                                           *
 *                            Driver init/exit                               *
 *                                                                           *
\*****************************************************************************/
static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);
module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");