/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
static struct workqueue_struct *workqueue;
/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
 * We normally treat cards as removed during suspend if they are not
 * known to be on a non-removable bus, to avoid the risk of writing
 * back data to a different card after resume.  Allow this to be
 * overridden if necessary.
 */
#ifdef CONFIG_MMC_UNSAFE_RESUME
bool mmc_assume_removable;
#else
bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
MODULE_PARM_DESC(
	removable,
	"MMC/SD cards are removable and may be removed during suspend");
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	return queue_delayed_work(workqueue, work, delay);
}
/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
	flush_workqueue(workqueue);
}
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[random32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		led_trigger_event(host->led, LED_OFF);

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);

		mmc_host_clk_release(host);
	}
}

EXPORT_SYMBOL(mmc_request_done);
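
/*
 * Usage sketch (illustrative only, not part of the original file): a host
 * controller driver typically calls mmc_request_done() from its interrupt
 * or completion path once the hardware has finished the transfer.  The
 * "foo" names below are hypothetical:
 *
 *	static void foo_finish_request(struct foo_host *foo)
 *	{
 *		struct mmc_request *mrq = foo->mrq;
 *
 *		foo->mrq = NULL;
 *		mrq->cmd->error = foo_check_errors(foo);
 *		mmc_request_done(foo->mmc, mrq);
 *	}
 */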
static void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	mmc_host_clk_hold(host);
	led_trigger_event(host->led, LED_FULL);
	host->ops->request(host, mrq);
}
static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;
	if (mmc_card_removed(host->card)) {
		mrq->cmd->error = -ENOMEDIUM;
		complete(&mrq->completion);
		return -ENOMEDIUM;
	}
	mmc_start_request(host, mrq);
	return 0;
}
static void mmc_wait_for_req_done(struct mmc_host *host,
				  struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		host->ops->request(host, mrq);
	}
}
/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previous started request
 *		that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let the
 *	host prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req) {
		mmc_host_clk_hold(host);
		host->ops->pre_req(host, mrq, is_first_req);
		mmc_host_clk_release(host);
	}
}
/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: Error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req) {
		mmc_host_clk_hold(host);
		host->ops->post_req(host, mrq, err);
		mmc_host_clk_release(host);
	}
}
/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request, then start the new one and return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, or NULL if none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		mmc_wait_for_req_done(host, host->areq->mrq);
		err = host->areq->err_check(host->card, host->areq);
	}

	if (!err && areq)
		start_err = __mmc_start_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
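
/*
 * Usage sketch (illustrative only, modelled on how a block driver might
 * use the async interface): while request N runs on the bus, request N+1
 * is prepared, and the previously completed request is handed back:
 *
 *	struct mmc_async_req *prev;
 *	int err;
 *
 *	prev = mmc_start_req(host, &next_areq, &err);
 *	if (err)
 *		...	handle error of the previous transfer
 *	...	post-process "prev" while "next_areq" runs
 */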
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. Does not attempt to parse the
 *	response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);
	mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
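
/*
 * Usage sketch (illustrative only): a synchronous single-block read.
 * The scatterlist setup is abbreviated and error handling is omitted;
 * "block_addr", "sg" and "card" are assumed to exist in the caller:
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = block_addr;
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mmc_set_data_timeout(&data, card);
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(card->host, &mrq);
 */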
/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, then poll the card status with
 *	SEND_STATUS until the card is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	BUG_ON(!card);

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	/*
	 * If the card status is in PRG-state, we can send the HPI command.
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
		do {
			/*
			 * We don't know when the HPI command will finish
			 * processing, so we need to resend HPI until out
			 * of prg-state, and keep checking the card status
			 * with SEND_STATUS. If a timeout error occurs when
			 * sending the HPI command, we are already out of
			 * prg-state.
			 */
			err = mmc_send_hpi_cmd(card, &status);
			if (err)
				pr_debug("%s: abort HPI (%d error)\n",
					 mmc_hostname(card->host), err);

			err = mmc_send_status(card, &status);
			if (err)
				break;
		} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
	} else
		pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete.  Return any error that occurred while the command
 *	was executing.  Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);
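
/*
 * Usage sketch (illustrative only): send CMD13 (SEND_STATUS) with up to
 * three retries.  The host must already be claimed by the caller:
 *
 *	struct mmc_command cmd = {0};
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */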
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (mmc_host_clk_rate(card->host))
			timeout_us += data->timeout_clks * 1000 /
				(mmc_host_clk_rate(card->host) / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says: "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 300ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 300000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
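
/*
 * Usage sketch (illustrative only): fill in blksz/blocks/flags first,
 * then let the core derive the timeout from the card's CSD:
 *
 *	data.blksz = 512;
 *	data.blocks = nr;
 *	data.flags = MMC_DATA_WRITE;
 *	mmc_set_data_timeout(&data, card);
 */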
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
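
/*
 * Usage sketch (illustrative only): a caller that transfers through a
 * single scatter-gather entry can pad its length first, so len ends up
 * rounded to a 4-byte multiple:
 *
 *	len = mmc_align_data_size(card, len);
 *	sg_init_one(&sg, buf, len);
 */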
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non-NULL and
 *	dereferences to a non-zero value, then this will return prematurely
 *	with that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);
	if (host->ops->enable && !stop && host->claim_cnt == 1)
		host->ops->enable(host);
	return stop;
}

EXPORT_SYMBOL(__mmc_claim_host);
/**
 *	mmc_try_claim_host - try exclusively to claim a host
 *	@host: mmc host to claim
 *
 *	Returns %1 if the host is claimed, %0 otherwise.
 */
int mmc_try_claim_host(struct mmc_host *host)
{
	int claimed_host = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	if (!host->claimed || host->claimer == current) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		claimed_host = 1;
	}
	spin_unlock_irqrestore(&host->lock, flags);
	if (host->ops->enable && claimed_host && host->claim_cnt == 1)
		host->ops->enable(host);
	return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release an MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	if (host->ops->disable && host->claim_cnt == 1)
		host->ops->disable(host);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
	}
}

EXPORT_SYMBOL(mmc_release_host);
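
/*
 * Usage sketch (illustrative only): the claim/release pair brackets any
 * sequence of commands so that only one context drives the bus at a time.
 * mmc_claim_host() is the wrapper around __mmc_claim_host(host, NULL)
 * declared in core.h:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);
 */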
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 ios->bus_width, ios->timing);

	if (ios->clock > 0)
		mmc_set_ungated(host);
	host->ops->set_ios(host, ios);
}
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	mmc_host_clk_hold(host);
	host->ios.chip_select = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	mmc_host_clk_hold(host);
	__mmc_set_clock(host, hz);
	mmc_host_clk_release(host);
}
#ifdef CONFIG_MMC_CLKGATE
/*
 * This gates the clock by setting it to 0 Hz.
 */
void mmc_gate_clock(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_old = host->ios.clock;
	host->ios.clock = 0;
	host->clk_gated = true;
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_set_ios(host);
}

/*
 * This restores the clock from gating by using the cached
 * clock value.
 */
void mmc_ungate_clock(struct mmc_host *host)
{
	/*
	 * We should previously have gated the clock, so the clock shall
	 * be 0 here! The clock may however be 0 during initialization,
	 * when some request operations are performed before setting
	 * the frequency. When ungate is requested in that situation
	 * we just ignore the call.
	 */
	if (host->clk_old) {
		BUG_ON(host->ios.clock);
		/* This call will also set host->clk_gated to false */
		__mmc_set_clock(host, host->clk_old);
	}
}

void mmc_set_ungated(struct mmc_host *host)
{
	unsigned long flags;

	/*
	 * We've been given a new frequency while the clock is gated,
	 * so make sure we regard this as ungating it.
	 */
	spin_lock_irqsave(&host->clk_lock, flags);
	host->clk_gated = false;
	spin_unlock_irqrestore(&host->clk_lock, flags);
}

#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	mmc_host_clk_hold(host);
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	mmc_host_clk_hold(host);
	host->ios.bus_width = width;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible, a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
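
/*
 * Example (restating the kernel-doc boundary note above in code): a board
 * whose supply can range from 3.3 V to 3.4 V would set up its OCR mask as
 *
 *	host->ocr_avail = mmc_vddrange_to_ocrmask(3300, 3400);
 *
 * which yields MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35.
 */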
#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int			result = 0;
	int			count;
	int			i;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		int		vdd_uV;
		int		vdd_mV;

		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int			result = 0;
	int			min_uV, max_uV;

	if (vdd_bit) {
		int		tmp;
		int		voltage;

		/* REVISIT mmc_vddrange_to_ocrmask() may have set some
		 * bits this regulator doesn't quite support ... don't
		 * be too picky, most cards and regulators are OK with
		 * a 0.1V range goof (it's a small error percentage).
		 */
		tmp = vdd_bit - ilog2(MMC_VDD_165_195);
		if (tmp == 0) {
			min_uV = 1650 * 1000;
			max_uV = 1950 * 1000;
		} else {
			min_uV = 1900 * 1000 + tmp * 100 * 1000;
			max_uV = min_uV + 100 * 1000;
		}

		/* avoid needless changes to this voltage; the regulator
		 * might not allow this operation
		 */
		voltage = regulator_get_voltage(supply);

		if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
			min_uV = max_uV = voltage;

		if (voltage < 0)
			result = voltage;
		else if (voltage < min_uV || voltage > max_uV)
			result = regulator_set_voltage(supply, min_uV, max_uV);
		else
			result = 0;

		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL(mmc_regulator_set_ocr);

#endif /* CONFIG_REGULATOR */
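
/*
 * Usage sketch (illustrative only): a host driver with a vmmc supply
 * would typically wire these helpers up at probe time and from its
 * set_ios() method; the "foo" names below are hypothetical:
 *
 *	supply = regulator_get(dev, "vmmc");
 *	mmc->ocr_avail = mmc_regulator_get_ocrmask(supply);
 *	...
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		...
 *		mmc_regulator_set_ocr(mmc, foo->vmmc, ios->vdd);
 *	}
 */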
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	ocr &= host->ocr_avail;

	bit = ffs(ocr);
	if (bit) {
		bit -= 1;

		ocr &= 3 << bit;

		mmc_host_clk_hold(host);
		host->ios.vdd = bit;
		mmc_set_ios(host);
		mmc_host_clk_release(host);
	} else {
		pr_warning("%s: host doesn't support card's voltages\n",
				mmc_hostname(host));
		ocr = 0;
	}

	return ocr;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11)
{
	struct mmc_command cmd = {0};
	int err = 0;

	BUG_ON(!host);

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) {
		cmd.opcode = SD_SWITCH_VOLTAGE;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			return err;

		if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
			return -EIO;
	}

	host->ios.signal_voltage = signal_voltage;

	if (host->ops->start_signal_voltage_switch) {
		mmc_host_clk_hold(host);
		err = host->ops->start_signal_voltage_switch(host, &host->ios);
		mmc_host_clk_release(host);
	}

	return err;
}
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	mmc_host_clk_hold(host);
	host->ios.timing = timing;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	mmc_host_clk_hold(host);
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
	mmc_host_clk_release(host);
}
static void mmc_poweroff_notify(struct mmc_host *host)
{
	struct mmc_card *card;
	unsigned int timeout;
	unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
	int err = 0;

	card = host->card;
	mmc_claim_host(host);

	/*
	 * Send power notify command only if card
	 * is mmc and notify state is powered ON
	 */
	if (card && mmc_card_mmc(card) &&
	    (card->poweroff_notify_state == MMC_POWERED_ON)) {

		if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
			notify_type = EXT_CSD_POWER_OFF_SHORT;
			timeout = card->ext_csd.generic_cmd6_time;
			card->poweroff_notify_state = MMC_POWEROFF_SHORT;
		} else {
			notify_type = EXT_CSD_POWER_OFF_LONG;
			timeout = card->ext_csd.power_off_longtime;
			card->poweroff_notify_state = MMC_POWEROFF_LONG;
		}

		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 notify_type, timeout);

		if (err && err != -EBADMSG)
			pr_err("Device failed to respond within %d poweroff "
			       "time. Forcefully powering down the device\n",
			       timeout);

		/* Set the card state to no notification after the poweroff */
		card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
	}
	mmc_release_host(host);
}
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	mmc_host_clk_hold(host);

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);

	mmc_host_clk_release(host);
}
void mmc_power_off(struct mmc_host *host)
{
	int err = 0;
	mmc_host_clk_hold(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	/*
	 * For eMMC 4.5 devices, send the AWAKE command before the
	 * POWER_OFF_NOTIFY command, because in the sleep state
	 * eMMC 4.5 devices respond only to the RESET and AWAKE commands.
	 */
	if (host->card && mmc_card_is_sleep(host->card) &&
	    host->bus_ops->resume) {
		err = host->bus_ops->resume(host);

		if (!err)
			mmc_poweroff_notify(host);
		else
			pr_warning("%s: error %d during resume "
				   "(continue with poweroff sequence)\n",
				   mmc_hostname(host), err);
	}

	/*
	 * Reset ocr mask to be the highest possible voltage supported for
	 * this mmc host. This value will be used at next power up.
	 */
	host->ocr = 1 << (fls(host->ocr_avail) - 1);

	if (!mmc_host_is_spi(host)) {
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
		host->ios.chip_select = MMC_CS_DONTCARE;
	}
	host->ios.power_mode = MMC_POWER_OFF;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);

	mmc_host_clk_release(host);
}
/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	BUG_ON(!host);
	BUG_ON(!ops);

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	BUG_ON(!host);

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}
/**
 *	mmc_detect_change - process change of state on an MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted card.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

EXPORT_SYMBOL(mmc_detect_change);
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.  For MMC cards that define High Capacity
	 * Erase Size, whether it is switched on or not, limit to that size.
	 * Otherwise just have a stab at a good value.  For modern cards it
	 * will end up being 4MiB.  Note that if the value is too small, it
	 * can end up taking longer to erase.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->ext_csd.hc_erase_size) {
		card->pref_erase = card->ext_csd.hc_erase_size;
	} else {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	}
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
				          unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (mmc_host_clk_rate(card->host) / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg, unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0;
	int err;

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also power of 2, but it does not
	 * seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}
	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG);
out:
	return err;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
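
/*
 * Usage sketch (illustrative only): discard a range of sectors, picking
 * the best supported erase argument.  The host must be claimed:
 *
 *	if (mmc_can_trim(card))
 *		err = mmc_erase(card, from, nr, MMC_TRIM_ARG);
 *	else if (mmc_can_erase(card))
 *		err = mmc_erase(card, from, nr, MMC_ERASE_ARG);
 */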
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_discard_to)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_discard_to)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency, which can change.  In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_discard_to);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);
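
/*
 * Usage sketch (illustrative only): block-layer glue can use this value
 * to bound discard requests queued to the card, e.g.:
 *
 *	max_discard = mmc_calc_max_discard(card);
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(q, max_discard);
 */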
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}
int mmc_can_reset(struct mmc_card *card)
{
	u8 rst_n_function;

	if (!mmc_card_mmc(card))
		return 0;
	rst_n_function = card->ext_csd.rst_n_function;
	if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_can_reset);
static int mmc_do_hw_reset(struct mmc_host *host, int check)
{
	struct mmc_card *card = host->card;

	if (!host->bus_ops->power_restore)
		return -EOPNOTSUPP;

	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return -EOPNOTSUPP;

	if (!card)
		return -EINVAL;

	if (!mmc_can_reset(card))
		return -EOPNOTSUPP;

	mmc_host_clk_hold(host);
	mmc_set_clock(host, host->f_init);

	host->ops->hw_reset(host);

	/* If the reset has happened, then a status command will fail */
	if (check) {
		struct mmc_command cmd = {0};
		int err;

		cmd.opcode = MMC_SEND_STATUS;
		if (!mmc_host_is_spi(card->host))
			cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (!err) {
			mmc_host_clk_release(host);
			return -ENOSYS;
		}
	}

	host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	mmc_host_clk_release(host);

	return host->bus_ops->power_restore(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 0);
}
EXPORT_SYMBOL(mmc_hw_reset);

int mmc_hw_reset_check(struct mmc_host *host)
{
	return mmc_do_hw_reset(host, 1);
}
EXPORT_SYMBOL(mmc_hw_reset_check);
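
/*
 * Usage sketch (illustrative only): mmc_hw_reset_check() can be used to
 * verify that the board's reset line actually works; per the code above
 * it returns -ENOSYS when the card still answers SEND_STATUS after the
 * reset pulse:
 *
 *	err = mmc_hw_reset_check(host);
 *	if (err == -ENOSYS)
 *		pr_warn("%s: reset does not seem to be wired\n",
 *			mmc_hostname(host));
 */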
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/* Initialization should be done at 3.3 V I/O voltage. */
	mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);

	/*
	 * sdio_reset sends CMD52 to reset card.  Since we do not know
	 * if the card is being re-initialized, just send it.  CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}
int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);
	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
	    !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			mmc_detect_change(host, 0);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	mmc_bus_get(host);

	/*
	 * if there is a _removable_ card registered, check whether it is
	 * still present
	 */
	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
		goto out;

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}
void mmc_start_host(struct mmc_host *host)
{
	mmc_power_off(host);
	mmc_detect_change(host, 0);
}
void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif

	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}
int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);
int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);
int mmc_card_awake(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
		err = host->bus_ops->awake(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_awake);

int mmc_card_sleep(struct mmc_host *host)
{
	int err = -ENOSYS;

	if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
		return 0;

	mmc_bus_get(host);

	if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
		err = host->bus_ops->sleep(host);

	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_card_sleep);
int mmc_card_can_sleep(struct mmc_host *host)
{
	struct mmc_card *card = host->card;

	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_card_can_sleep);
/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
		return err;

	if (mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0) &&
			(card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
					mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
/*
 * Turn the cache ON/OFF.
 * Turning the cache OFF shall trigger flushing of the data
 * to the non-volatile storage.
 */
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
	struct mmc_card *card = host->card;
	unsigned int timeout;
	int err = 0;

	if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
			mmc_card_is_removable(host))
		return err;

	if (card && mmc_card_mmc(card) &&
			(card->ext_csd.cache_size > 0)) {
		enable = !!enable;

		if (card->ext_csd.cache_ctrl ^ enable) {
			timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					EXT_CSD_CACHE_CTRL, enable, timeout);
			if (err)
				pr_err("%s: cache %s error %d\n",
						mmc_hostname(card->host),
						enable ? "on" : "off",
						err);
			else
				card->ext_csd.cache_ctrl = enable;
		}
	}

	return err;
}
EXPORT_SYMBOL(mmc_cache_ctrl);
#ifdef CONFIG_PM

/**
 *	mmc_suspend_host - suspend a host
 *	@host: mmc host
 */
int mmc_suspend_host(struct mmc_host *host)
{
	int err = 0;

	cancel_delayed_work(&host->detect);
	mmc_flush_scheduled_work();
	if (mmc_try_claim_host(host)) {
		err = mmc_cache_ctrl(host, 0);
		mmc_release_host(host);
	} else {
		err = -EBUSY;
	}

	if (err)
		goto out;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/*
		 * A long response time is not acceptable for device drivers
		 * when doing suspend. Prevent mmc_claim_host() in the suspend
		 * sequence from potentially waiting "forever" by trying to
		 * pre-claim the host.
		 */
		if (mmc_try_claim_host(host)) {
			if (host->bus_ops->suspend)
				err = host->bus_ops->suspend(host);
			mmc_release_host(host);

			if (err == -ENOSYS || !host->bus_ops->resume) {
				/*
				 * We simply "remove" the card in this case.
				 * It will be redetected on resume.  (Calling
				 * bus_ops->remove() with a claimed host can
				 * deadlock.)
				 */
				if (host->bus_ops->remove)
					host->bus_ops->remove(host);
				mmc_claim_host(host);
				mmc_detach_bus(host);
				mmc_power_off(host);
				mmc_release_host(host);
				host->pm_flags = 0;
				err = 0;
			}
		} else {
			err = -EBUSY;
		}
	}
	mmc_bus_put(host);

	if (!err && !mmc_card_keep_power(host))
		mmc_power_off(host);

out:
	return err;
}

EXPORT_SYMBOL(mmc_suspend_host);
/**
 *	mmc_resume_host - resume a previously suspended host
 *	@host: mmc host
 */
int mmc_resume_host(struct mmc_host *host)
{
	int err = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		if (!mmc_card_keep_power(host)) {
			mmc_power_up(host);
			mmc_select_voltage(host, host->ocr);
			/*
			 * Tell runtime PM core we just powered up the card,
			 * since it still believes the card is powered off.
			 * Note that currently runtime PM is only enabled
			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
			 */
			if (mmc_card_sdio(host->card) &&
			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
				pm_runtime_disable(&host->card->dev);
				pm_runtime_set_active(&host->card->dev);
				pm_runtime_enable(&host->card->dev);
			}
		}
		BUG_ON(!host->bus_ops->resume);
		err = host->bus_ops->resume(host);
		if (err) {
			pr_warning("%s: error %d during resume "
					    "(card was removed?)\n",
					    mmc_hostname(host), err);
			err = 0;
		}
	}
	host->pm_flags &= ~MMC_PM_KEEP_POWER;
	mmc_bus_put(host);

	return err;
}
EXPORT_SYMBOL(mmc_resume_host);
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in pm notifier while userspace isn't yet frozen, so we will be able
 * to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops || host->bus_ops->suspend)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		if (host->bus_ops->remove)
			host->bus_ops->remove(host);

		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
		spin_unlock_irqrestore(&host->lock, flags);
		mmc_detect_change(host, 0);

		break;
	}

	return 0;
}
#endif /* CONFIG_PM */
static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}
static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");