2 * linux/drivers/mmc/core/mmc.c
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
6 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
13 #include <linux/err.h>
14 #include <linux/slab.h>
15 #include <linux/stat.h>
16 #include <linux/pm_runtime.h>
18 #include <linux/mmc/host.h>
19 #include <linux/mmc/card.h>
20 #include <linux/mmc/mmc.h>
/*
 * CSD TRAN_SPEED decode tables, indexed by the 3-bit exponent and 4-bit
 * mantissa fields; mmc_decode_csd() computes max_dtr = tran_exp[e] * tran_mant[m].
 * Mantissa index 0 is reserved (value 0).
 * NOTE(review): the closing "};" lines were lost in extraction (line-number gaps).
 */
27 static const unsigned int tran_exp[] = {
28 10000, 100000, 1000000, 10000000,
32 static const unsigned char tran_mant[] = {
33 0, 10, 12, 13, 15, 20, 25, 30,
34 35, 40, 45, 50, 55, 60, 70, 80,
/*
 * CSD TAAC (access time) decode tables; mmc_decode_csd() computes
 * tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10, i.e. the mantissa
 * table is scaled by 10 and rounded up on division.
 */
37 static const unsigned int tacc_exp[] = {
38 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
41 static const unsigned int tacc_mant[] = {
42 0, 10, 12, 13, 15, 20, 25, 30,
43 35, 40, 45, 50, 55, 60, 70, 80,
/*
 * UNSTUFF_BITS(resp, start, size): extract a 'size'-bit field whose least
 * significant bit is at bit position 'start' of a 128-bit response held in
 * four u32 words.  resp[0] carries the most significant 32 bits, hence the
 * "3 - (start)/32" word offset.  Fields may straddle a word boundary: the
 * "__size + __shft > 32" branch pulls in the high part from the next more
 * significant word.  The "% 32" guards the shift count against the
 * undefined-behavior case of shifting a u32 by 32 when __shft == 0.
 * NOTE(review): the surrounding expression wrapper and the final
 * "__res & __mask" lines were dropped by the extraction (gaps at
 * original lines 47, 52-53 and 57 onward).
 */
46 #define UNSTUFF_BITS(resp,start,size) \
48 const int __size = size; \
49 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
50 const int __off = 3 - ((start) / 32); \
51 const int __shft = (start) & 31; \
54 __res = resp[__off] >> __shft; \
55 if (__size + __shft > 32) \
56 __res |= resp[__off-1] << ((32 - __shft) % 32); \
61 * Given the decoded CSD structure, decode the raw CID to our CID structure.
/*
 * The CID layout differs by MMCA spec version (csd.mmca_vsn must already be
 * decoded): v1.x cards have a 24-bit manfid and 7-char product name; v2.0+
 * cards have an 8-bit manfid, 16-bit oemid, 6-char product name, a product
 * revision byte and a full 32-bit serial.  Unknown versions are rejected.
 */
63 static int mmc_decode_cid(struct mmc_card *card)
65 u32 *resp = card->raw_cid;
68 * The selection of the format here is based upon published
69 * specs from sandisk and from what people have reported.
71 switch (card->csd.mmca_vsn) {
72 case 0: /* MMC v1.0 - v1.2 */
73 case 1: /* MMC v1.4 */
74 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
75 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
76 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
77 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
78 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
79 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
80 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
81 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
82 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
83 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
84 card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
85 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
/* Manufacturing date field is an offset from 1997; rev >= 5 parts get a
 * further +16 adjustment later in mmc_read_ext_csd(). */
86 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
89 case 2: /* MMC v2.0 - v2.2 */
90 case 3: /* MMC v3.1 - v3.3 */
92 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
93 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
94 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
95 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
96 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
97 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
98 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
99 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
100 card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
101 card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
102 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
103 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
/* default: unknown MMCA version -- log and (presumably, in the elided
 * lines) return an error.  NOTE(review): the break/default/return lines
 * were dropped by the extraction. */
107 pr_err("%s: card has unknown MMCA version %d\n",
108 mmc_hostname(card->host), card->csd.mmca_vsn);
/*
 * Pick the erase group size: the EXT_CSD high-capacity erase size when
 * ERASE_GROUP_DEF bit 0 is set, otherwise the CSD-derived size, then let
 * the core compute the preferred erase size.
 * NOTE(review): the "else" line (original line 119) between the two
 * assignments was dropped by the extraction.
 */
115 static void mmc_set_erase_size(struct mmc_card *card)
117 if (card->ext_csd.erase_group_def & 1)
118 card->erase_size = card->ext_csd.hc_erase_size;
120 card->erase_size = card->csd.erase_size;
122 mmc_init_erase(card);
126 * Given a 128-bit response, decode to our card CSD structure.
/*
 * Bit positions follow the MMC CSD register layout; each field is pulled
 * out of the raw 128-bit response with UNSTUFF_BITS().  CSD structure
 * version 0 is rejected up front.
 */
128 static int mmc_decode_csd(struct mmc_card *card)
130 struct mmc_csd *csd = &card->csd;
131 unsigned int e, m, a, b;
132 u32 *resp = card->raw_csd;
135 * We only understand CSD structure v1.1 and v1.2.
136 * v1.2 has extra information in bits 15, 11 and 10.
137 * We also support eMMC v4.4 & v4.41.
139 csd->structure = UNSTUFF_BITS(resp, 126, 2);
140 if (csd->structure == 0) {
141 pr_err("%s: unrecognised CSD structure version %d\n",
142 mmc_hostname(card->host), csd->structure);
146 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
/* TAAC: asynchronous access time = unit * value; +9 rounds the /10 up
 * because tacc_mant is stored scaled by 10. */
147 m = UNSTUFF_BITS(resp, 115, 4);
148 e = UNSTUFF_BITS(resp, 112, 3);
149 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
150 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
/* TRAN_SPEED: maximum data transfer rate. */
152 m = UNSTUFF_BITS(resp, 99, 4);
153 e = UNSTUFF_BITS(resp, 96, 3);
154 csd->max_dtr = tran_exp[e] * tran_mant[m];
155 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
/* Capacity = (C_SIZE + 1) * 2^(C_SIZE_MULT + 2), in read-block units. */
157 e = UNSTUFF_BITS(resp, 47, 3);
158 m = UNSTUFF_BITS(resp, 62, 12);
159 csd->capacity = (1 + m) << (e + 2);
161 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
162 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
163 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
164 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
165 csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
166 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
167 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
168 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
/* Erase size in 512-byte sectors, derived from ERASE_GRP_SIZE and
 * ERASE_GRP_MULT; only meaningful for write blocks >= 512 bytes. */
170 if (csd->write_blkbits >= 9) {
171 a = UNSTUFF_BITS(resp, 42, 5);
172 b = UNSTUFF_BITS(resp, 37, 5);
173 csd->erase_size = (a + 1) * (b + 1);
174 csd->erase_size <<= csd->write_blkbits - 9;
/*
 * Read the card's 512-byte EXT_CSD register into a freshly kmalloc'd
 * buffer and hand it back via *new_ext_csd.  Ownership of the buffer
 * transfers to the caller on success.  Pre-v4 cards have no EXT_CSD and
 * are skipped.  On a read failure, a card whose CSD reports the 4096*512
 * "magic" capacity is treated as a likely high-capacity card and rejected;
 * otherwise the failure is only a performance warning.
 */
183 static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
189 BUG_ON(!new_ext_csd);
193 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
197 * As the ext_csd is so large and mostly unused, we don't store the
198 * raw block in mmc_card.
200 ext_csd = kmalloc(512, GFP_KERNEL);
202 pr_err("%s: could not allocate a buffer to "
203 "receive the ext_csd.\n", mmc_hostname(card->host));
207 err = mmc_send_ext_csd(card, ext_csd);
212 /* If the host or the card can't do the switch,
213 * fail more gracefully. */
220 * High capacity cards should have this "magic" size
221 * stored in their CSD.
223 if (card->csd.capacity == (4096 * 512)) {
224 pr_err("%s: unable to read EXT_CSD "
225 "on a possible high capacity card. "
226 "Card will be ignored.\n",
227 mmc_hostname(card->host));
229 pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
230 mmc_hostname(card->host));
234 *new_ext_csd = ext_csd;
/*
 * Intersect the card's advertised speed modes (EXT_CSD CARD_TYPE byte)
 * with the host's capabilities, and record the result in
 * card->mmc_avail_type plus the matching max clock rates.  The checks
 * run in ascending speed order, so hs_max_dtr ends up at the fastest
 * mutually-supported HS/DDR rate; HS400 reuses the HS200 max rate here.
 */
239 static void mmc_select_card_type(struct mmc_card *card)
241 struct mmc_host *host = card->host;
242 u8 card_type = card->ext_csd.raw_card_type;
243 u32 caps = host->caps, caps2 = host->caps2;
244 unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
245 unsigned int avail_type = 0;
247 if (caps & MMC_CAP_MMC_HIGHSPEED &&
248 card_type & EXT_CSD_CARD_TYPE_HS_26) {
249 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
250 avail_type |= EXT_CSD_CARD_TYPE_HS_26;
253 if (caps & MMC_CAP_MMC_HIGHSPEED &&
254 card_type & EXT_CSD_CARD_TYPE_HS_52) {
255 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
256 avail_type |= EXT_CSD_CARD_TYPE_HS_52;
259 if (caps & MMC_CAP_1_8V_DDR &&
260 card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
261 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
262 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
265 if (caps & MMC_CAP_1_2V_DDR &&
266 card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
267 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
268 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
271 if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
272 card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
273 hs200_max_dtr = MMC_HS200_MAX_DTR;
274 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
277 if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
278 card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
279 hs200_max_dtr = MMC_HS200_MAX_DTR;
280 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
283 if (caps2 & MMC_CAP2_HS400_1_8V &&
284 card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
285 hs200_max_dtr = MMC_HS200_MAX_DTR;
286 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
289 if (caps2 & MMC_CAP2_HS400_1_2V &&
290 card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
291 hs200_max_dtr = MMC_HS200_MAX_DTR;
292 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
295 card->ext_csd.hs_max_dtr = hs_max_dtr;
296 card->ext_csd.hs200_max_dtr = hs200_max_dtr;
297 card->mmc_avail_type = avail_type;
/*
 * Parse the EXT_CSD "enhanced user data area" fields.  The offset/size are
 * defaulted to -EINVAL (disabled) and only filled in when the partition
 * attribute says the enhanced area is enabled AND partitioning has been
 * marked complete; otherwise a warning is logged.
 */
300 static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
302 u8 hc_erase_grp_sz, hc_wp_grp_sz;
305 * Disable these attributes by default
307 card->ext_csd.enhanced_area_offset = -EINVAL;
308 card->ext_csd.enhanced_area_size = -EINVAL;
311 * Enhanced area feature support -- check whether the eMMC
312 * card has the Enhanced area enabled. If so, export enhanced
313 * area offset and size to user by adding sysfs interface.
315 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
316 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
317 if (card->ext_csd.partition_setting_completed) {
319 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
321 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
324 * calculate the enhanced data area offset, in bytes
/* ENH_START_ADDR, bytes 136-139, little-endian; block-addressed cards
 * store it in 512-byte sectors, hence the << 9. */
326 card->ext_csd.enhanced_area_offset =
327 (ext_csd[139] << 24) + (ext_csd[138] << 16) +
328 (ext_csd[137] << 8) + ext_csd[136];
329 if (mmc_card_blockaddr(card))
330 card->ext_csd.enhanced_area_offset <<= 9;
332 * calculate the enhanced data area size, in kilobytes
334 card->ext_csd.enhanced_area_size =
335 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
337 card->ext_csd.enhanced_area_size *=
338 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
339 card->ext_csd.enhanced_area_size <<= 9;
341 pr_warn("%s: defines enhanced area without partition setting complete\n",
342 mmc_hostname(card->host));
/*
 * Register the card's general-purpose partitions.  Each of the
 * MMC_NUM_GP_PARTITION slots has a 3-byte little-endian size multiplier in
 * EXT_CSD; a slot whose three bytes are all zero is skipped.  Sizes are in
 * units of (hc_erase_grp_sz * hc_wp_grp_sz) half-megabyte groups, hence the
 * "part_size << 19" when calling mmc_part_add().  A defined size without
 * partition-setting-complete only triggers a warning.
 */
347 static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
350 u8 hc_erase_grp_sz, hc_wp_grp_sz;
351 unsigned int part_size;
354 * General purpose partition feature support --
355 * If ext_csd has the size of general purpose partitions,
356 * set size, part_cfg, partition name in mmc_part.
358 if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
359 EXT_CSD_PART_SUPPORT_PART_EN) {
361 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
363 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
365 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
366 if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
367 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
368 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
370 if (card->ext_csd.partition_setting_completed == 0) {
371 pr_warn("%s: has partition size defined without partition complete\n",
372 mmc_hostname(card->host));
376 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
378 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
380 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
381 part_size *= (size_t)(hc_erase_grp_sz *
383 mmc_part_add(card, part_size << 19,
384 EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
386 MMC_BLK_DATA_AREA_GP);
392 * Decode extended CSD.
/*
 * Walk the raw 512-byte EXT_CSD buffer and cache every field the core
 * needs in card->ext_csd, gated on the EXT_CSD revision so older cards
 * never read fields they do not define.  Also registers boot and RPMB
 * partitions via mmc_part_add() and delegates the enhanced-area and
 * general-purpose-partition parsing to the helpers above.
 */
394 static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
397 unsigned int part_size;
404 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
405 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
406 if (card->csd.structure == 3) {
407 if (card->ext_csd.raw_ext_csd_structure > 2) {
408 pr_err("%s: unrecognised EXT_CSD structure "
409 "version %d\n", mmc_hostname(card->host),
410 card->ext_csd.raw_ext_csd_structure);
417 * The EXT_CSD format is meant to be forward compatible. As long
418 * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
419 * are authorized, see JEDEC JESD84-B50 section B.8.
421 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
/* Keep the raw SEC_CNT bytes for the bus-width comparison in
 * mmc_compare_ext_csds(). */
423 card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
424 card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
425 card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
426 card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
427 if (card->ext_csd.rev >= 2) {
428 card->ext_csd.sectors =
429 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
430 ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
431 ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
432 ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
434 /* Cards with density > 2GiB are sector addressed */
435 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
436 mmc_card_set_blockaddr(card);
439 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
440 mmc_select_card_type(card);
442 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
443 card->ext_csd.raw_erase_timeout_mult =
444 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
445 card->ext_csd.raw_hc_erase_grp_size =
446 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
447 if (card->ext_csd.rev >= 3) {
448 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
449 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
451 /* EXT_CSD value is in units of 10ms, but we store in ms */
452 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
454 /* Sleep / awake timeout in 100ns units */
/* S_A_TIMEOUT is an exponent; values outside 1..0x17 are reserved. */
455 if (sa_shift > 0 && sa_shift <= 0x17)
456 card->ext_csd.sa_timeout =
457 1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
458 card->ext_csd.erase_group_def =
459 ext_csd[EXT_CSD_ERASE_GROUP_DEF];
460 card->ext_csd.hc_erase_timeout = 300 *
461 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
462 card->ext_csd.hc_erase_size =
463 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
465 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
468 * There are two boot regions of equal size, defined in
/* Boot partitions: BOOT_SIZE_MULT is in 128KiB units (<< 17). */
471 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
472 for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
473 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
474 mmc_part_add(card, part_size,
475 EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
477 MMC_BLK_DATA_AREA_BOOT);
482 card->ext_csd.raw_hc_erase_gap_size =
483 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
484 card->ext_csd.raw_sec_trim_mult =
485 ext_csd[EXT_CSD_SEC_TRIM_MULT];
486 card->ext_csd.raw_sec_erase_mult =
487 ext_csd[EXT_CSD_SEC_ERASE_MULT];
488 card->ext_csd.raw_sec_feature_support =
489 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
490 card->ext_csd.raw_trim_mult =
491 ext_csd[EXT_CSD_TRIM_MULT];
492 card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
493 if (card->ext_csd.rev >= 4) {
494 if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
495 EXT_CSD_PART_SETTING_COMPLETED)
496 card->ext_csd.partition_setting_completed = 1;
498 card->ext_csd.partition_setting_completed = 0;
500 mmc_manage_enhanced_area(card, ext_csd);
502 mmc_manage_gp_partitions(card, ext_csd);
504 card->ext_csd.sec_trim_mult =
505 ext_csd[EXT_CSD_SEC_TRIM_MULT];
506 card->ext_csd.sec_erase_mult =
507 ext_csd[EXT_CSD_SEC_ERASE_MULT];
508 card->ext_csd.sec_feature_support =
509 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
510 card->ext_csd.trim_timeout = 300 *
511 ext_csd[EXT_CSD_TRIM_MULT];
514 * Note that the call to mmc_part_add above defaults to read
515 * only. If this default assumption is changed, the call must
516 * take into account the value of boot_locked below.
518 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
519 card->ext_csd.boot_ro_lockable = true;
521 /* Save power class values */
522 card->ext_csd.raw_pwr_cl_52_195 =
523 ext_csd[EXT_CSD_PWR_CL_52_195];
524 card->ext_csd.raw_pwr_cl_26_195 =
525 ext_csd[EXT_CSD_PWR_CL_26_195];
526 card->ext_csd.raw_pwr_cl_52_360 =
527 ext_csd[EXT_CSD_PWR_CL_52_360];
528 card->ext_csd.raw_pwr_cl_26_360 =
529 ext_csd[EXT_CSD_PWR_CL_26_360];
530 card->ext_csd.raw_pwr_cl_200_195 =
531 ext_csd[EXT_CSD_PWR_CL_200_195];
532 card->ext_csd.raw_pwr_cl_200_360 =
533 ext_csd[EXT_CSD_PWR_CL_200_360];
534 card->ext_csd.raw_pwr_cl_ddr_52_195 =
535 ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
536 card->ext_csd.raw_pwr_cl_ddr_52_360 =
537 ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
538 card->ext_csd.raw_pwr_cl_ddr_200_360 =
539 ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
542 if (card->ext_csd.rev >= 5) {
543 /* Adjust production date as per JEDEC JESD84-B451 */
544 if (card->cid.year < 2010)
545 card->cid.year += 16;
547 /* check whether the eMMC card supports BKOPS */
548 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
549 card->ext_csd.bkops = 1;
550 card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
551 card->ext_csd.raw_bkops_status =
552 ext_csd[EXT_CSD_BKOPS_STATUS];
553 if (!card->ext_csd.bkops_en)
554 pr_info("%s: BKOPS_EN bit is not set\n",
555 mmc_hostname(card->host));
558 /* check whether the eMMC card supports HPI */
559 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
560 card->ext_csd.hpi = 1;
561 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
562 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
564 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
566 * Indicate the maximum timeout to close
567 * a command interrupted by HPI
569 card->ext_csd.out_of_int_time =
570 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
573 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
574 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
577 * RPMB regions are defined in multiples of 128K.
579 card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
580 if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
581 mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
582 EXT_CSD_PART_CONFIG_ACC_RPMB,
584 MMC_BLK_DATA_AREA_RPMB);
588 card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
589 if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
590 card->erased_byte = 0xFF;
592 card->erased_byte = 0x0;
594 /* eMMC v4.5 or later */
595 if (card->ext_csd.rev >= 6) {
596 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
598 card->ext_csd.generic_cmd6_time = 10 *
599 ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
600 card->ext_csd.power_off_longtime = 10 *
601 ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
603 card->ext_csd.cache_size =
604 ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
605 ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
606 ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
607 ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
609 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
610 card->ext_csd.data_sector_size = 4096;
612 card->ext_csd.data_sector_size = 512;
614 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
615 (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
616 card->ext_csd.data_tag_unit_size =
617 ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
618 (card->ext_csd.data_sector_size);
620 card->ext_csd.data_tag_unit_size = 0;
623 card->ext_csd.max_packed_writes =
624 ext_csd[EXT_CSD_MAX_PACKED_WRITES];
625 card->ext_csd.max_packed_reads =
626 ext_csd[EXT_CSD_MAX_PACKED_READS];
/* Pre-v4.5 cards: fall back to the default 512-byte sector size. */
628 card->ext_csd.data_sector_size = 512;
631 /* eMMC v5 or later */
632 if (card->ext_csd.rev >= 7) {
633 memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
/* FFU capable = mode supported (SUPPORTED_MODE bit 0) and updates not
 * disabled (FW_CONFIG bit 0 clear). */
635 card->ext_csd.ffu_capable =
636 (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
637 !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
/*
 * Sanity-check a bus-width switch by re-reading EXT_CSD at the new width
 * and comparing every read-only field against the values cached from the
 * 1-bit read.  Returns non-zero (via the negated conjunction) if any
 * field differs, i.e. the wider bus is corrupting data.  A 1-bit width
 * needs no check.
 */
643 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
648 if (bus_width == MMC_BUS_WIDTH_1)
651 err = mmc_get_ext_csd(card, &bw_ext_csd);
653 if (err || bw_ext_csd == NULL) {
658 /* only compare read only fields */
659 err = !((card->ext_csd.raw_partition_support ==
660 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
661 (card->ext_csd.raw_erased_mem_count ==
662 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
663 (card->ext_csd.rev ==
664 bw_ext_csd[EXT_CSD_REV]) &&
665 (card->ext_csd.raw_ext_csd_structure ==
666 bw_ext_csd[EXT_CSD_STRUCTURE]) &&
667 (card->ext_csd.raw_card_type ==
668 bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
669 (card->ext_csd.raw_s_a_timeout ==
670 bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
671 (card->ext_csd.raw_hc_erase_gap_size ==
672 bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
673 (card->ext_csd.raw_erase_timeout_mult ==
674 bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
675 (card->ext_csd.raw_hc_erase_grp_size ==
676 bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
677 (card->ext_csd.raw_sec_trim_mult ==
678 bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
679 (card->ext_csd.raw_sec_erase_mult ==
680 bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
681 (card->ext_csd.raw_sec_feature_support ==
682 bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
683 (card->ext_csd.raw_trim_mult ==
684 bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
685 (card->ext_csd.raw_sectors[0] ==
686 bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
687 (card->ext_csd.raw_sectors[1] ==
688 bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
689 (card->ext_csd.raw_sectors[2] ==
690 bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
691 (card->ext_csd.raw_sectors[3] ==
692 bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
693 (card->ext_csd.raw_pwr_cl_52_195 ==
694 bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
695 (card->ext_csd.raw_pwr_cl_26_195 ==
696 bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
697 (card->ext_csd.raw_pwr_cl_52_360 ==
698 bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
699 (card->ext_csd.raw_pwr_cl_26_360 ==
700 bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
701 (card->ext_csd.raw_pwr_cl_200_195 ==
702 bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
703 (card->ext_csd.raw_pwr_cl_200_360 ==
704 bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
705 (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
706 bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
707 (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
708 bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
709 (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
710 bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
/*
 * sysfs attribute definitions: each MMC_DEV_ATTR(name, fmt, args...)
 * expands to a read-only show() routine that formats the given card
 * fields.  These back the files listed in mmc_std_attrs below.
 */
720 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
721 card->raw_cid[2], card->raw_cid[3]);
722 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
723 card->raw_csd[2], card->raw_csd[3]);
724 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
725 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
726 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
727 MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
728 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
729 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
730 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
731 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
732 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
733 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
734 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
735 card->ext_csd.enhanced_area_offset);
736 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
737 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
738 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
/*
 * sysfs "fwrev" needs a hand-written show(): pre-v5 (rev < 7) cards expose
 * the 4-bit CID firmware nibble, while eMMC v5+ cards expose the 8-byte
 * EXT_CSD firmware version, printed as hex via the "%*phN" extension.
 */
740 static ssize_t mmc_fwrev_show(struct device *dev,
741 struct device_attribute *attr,
744 struct mmc_card *card = mmc_dev_to_card(dev);
746 if (card->ext_csd.rev < 7) {
747 return sprintf(buf, "0x%x\n", card->cid.fwrev);
749 return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
750 card->ext_csd.fwrev);
754 static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
/*
 * Standard sysfs attributes exported for every MMC card; grouped via
 * ATTRIBUTE_GROUPS() for use in the device_type below.
 * NOTE(review): several entries visible in the numbering gaps (e.g. cid,
 * csd, date, name, prv and the terminating NULL) were dropped by the
 * extraction.
 */
756 static struct attribute *mmc_std_attrs[] = {
760 &dev_attr_erase_size.attr,
761 &dev_attr_preferred_erase_size.attr,
762 &dev_attr_fwrev.attr,
763 &dev_attr_ffu_capable.attr,
764 &dev_attr_hwrev.attr,
765 &dev_attr_manfid.attr,
767 &dev_attr_oemid.attr,
769 &dev_attr_serial.attr,
770 &dev_attr_enhanced_area_offset.attr,
771 &dev_attr_enhanced_area_size.attr,
772 &dev_attr_raw_rpmb_size_mult.attr,
773 &dev_attr_rel_sectors.attr,
776 ATTRIBUTE_GROUPS(mmc_std);
/* Device type attaching the standard attribute groups to MMC card devices. */
778 static struct device_type mmc_type = {
779 .groups = mmc_std_groups,
783 * Select the PowerClass for the current bus width
784 * If power class is defined for 4/8 bit bus in the
785 * extended CSD register, select it by executing the
786 * mmc_switch command.
/*
 * The raw power-class byte is chosen by supply voltage (host->ios.vdd)
 * and clock rate; it packs the 4-bit and 8-bit bus power classes into one
 * byte, so the relevant nibble is masked/shifted out by bus width before
 * being written to EXT_CSD via mmc_switch() (only if non-default, i.e. > 0).
 * NOTE(review): the 3.3V case label and the default: branch lines are
 * among those dropped by the extraction (gap 806-815 and around 826-832).
 */
788 static int __mmc_select_powerclass(struct mmc_card *card,
789 unsigned int bus_width)
791 struct mmc_host *host = card->host;
792 struct mmc_ext_csd *ext_csd = &card->ext_csd;
793 unsigned int pwrclass_val = 0;
796 switch (1 << host->ios.vdd) {
797 case MMC_VDD_165_195:
798 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
799 pwrclass_val = ext_csd->raw_pwr_cl_26_195;
800 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
801 pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
802 ext_csd->raw_pwr_cl_52_195 :
803 ext_csd->raw_pwr_cl_ddr_52_195;
804 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
805 pwrclass_val = ext_csd->raw_pwr_cl_200_195;
816 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
817 pwrclass_val = ext_csd->raw_pwr_cl_26_360;
818 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
819 pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
820 ext_csd->raw_pwr_cl_52_360 :
821 ext_csd->raw_pwr_cl_ddr_52_360;
822 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
823 pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
824 ext_csd->raw_pwr_cl_ddr_200_360 :
825 ext_csd->raw_pwr_cl_200_360;
828 pr_warn("%s: Voltage range not supported for power class\n",
/* Pick the 8-bit or 4-bit nibble of the packed power-class byte. */
833 if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
834 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
835 EXT_CSD_PWR_CL_8BIT_SHIFT;
837 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
838 EXT_CSD_PWR_CL_4BIT_SHIFT;
840 /* If the power class is different from the default value */
841 if (pwrclass_val > 0) {
842 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
845 card->ext_csd.generic_cmd6_time);
/*
 * Wrapper around __mmc_select_powerclass(): translate the host's current
 * bus width (and whether DDR is in use) into the EXT_CSD bus-width encoding
 * and apply the matching power class.  Skipped for pre-v4 cards and for
 * 1-bit bus, where no power class is defined; failure is only a warning.
 */
851 static int mmc_select_powerclass(struct mmc_card *card)
853 struct mmc_host *host = card->host;
854 u32 bus_width, ext_csd_bits;
857 /* Power class selection is supported for versions >= 4.0 */
858 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
861 bus_width = host->ios.bus_width;
862 /* Power class values are defined only for 4/8 bit bus */
863 if (bus_width == MMC_BUS_WIDTH_1)
866 ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
868 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
869 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
871 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
872 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
874 err = __mmc_select_powerclass(card, ext_csd_bits);
876 pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
877 mmc_hostname(host), 1 << bus_width, ddr);
883 * Set the bus speed for the selected speed mode.
/*
 * Clamp the clock to the max rate for the active timing mode (HS200/HS400
 * share hs200_max_dtr, HS uses hs_max_dtr, legacy falls back to the CSD
 * rate) and program the host.  max_dtr starts at UINT_MAX so each branch
 * only ever lowers it.
 */
885 static void mmc_set_bus_speed(struct mmc_card *card)
887 unsigned int max_dtr = (unsigned int)-1;
889 if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
890 max_dtr > card->ext_csd.hs200_max_dtr)
891 max_dtr = card->ext_csd.hs200_max_dtr;
892 else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
893 max_dtr = card->ext_csd.hs_max_dtr;
894 else if (max_dtr > card->csd.max_dtr)
895 max_dtr = card->csd.max_dtr;
897 mmc_set_clock(card->host, max_dtr);
901 * Select the bus width amoung 4-bit and 8-bit(SDR).
902 * If the bus width is changed successfully, return the selected width value.
903 * Zero is returned instead of error value if the wide width is not supported.
/*
 * Try the widest bus first (8-bit if the host supports it, else 4-bit),
 * switching the card with CMD6 and then validating the width either with
 * the CMD19/CMD14 bus test (MMC_CAP_BUS_WIDTH_TEST) or by re-reading
 * EXT_CSD and comparing against the 1-bit read.  On failure, fall through
 * to the next narrower width.
 */
905 static int mmc_select_bus_width(struct mmc_card *card)
907 static unsigned ext_csd_bits[] = {
911 static unsigned bus_widths[] = {
915 struct mmc_host *host = card->host;
916 unsigned idx, bus_width = 0;
919 if ((card->csd.mmca_vsn < CSD_SPEC_VER_4) &&
920 !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
/* idx 0 = 8-bit entry, idx 1 = 4-bit entry in the tables above. */
923 idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
926 * Unlike SD, MMC cards dont have a configuration register to notify
927 * supported bus width. So bus test command should be run to identify
928 * the supported bus width or compare the ext csd values of current
929 * bus width and ext csd values of 1 bit mode read earlier.
931 for (; idx < ARRAY_SIZE(bus_widths); idx++) {
933 * Host is capable of 8bit transfer, then switch
934 * the device to work in 8bit transfer mode. If the
935 * mmc switch command returns error then switch to
936 * 4bit transfer mode. On success set the corresponding
937 * bus width on the host.
939 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
942 card->ext_csd.generic_cmd6_time);
946 bus_width = bus_widths[idx];
947 mmc_set_bus_width(host, bus_width);
950 * If controller can't handle bus width test,
951 * compare ext_csd previously read in 1 bit mode
952 * against ext_csd at new bus width
954 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
955 err = mmc_compare_ext_csds(card, bus_width);
957 err = mmc_bus_test(card, bus_width);
963 pr_warn("%s: switch to bus width %d failed\n",
964 mmc_hostname(host), ext_csd_bits[idx]);
972 * Switch to the high-speed mode
/*
 * Write HS_TIMING = HS via CMD6 and, on success (elided check), update the
 * host timing to MMC_TIMING_MMC_HS.
 */
974 static int mmc_select_hs(struct mmc_card *card)
978 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
979 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
980 card->ext_csd.generic_cmd6_time,
983 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
989 * Activate wide bus and DDR if supported.
/*
 * Switch an HS card into DDR52: requires DDR support in mmc_avail_type and
 * a wide (4/8-bit) bus already selected.  After the CMD6 bus-width switch,
 * negotiate vccq preferring 1.2V, then 1.8V, then falling back to 3.3V,
 * and finally set the DDR52 timing on the host.
 */
991 static int mmc_select_hs_ddr(struct mmc_card *card)
993 struct mmc_host *host = card->host;
994 u32 bus_width, ext_csd_bits;
997 if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
1000 bus_width = host->ios.bus_width;
1001 if (bus_width == MMC_BUS_WIDTH_1)
1004 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
1005 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
1007 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1010 card->ext_csd.generic_cmd6_time);
1012 pr_err("%s: switch to bus width %d ddr failed\n",
1013 mmc_hostname(host), 1 << bus_width);
1018 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
1021 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
1023 * 1.8V vccq at 3.3V core voltage (vcc) is not required
1024 * in the JEDEC spec for DDR.
1026 * Even (e)MMC card can support 3.3v to 1.2v vccq, but not all
1027 * host controller can support this, like some of the SDHCI
1028 * controller which connect to an eMMC device. Some of these
1029 * host controller still needs to use 1.8v vccq for supporting
1032 * So the sequence will be:
1033 * if (host and device can both support 1.2v IO)
1035 * else if (host and device can both support 1.8v IO)
1037 * so if host and device can only support 3.3v IO, this is the
1040 * WARNING: eMMC rules are NOT the same as SD DDR
1043 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
1044 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1046 if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
1047 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1049 /* make sure vccq is 3.3v after switching disaster */
1051 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
1054 mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
/*
 * Promote a card currently in HS200 to HS400.  The JEDEC-mandated sequence
 * is: drop back to HS timing (with the matching lower clock), switch the
 * bus to DDR 8-bit, then set HS_TIMING = HS400 and raise the clock again.
 * Requires HS400 support and an 8-bit bus; each CMD6 failure is logged and
 * aborts the sequence (return paths elided by the extraction).
 */
1059 static int mmc_select_hs400(struct mmc_card *card)
1061 struct mmc_host *host = card->host;
1065 * HS400 mode requires 8-bit bus width
1067 if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1068 host->ios.bus_width == MMC_BUS_WIDTH_8))
1072 * Before switching to dual data rate operation for HS400,
1073 * it is required to convert from HS200 mode to HS mode.
1075 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1076 mmc_set_bus_speed(card);
1078 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1079 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1080 card->ext_csd.generic_cmd6_time,
1083 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1084 mmc_hostname(host), err);
1088 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1090 EXT_CSD_DDR_BUS_WIDTH_8,
1091 card->ext_csd.generic_cmd6_time);
1093 pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
1094 mmc_hostname(host), err);
1098 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1099 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400,
1100 card->ext_csd.generic_cmd6_time,
1103 pr_err("%s: switch to hs400 failed, err:%d\n",
1104 mmc_hostname(host), err);
1108 mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1109 mmc_set_bus_speed(card);
1115 * For device supporting HS200 mode, the following sequence
1116 * should be done before executing the tuning process.
1117 * 1. set the desired bus width(4-bit or 8-bit, 1-bit is not supported)
1118 * 2. switch to HS200 mode
1119 * 3. set the clock to > 52Mhz and <=200MHz
/*
 * vccq negotiation mirrors mmc_select_hs_ddr(): prefer 1.2V, fall back to
 * 1.8V; a voltage failure is deferred to the next power cycle.  Only after
 * a successful wide-bus selection is HS_TIMING switched to HS200.
 */
1121 static int mmc_select_hs200(struct mmc_card *card)
1123 struct mmc_host *host = card->host;
1126 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1127 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1129 if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1130 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1132 /* If fails try again during next card power cycle */
1137 * Set the bus width(4 or 8) with host's support and
1138 * switch to HS200 mode if bus width is set successfully.
1140 err = mmc_select_bus_width(card);
1141 if (!IS_ERR_VALUE(err)) {
1142 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1143 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200,
1144 card->ext_csd.generic_cmd6_time,
1147 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1154 * Activate High Speed or HS200 mode if supported.
/*
 * Pick the best timing the card/host pair supports (HS200 preferred over
 * HS), tolerate -EBADMSG from the switch (the card stayed in legacy mode
 * and gets only a warning), then program the matching bus speed.
 */
1156 static int mmc_select_timing(struct mmc_card *card)
1160 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
1163 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
1164 err = mmc_select_hs200(card);
1165 else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1166 err = mmc_select_hs(card);
1168 if (err && err != -EBADMSG)
1172 pr_warn("%s: switch to %s failed\n",
1173 mmc_card_hs(card) ? "high-speed" :
1174 (mmc_card_hs200(card) ? "hs200" : ""),
1175 mmc_hostname(card->host));
1181 * Set the bus speed to the selected bus timing.
1182 * If timing is not selected, backward compatible is the default.
1184 mmc_set_bus_speed(card);
/*
 * Reference tuning block for 4-bit bus width, read back by the host via
 * the tuning command during HS200 tuning (see mmc_hs200_tuning() below).
 * Presumably this is the JEDEC-defined tuning pattern - verify against
 * the eMMC specification; do not modify the byte values.
 */
1188 const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE] = {
1189 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
1190 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
1191 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
1192 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
1193 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
1194 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
1195 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
1196 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
/* Exported so host controller drivers can compare against this pattern. */
1198 EXPORT_SYMBOL(tuning_blk_pattern_4bit);
/*
 * Reference tuning block for 8-bit bus width, counterpart of
 * tuning_blk_pattern_4bit above.  Presumably the JEDEC-defined pattern -
 * verify against the eMMC specification; do not modify the byte values.
 */
1200 const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE] = {
1201 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
1202 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
1203 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
1204 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
1205 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
1206 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
1207 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
1208 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
1209 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
1210 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
1211 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
1212 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
1213 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
1214 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
1215 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
1216 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
/* Exported so host controller drivers can compare against this pattern. */
1218 EXPORT_SYMBOL(tuning_blk_pattern_8bit);
/*
 * mmc_hs200_tuning() - run the host's tuning procedure at HS200 timing.
 *
 * If the card can do HS400 and the bus is already 8 bits wide, the host
 * is first given a chance to adjust itself via the optional
 * prepare_hs400_tuning() hook, then the (also optional) execute_tuning()
 * host callback is invoked with the HS200 tuning command opcode.
 */
1221 * Execute tuning sequence to seek the proper bus operating
1222 * conditions for HS200 and HS400, which sends CMD21 to the device.
1224 static int mmc_hs200_tuning(struct mmc_card *card)
1226 struct mmc_host *host = card->host;
1230 * Timing should be adjusted to the HS400 target
1231 * operation frequency for tuning process
1233 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1234 host->ios.bus_width == MMC_BUS_WIDTH_8)
/* Hook is optional: only call it when the host driver provides one. */
1235 if (host->ops->prepare_hs400_tuning)
1236 host->ops->prepare_hs400_tuning(host, &host->ios);
1238 if (host->ops->execute_tuning) {
/* Keep the host clock running for the duration of the tuning callback. */
1239 mmc_host_clk_hold(host);
1240 err = host->ops->execute_tuning(host,
1241 MMC_SEND_TUNING_BLOCK_HS200);
1242 mmc_host_clk_release(host);
1245 pr_err("%s: tuning execution failed\n",
1246 mmc_hostname(host));
/*
 * mmc_init_card() - full (re)initialisation sequence for an (e)MMC card.
 *
 * Called both at first attach (oldcard == NULL) and at resume, where
 * "oldcard" is the previously known card and the freshly read CID must
 * match it.  NOTE(review): this extraction omits many interior lines
 * (error checks, gotos, braces); the comments below describe only what
 * the visible statements establish.
 */
1253 * Handle the detection and initialisation of a card.
1255 * In the case of a resume, "oldcard" will contain the card
1256 * we're trying to reinitialise.
1258 static int mmc_init_card(struct mmc_host *host, u32 ocr,
1259 struct mmc_card *oldcard)
1261 struct mmc_card *card;
1268 WARN_ON(!host->claimed);
1270 /* Set correct bus mode for MMC before attempting init */
1271 if (!mmc_host_is_spi(host))
1272 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1275 * Since we're changing the OCR value, we seem to
1276 * need to tell some cards to go back to the idle
1277 * state. We wait 1ms to give cards time to
1279 * mmc_go_idle is needed for eMMC that are asleep
1283 /* The extra bit indicates that we support high capacity */
1284 err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
1289 * For SPI, enable CRC as appropriate.
1291 if (mmc_host_is_spi(host)) {
1292 err = mmc_spi_set_crc(host, use_spi_crc);
1298 * Fetch CID from card.
1300 if (mmc_host_is_spi(host))
1301 err = mmc_send_cid(host, cid);
1303 err = mmc_all_send_cid(host, cid);
/* On resume, the CID read back must match the card we saved earlier. */
1308 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
1316 * Allocate card structure.
1318 card = mmc_alloc_card(host, &mmc_type);
1320 err = PTR_ERR(card);
1325 card->type = MMC_TYPE_MMC;
1327 memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
1331 * For native busses: set card RCA and quit open drain mode.
1333 if (!mmc_host_is_spi(host)) {
1334 err = mmc_set_relative_addr(card);
1338 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1343 * Fetch CSD from card.
1345 err = mmc_send_csd(card, card->raw_csd);
/* Decode CSD first: mmc_decode_cid() depends on csd.mmca_vsn. */
1349 err = mmc_decode_csd(card);
1352 err = mmc_decode_cid(card);
1358 * handling only for cards supporting DSR and hosts requesting
1361 if (card->csd.dsr_imp && host->dsr_req)
1365 * Select card, as all following commands rely on that.
1367 if (!mmc_host_is_spi(host)) {
1368 err = mmc_select_card(card);
1375 * Fetch and process extended CSD.
1378 err = mmc_get_ext_csd(card, &ext_csd);
1381 err = mmc_read_ext_csd(card, ext_csd);
1385 /* If doing byte addressing, check if required to do sector
1386 * addressing. Handle the case of <2GB cards needing sector
1387 * addressing. See section 8.1 JEDEC Standard JED84-A441;
1388 * ocr register has bit 30 set for sector addressing.
1390 if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
1391 mmc_card_set_blockaddr(card);
1393 /* Erase size depends on CSD and Extended CSD */
1394 mmc_set_erase_size(card);
1398 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
1399 * bit. This bit will be lost every time after a reset or power off.
1401 if (card->ext_csd.partition_setting_completed ||
1402 (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
1403 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1404 EXT_CSD_ERASE_GROUP_DEF, 1,
1405 card->ext_csd.generic_cmd6_time);
1407 if (err && err != -EBADMSG)
1413 * Just disable enhanced area off & sz
1414 * will try to enable ERASE_GROUP_DEF
1415 * during next time reinit
1417 card->ext_csd.enhanced_area_offset = -EINVAL;
1418 card->ext_csd.enhanced_area_size = -EINVAL;
1420 card->ext_csd.erase_group_def = 1;
1422 * enable ERASE_GRP_DEF successfully.
1423 * This will affect the erase size, so
1424 * here need to reset erase size
1426 mmc_set_erase_size(card);
1431 * Ensure eMMC user default partition is enabled
1433 if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
1434 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1435 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
1436 card->ext_csd.part_config,
1437 card->ext_csd.part_time);
1438 if (err && err != -EBADMSG)
1443 * Enable power_off_notification byte in the ext_csd register
1445 if (card->ext_csd.rev >= 6) {
1446 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1447 EXT_CSD_POWER_OFF_NOTIFICATION,
1449 card->ext_csd.generic_cmd6_time);
1450 if (err && err != -EBADMSG)
1454 * The err can be -EBADMSG or 0,
1455 * so check for success and update the flag
1458 card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1462 * Select timing interface
1464 err = mmc_select_timing(card);
/* HS200: tuning must succeed before the optional upgrade to HS400. */
1468 if (mmc_card_hs200(card)) {
1469 err = mmc_hs200_tuning(card);
1473 err = mmc_select_hs400(card);
1476 } else if (mmc_card_hs(card)) {
1477 /* Select the desired bus width optionally */
1478 err = mmc_select_bus_width(card);
1479 if (!IS_ERR_VALUE(err)) {
1480 err = mmc_select_hs_ddr(card);
1487 * Choose the power class with selected bus interface
1489 mmc_select_powerclass(card);
1492 * Enable HPI feature (if supported)
1494 if (card->ext_csd.hpi) {
1495 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1496 EXT_CSD_HPI_MGMT, 1,
1497 card->ext_csd.generic_cmd6_time);
1498 if (err && err != -EBADMSG)
1501 pr_warn("%s: Enabling HPI failed\n",
1502 mmc_hostname(card->host));
1505 card->ext_csd.hpi_en = 1;
1509 * If cache size is higher than 0, this indicates
1510 * the existence of cache and it can be turned on.
1512 if (card->ext_csd.cache_size > 0) {
1513 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1514 EXT_CSD_CACHE_CTRL, 1,
1515 card->ext_csd.generic_cmd6_time);
1516 if (err && err != -EBADMSG)
1520 * Only if no error, cache is turned on successfully.
1523 pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
1524 mmc_hostname(card->host), err);
1525 card->ext_csd.cache_ctrl = 0;
1528 card->ext_csd.cache_ctrl = 1;
1533 * The mandatory minimum values are defined for packed command.
1536 if (card->ext_csd.max_packed_writes >= 3 &&
1537 card->ext_csd.max_packed_reads >= 5 &&
1538 host->caps2 & MMC_CAP2_PACKED_CMD) {
1539 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1540 EXT_CSD_EXP_EVENTS_CTRL,
1541 EXT_CSD_PACKED_EVENT_EN,
1542 card->ext_csd.generic_cmd6_time);
1543 if (err && err != -EBADMSG)
1546 pr_warn("%s: Enabling packed event failed\n",
1547 mmc_hostname(card->host));
1548 card->ext_csd.packed_event_en = 0;
1551 card->ext_csd.packed_event_en = 1;
/* Error unwind path: frees the card (labels not visible here). */
1563 mmc_remove_card(card);
/*
 * mmc_can_sleep() - whether the card may be put to sleep via
 * MMC_SLEEP_AWAKE (see mmc_sleep() below): card must exist and its
 * EXT_CSD revision must be at least 3.
 */
1570 static int mmc_can_sleep(struct mmc_card *card)
1572 return (card && card->ext_csd.rev >= 3);
/*
 * mmc_sleep() - deselect the card and issue MMC_SLEEP_AWAKE to put it
 * into its low-power sleep state.
 *
 * sa_timeout is in 100ns units, hence the /10000 conversion to ms
 * (rounded up).  NOTE(review): some interior lines (declarations,
 * error-return checks) are missing from this extraction.
 */
1575 static int mmc_sleep(struct mmc_host *host)
1577 struct mmc_command cmd = {0};
1578 struct mmc_card *card = host->card;
/* sa_timeout is presumably in 100ns units - /10000 yields milliseconds. */
1579 unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
/* The card must be deselected before SLEEP_AWAKE is issued. */
1582 err = mmc_deselect_cards(host);
1586 cmd.opcode = MMC_SLEEP_AWAKE;
1587 cmd.arg = card->rca << 16;
1591 * If the max_busy_timeout of the host is specified, validate it against
1592 * the sleep cmd timeout. A failure means we need to prevent the host
1593 * from doing hw busy detection, which is done by converting to a R1
1594 * response instead of a R1B.
1596 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
1597 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1599 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1600 cmd.busy_timeout = timeout_ms;
1603 err = mmc_wait_for_cmd(host, &cmd, 0);
1608 * If the host does not wait while the card signals busy, then we will
1609 * will have to wait the sleep/awake timeout. Note, we cannot use the
1610 * SEND_STATUS command to poll the status because that command (and most
1611 * others) is invalid while the card sleeps.
1613 if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
1614 mmc_delay(timeout_ms);
/*
 * mmc_can_poweroff_notify() - true when the card is an MMC device whose
 * power-off notification feature was enabled during init (flag set to
 * EXT_CSD_POWER_ON by mmc_init_card()).
 * NOTE(review): the leading "return card &&" line appears to be missing
 * from this extraction.
 */
1619 static int mmc_can_poweroff_notify(const struct mmc_card *card)
1622 mmc_card_mmc(card) &&
1623 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
/*
 * mmc_poweroff_notify() - tell the card that power is about to be cut.
 *
 * @notify_type: EXT_CSD_POWER_OFF_SHORT or EXT_CSD_POWER_OFF_LONG; the
 * long variant uses the card's power_off_longtime as the CMD6 timeout,
 * otherwise the generic CMD6 timeout applies.
 */
1626 static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1628 unsigned int timeout = card->ext_csd.generic_cmd6_time;
1631 /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
1632 if (notify_type == EXT_CSD_POWER_OFF_LONG)
1633 timeout = card->ext_csd.power_off_longtime;
1635 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1636 EXT_CSD_POWER_OFF_NOTIFICATION,
1637 notify_type, timeout, true, false, false);
1639 pr_err("%s: Power Off Notification timed out, %u\n",
1640 mmc_hostname(card->host), timeout);
1642 /* Disable the power off notification after the switch operation. */
1643 card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
/*
 * mmc_remove() - bus-ops .remove callback: free the current card when
 * the host goes away.  A missing card here is a core bug, hence BUG_ON.
 */
1649 * Host is being removed. Free up the current card.
1651 static void mmc_remove(struct mmc_host *host)
1654 BUG_ON(!host->card);
1656 mmc_remove_card(host->card);
/*
 * mmc_alive() - bus-ops .alive callback: probe the card with SEND_STATUS;
 * a nonzero return presumably indicates the card no longer responds.
 */
1661 * Card detection - card is alive.
1663 static int mmc_alive(struct mmc_host *host)
1665 return mmc_send_status(host->card, NULL);
/*
 * mmc_detect() - bus-ops .detect callback: check for card removal and,
 * if the card is gone, tear down the bus and power off the host.
 * NOTE(review): the "if (err)" guard around the teardown is not visible
 * in this extraction.
 */
1669 * Card detection callback from host.
1671 static void mmc_detect(struct mmc_host *host)
1676 BUG_ON(!host->card);
/* Hold a runtime-PM/claim reference on the card while probing it. */
1678 mmc_get_card(host->card);
1681 * Just check if our card has been removed.
1683 err = _mmc_detect_card_removed(host);
1685 mmc_put_card(host->card);
/* Card is gone: detach the bus and power the slot down. */
1690 mmc_claim_host(host);
1691 mmc_detach_bus(host);
1692 mmc_power_off(host);
1693 mmc_release_host(host);
/*
 * _mmc_suspend() - common suspend path for system suspend and shutdown.
 *
 * @is_suspend: selects the short power-off notification (suspend) versus
 * the long one (shutdown/poweroff).  Stops BKOPS, flushes the cache,
 * then notifies/sleeps/deselects the card as its capabilities allow,
 * powers off the host and marks the card suspended.
 */
1697 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
1700 unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
1701 EXT_CSD_POWER_OFF_LONG;
1704 BUG_ON(!host->card);
1706 mmc_claim_host(host);
/* Nothing to do if a previous suspend already ran. */
1708 if (mmc_card_suspended(host->card))
1711 if (mmc_card_doing_bkops(host->card)) {
1712 err = mmc_stop_bkops(host->card);
1717 err = mmc_flush_cache(host->card);
/* Preference order: poweroff-notify, then sleep, then plain deselect. */
1721 if (mmc_can_poweroff_notify(host->card) &&
1722 ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
1723 err = mmc_poweroff_notify(host->card, notify_type);
1724 else if (mmc_can_sleep(host->card))
1725 err = mmc_sleep(host);
1726 else if (!mmc_host_is_spi(host))
1727 err = mmc_deselect_cards(host);
1730 mmc_power_off(host);
1731 mmc_card_set_suspended(host->card);
1734 mmc_release_host(host);
/*
 * mmc_suspend() - bus-ops .suspend callback; on success also disables
 * runtime PM for the card device and marks it runtime-suspended.
 */
1741 static int mmc_suspend(struct mmc_host *host)
1745 err = _mmc_suspend(host, true);
1747 pm_runtime_disable(&host->card->dev);
1748 pm_runtime_set_suspended(&host->card->dev);
/*
 * _mmc_resume() - power the slot back up and reinitialise the same card
 * (mmc_init_card() verifies the CID against the saved one), then clear
 * the suspended flag.
 */
1755 * This function tries to determine if the same card is still present
1756 * and, if so, restore all state to it.
1758 static int _mmc_resume(struct mmc_host *host)
1763 BUG_ON(!host->card);
1765 mmc_claim_host(host);
/* A resume without a prior suspend is a no-op. */
1767 if (!mmc_card_suspended(host->card))
1770 mmc_power_up(host, host->card->ocr);
1771 err = mmc_init_card(host, host->card->ocr, host->card);
1772 mmc_card_clr_suspended(host->card);
1775 mmc_release_host(host);
/*
 * mmc_shutdown() - bus-ops .shutdown callback.
 *
 * If the card wants a power-off notification but the host cannot do a
 * full power cycle, the card must first be resumed so the (long)
 * notification can be delivered by the subsequent suspend.
 */
1782 static int mmc_shutdown(struct mmc_host *host)
1787 * In a specific case for poweroff notify, we need to resume the card
1788 * before we can shutdown it properly.
1790 if (mmc_can_poweroff_notify(host->card) &&
1791 !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
1792 err = _mmc_resume(host);
/* is_suspend=false selects the long power-off notification. */
1795 err = _mmc_suspend(host, false);
/*
 * mmc_resume() - bus-ops .resume callback.
 *
 * When the host does not support MMC_CAP_RUNTIME_RESUME the card is
 * resumed immediately; otherwise resume is deferred to runtime PM.
 * Runtime PM for the card device is re-enabled in both cases.
 */
1801 * Callback for resume.
1803 static int mmc_resume(struct mmc_host *host)
1807 if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
1808 err = _mmc_resume(host);
1809 pm_runtime_set_active(&host->card->dev);
1810 pm_runtime_mark_last_busy(&host->card->dev);
1812 pm_runtime_enable(&host->card->dev);
/*
 * mmc_runtime_suspend() - bus-ops .runtime_suspend callback.
 *
 * Only acts when the host opted in with MMC_CAP_AGGRESSIVE_PM; performs
 * a full suspend of the card.
 * NOTE(review): "aggessive" in the log string is a typo for
 * "aggressive" (fixed upstream).
 */
1818 * Callback for runtime_suspend.
1820 static int mmc_runtime_suspend(struct mmc_host *host)
1824 if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
1827 err = _mmc_suspend(host, true);
1829 pr_err("%s: error %d doing aggessive suspend\n",
1830 mmc_hostname(host), err);
1836 * Callback for runtime_resume.
1838 static int mmc_runtime_resume(struct mmc_host *host)
1842 if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
1845 err = _mmc_resume(host);
1847 pr_err("%s: error %d doing aggessive resume\n",
1848 mmc_hostname(host), err);
/*
 * mmc_power_restore() - bus-ops .power_restore callback: reinitialise
 * the existing card under the host claim after power has been restored.
 */
1853 static int mmc_power_restore(struct mmc_host *host)
1857 mmc_claim_host(host);
1858 ret = mmc_init_card(host, host->card->ocr, host->card);
1859 mmc_release_host(host);
/*
 * Bus operations for (e)MMC cards, registered with the core via
 * mmc_attach_bus().  NOTE(review): the terminating "};" and possibly
 * further members are not visible in this extraction.
 */
1864 static const struct mmc_bus_ops mmc_ops = {
1865 .remove = mmc_remove,
1866 .detect = mmc_detect,
1867 .suspend = mmc_suspend,
1868 .resume = mmc_resume,
1869 .runtime_suspend = mmc_runtime_suspend,
1870 .runtime_resume = mmc_runtime_resume,
1871 .power_restore = mmc_power_restore,
1873 .shutdown = mmc_shutdown,
1877 * Starting point for MMC card init.
1879 int mmc_attach_mmc(struct mmc_host *host)
1885 WARN_ON(!host->claimed);
1887 /* Set correct bus mode for MMC before attempting attach */
1888 if (!mmc_host_is_spi(host))
1889 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1891 err = mmc_send_op_cond(host, 0, &ocr);
1895 mmc_attach_bus(host, &mmc_ops);
1896 if (host->ocr_avail_mmc)
1897 host->ocr_avail = host->ocr_avail_mmc;
1900 * We need to get OCR a different way for SPI.
1902 if (mmc_host_is_spi(host)) {
1903 err = mmc_spi_read_ocr(host, 1, &ocr);
1908 rocr = mmc_select_voltage(host, ocr);
1911 * Can we support the voltage of the card?
1919 * Detect and init the card.
1921 err = mmc_init_card(host, rocr, NULL);
1925 mmc_release_host(host);
1926 err = mmc_add_card(host->card);
1927 mmc_claim_host(host);
1934 mmc_release_host(host);
1935 mmc_remove_card(host->card);
1936 mmc_claim_host(host);
1939 mmc_detach_bus(host);
1941 pr_err("%s: error %d whilst initialising MMC card\n",
1942 mmc_hostname(host), err);