/*
 * SD/MMC Greybus driver.
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>

#include "greybus.h"
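
/*
 * Per-connection host state: the mutex serialises request handling and
 * teardown, while the spinlock only guards the flag used to cancel an
 * ongoing data transfer.
 */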
struct gb_sdio_host {
	struct gb_connection	*connection;
	struct mmc_host		*mmc;
	struct mmc_request	*mrq;
	struct mutex		lock;	/* lock for this host */
	size_t			data_max;
	void			*xfer_buffer;
	spinlock_t		xfer;	/* lock to cancel ongoing transfer */
	bool			xfer_stop;
	struct work_struct	mrqwork;
	u8			queued_events;
	bool			removed;
	bool			card_present;
	bool			read_only;
};

static struct workqueue_struct *gb_sdio_mrq_workqueue;
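
/*
 * Map the groups of MMC response types onto the Greybus response flag
 * combinations carried in the command request.
 */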
#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)
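
/*
 * Commands that address exactly one block; used to reject requests that
 * pair a single-block opcode with a multi-block count.
 */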
static inline bool single_op(struct mmc_command *cmd)
{
	uint32_t opcode = cmd->opcode;

	return opcode == MMC_WRITE_BLOCK ||
	       opcode == MMC_READ_SINGLE_BLOCK;
}
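
/* Translate the capability bits reported by the module into mmc_host caps. */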
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps;
	u32 caps2;

	caps = (r & GB_SDIO_CAP_NONREMOVABLE ? MMC_CAP_NONREMOVABLE : 0) |
		(r & GB_SDIO_CAP_4_BIT_DATA ? MMC_CAP_4_BIT_DATA : 0) |
		(r & GB_SDIO_CAP_8_BIT_DATA ? MMC_CAP_8_BIT_DATA : 0) |
		(r & GB_SDIO_CAP_MMC_HS ? MMC_CAP_MMC_HIGHSPEED : 0) |
		(r & GB_SDIO_CAP_SD_HS ? MMC_CAP_SD_HIGHSPEED : 0) |
		(r & GB_SDIO_CAP_ERASE ? MMC_CAP_ERASE : 0) |
		(r & GB_SDIO_CAP_1_2V_DDR ? MMC_CAP_1_2V_DDR : 0) |
		(r & GB_SDIO_CAP_1_8V_DDR ? MMC_CAP_1_8V_DDR : 0) |
		(r & GB_SDIO_CAP_POWER_OFF_CARD ? MMC_CAP_POWER_OFF_CARD : 0) |
		(r & GB_SDIO_CAP_UHS_SDR12 ? MMC_CAP_UHS_SDR12 : 0) |
		(r & GB_SDIO_CAP_UHS_SDR25 ? MMC_CAP_UHS_SDR25 : 0) |
		(r & GB_SDIO_CAP_UHS_SDR50 ? MMC_CAP_UHS_SDR50 : 0) |
		(r & GB_SDIO_CAP_UHS_SDR104 ? MMC_CAP_UHS_SDR104 : 0) |
		(r & GB_SDIO_CAP_UHS_DDR50 ? MMC_CAP_UHS_DDR50 : 0) |
		(r & GB_SDIO_CAP_DRIVER_TYPE_A ? MMC_CAP_DRIVER_TYPE_A : 0) |
		(r & GB_SDIO_CAP_DRIVER_TYPE_C ? MMC_CAP_DRIVER_TYPE_C : 0) |
		(r & GB_SDIO_CAP_DRIVER_TYPE_D ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = (r & GB_SDIO_CAP_HS200_1_2V ? MMC_CAP2_HS200_1_2V_SDR : 0) |
#ifdef MMC_HS400_SUPPORTED
		(r & GB_SDIO_CAP_HS400_1_2V ? MMC_CAP2_HS400_1_2V : 0) |
		(r & GB_SDIO_CAP_HS400_1_8V ? MMC_CAP2_HS400_1_8V : 0) |
#endif
		(r & GB_SDIO_CAP_HS200_1_8V ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps | MMC_CAP_NEEDS_POLL;
	host->mmc->caps2 = caps2;

	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}
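
/*
 * Fetch the module capabilities and size the data payload so that a
 * transfer request or response still fits in a single Greybus operation.
 */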
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	u16 data_max;
	u32 blksz;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	blksz = min(le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	mmc->ocr_avail = le32_to_cpu(response.ocr);
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	return 0;
}
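
/*
 * Queue events that arrive before the host is registered; a card-inserted
 * event supersedes a pending removal and vice versa.
 */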
static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
{
	if (event & GB_SDIO_CARD_INSERTED)
		host->queued_events &= ~GB_SDIO_CARD_REMOVED;
	else if (event & GB_SDIO_CARD_REMOVED)
		host->queued_events &= ~GB_SDIO_CARD_INSERTED;

	host->queued_events |= event;
}
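
/* Apply card insert/remove and write-protect events to the mmc host. */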
static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
{
	u8 state_changed = 0;

	if (event & GB_SDIO_CARD_INSERTED) {
		if (!mmc_card_is_removable(host->mmc))
			return 0;
		if (host->card_present)
			return 0;
		host->card_present = true;
		state_changed = 1;
	}

	if (event & GB_SDIO_CARD_REMOVED) {
		if (!mmc_card_is_removable(host->mmc))
			return 0;
		if (!(host->card_present))
			return 0;
		host->card_present = false;
		state_changed = 1;
	}

	if (event & GB_SDIO_WP) {
		host->read_only = true;
	}

	if (state_changed) {
		dev_info(mmc_dev(host->mmc), "card %s now event\n",
			 (host->card_present ? "inserted" : "removed"));
		mmc_detect_change(host->mmc, 0);
	}

	return 0;
}
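
/* Handler for unsolicited GB_SDIO_TYPE_EVENT requests sent by the module. */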
static int gb_sdio_event_recv(u8 type, struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_sdio_host *host = connection->private;
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(&connection->dev,
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}
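
/* Push the current ios settings to the module in a single operation. */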
static int gb_sdio_set_ios(struct gb_sdio_host *host,
			   struct gb_sdio_set_ios_request *request)
{
	return gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS,
				 request, sizeof(*request), NULL, 0);
}
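
/*
 * Write path: copy one chunk of the scatterlist into the bounce buffer and
 * send it to the module as a single transfer operation.
 */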
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response response;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	request = host->xfer_buffer;
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);
	if (copied != len)
		return -EINVAL;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_TRANSFER,
				request, len + sizeof(*request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	send_blocks = le16_to_cpu(response.data_blocks);
	send_blksz = le16_to_cpu(response.data_blksz);

	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		return -EINVAL;
	}

	return 0;
}
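
/*
 * Read path: ask the module for one chunk and copy the returned payload
 * back into the scatterlist.
 */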
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request request;
	struct gb_sdio_transfer_response *response;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	WARN_ON(len > host->data_max);

	request.data_flags = (data->flags >> 8);
	request.data_blocks = cpu_to_le16(nblocks);
	request.data_blksz = cpu_to_le16(data->blksz);

	response = host->xfer_buffer;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_TRANSFER,
				&request, sizeof(request), response, len +
				sizeof(*response));
	if (ret < 0)
		return ret;

	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		return -EINVAL;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		return -EINVAL;

	return 0;
}
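
/*
 * Split a data request into chunks no larger than data_max, bailing out if
 * a stop transmission has been requested in the meantime.
 */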
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}
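
/* Translate an mmc_command into a Greybus command operation and run it. */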
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request;
	struct gb_sdio_command_response response;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid %04x\n",
			mmc_resp_type(cmd));
		return -EINVAL;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid %04x\n",
			mmc_cmd_type(cmd));
		return -EINVAL;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
				&request, sizeof(request), &response,
				sizeof(response));
	if (ret < 0)
		return ret;

	/* no response expected */
	if (cmd_flags & GB_SDIO_RSP_NONE)
		return ret;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

	return ret;
}
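
/*
 * Work item: issue the sbc, cmd, data and stop phases of the queued
 * mmc_request outside of the .request() callback context.
 */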
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, host->mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
}
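
/*
 * .request() callback: queue the request for the worker; a stop command
 * also flags any ongoing transfer for cancellation.
 */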
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if it is a request to cancel an ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);
	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(gb_sdio_mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}
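
/* .set_ios() callback: translate the ios fields into a set_ios request. */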
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);
	request.vdd = cpu_to_le32(1 << ios->vdd);

	request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			    GB_SDIO_BUSMODE_OPENDRAIN :
			    GB_SDIO_BUSMODE_PUSHPULL);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
#ifdef MMC_POWER_UNDEFINED_SUPPORTED
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
#endif
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
#ifdef MMC_DDR52_DEFINED
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
#endif
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
#ifdef MMC_HS400_SUPPORTED
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
#endif
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}
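
/* .get_ro() callback: report the write-protect state seen via events. */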
static int gb_mmc_get_ro(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->read_only;
}
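
/* .get_cd() callback: report card presence as tracked from events. */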
static int gb_mmc_get_cd(struct mmc_host *mmc)
{
	struct gb_sdio_host *host = mmc_priv(mmc);

	mutex_lock(&host->lock);
	if (host->removed) {
		mutex_unlock(&host->lock);
		return -ESHUTDOWN;
	}
	mutex_unlock(&host->lock);

	return host->card_present;
}

static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
};
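
/*
 * Connection init: allocate the mmc host and bounce buffer, query the
 * module capabilities and register with the mmc core. Events received
 * before registration are replayed afterwards.
 */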
static int gb_sdio_connection_init(struct gb_connection *connection)
{
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	size_t max_buffer;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &connection->dev);
	if (!mmc)
		return -ENOMEM;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->removed = true;

	host->connection = connection;
	connection->private = host;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto free_mmc;

	mmc->ops = &gb_sdio_ops;

	/* for now we just make a 1:1 map between max blocks and segments */
	mmc->max_segs = host->mmc->max_blk_count;
	mmc->max_seg_size = host->mmc->max_blk_size;

	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

	max_buffer = gb_operation_get_payload_size_max(host->connection);
	host->xfer_buffer = kzalloc(max_buffer, GFP_KERNEL);
	if (!host->xfer_buffer) {
		ret = -ENOMEM;
		goto free_mmc;
	}

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	gb_sdio_mrq_workqueue = alloc_workqueue("gb_sdio_mrq", 0, 1);
	if (!gb_sdio_mrq_workqueue) {
		ret = -ENOMEM;
		goto free_buffer;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto free_work_queue;

	host->removed = false;
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	return ret;

free_work_queue:
	destroy_workqueue(gb_sdio_mrq_workqueue);
free_buffer:
	kfree(host->xfer_buffer);
free_mmc:
	connection->private = NULL;
	mmc_free_host(mmc);

	return ret;
}
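
/* Connection teardown: mark the host removed, then unwind init. */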
static void gb_sdio_connection_exit(struct gb_connection *connection)
{
	struct mmc_host *mmc;
	struct gb_sdio_host *host = connection->private;

	if (!host)
		return;

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	connection->private = NULL;
	mutex_unlock(&host->lock);

	flush_workqueue(gb_sdio_mrq_workqueue);
	destroy_workqueue(gb_sdio_mrq_workqueue);
	mmc_remove_host(mmc);
	kfree(host->xfer_buffer);
	mmc_free_host(mmc);
}

static struct gb_protocol sdio_protocol = {
	.name			= "sdio",
	.id			= GREYBUS_PROTOCOL_SDIO,
	.major			= GB_SDIO_VERSION_MAJOR,
	.minor			= GB_SDIO_VERSION_MINOR,
	.connection_init	= gb_sdio_connection_init,
	.connection_exit	= gb_sdio_connection_exit,
	.request_recv		= gb_sdio_event_recv,
};

gb_builtin_protocol_driver(sdio_protocol);