mmc: cavium: Add core MMC driver for Cavium SOCs
author     Jan Glauber <jglauber@cavium.com>
           Thu, 30 Mar 2017 15:31:24 +0000 (17:31 +0200)
committer  Ulf Hansson <ulf.hansson@linaro.org>
           Mon, 24 Apr 2017 19:42:09 +0000 (21:42 +0200)
This core driver will be used by a MIPS platform driver
or by an ARM64 PCI driver. The core driver implements the
mmc_host_ops and slot probe & remove functions.
Callbacks are provided to allow platform-specific interrupt
enabling and bus locking.

The host controller supports:
- up to 4 slots that can contain SD cards or eMMC chips
- 1-, 4- and 8-bit bus width
- SDR and DDR
- transfers up to 52 MHz (might be less when multiple slots are used)
- DMA read/write
- multi-block read/write (but not stream mode)

Voltage is limited to 3.3 V and shared by all slots (vmmc and vqmmc).

A global lock for all MMC devices is required because the host
controller is shared.

Signed-off-by: Jan Glauber <jglauber@cavium.com>
Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Steven J. Hill <steven.hill@cavium.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
drivers/mmc/host/cavium.c [new file with mode: 0644]
drivers/mmc/host/cavium.h [new file with mode: 0644]

diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
new file mode 100644 (file)
index 0000000..910e290
--- /dev/null
@@ -0,0 +1,982 @@
+/*
+ * Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
+ * ThunderX SOCs.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012-2017 Cavium Inc.
+ * Authors:
+ *   David Daney <david.daney@cavium.com>
+ *   Peter Swain <pswain@cavium.com>
+ *   Steven J. Hill <steven.hill@cavium.com>
+ *   Jan Glauber <jglauber@cavium.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/scatterlist.h>
+#include <linux/time.h>
+
+#include "cavium.h"
+
+const char *cvm_mmc_irq_names[] = {
+       "MMC Buffer",
+       "MMC Command",
+       "MMC DMA",
+       "MMC Command Error",
+       "MMC DMA Error",
+       "MMC Switch",
+       "MMC Switch Error",
+       "MMC DMA int Fifo",
+       "MMC DMA int",
+};
+
+/*
+ * The Cavium MMC host hardware assumes that all commands have fixed
+ * command and response types.  These are correct if MMC devices are
+ * being used.  However, non-MMC devices like SD use command and
+ * response types that are unexpected by the host hardware.
+ *
+ * The command and response types can be overridden by supplying an
+ * XOR value that is applied to the type.  We calculate the XOR value
+ * from the values in this table and the flags passed from the MMC
+ * core.
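+ *
+ * For example, the table below encodes CMD8 as eMMC's SEND_EXT_CSD,
+ * a data read with an R1 response ({1, 1}). When the core issues SD's
+ * CMD8 (SEND_IF_COND), a broadcast command (ctype 0) with an R7
+ * response (rtype 1), the result is ctype_xor = 1 and rtype_xor = 0.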
+ */
+static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
+       {0, 0},         /* CMD0 */
+       {0, 3},         /* CMD1 */
+       {0, 2},         /* CMD2 */
+       {0, 1},         /* CMD3 */
+       {0, 0},         /* CMD4 */
+       {0, 1},         /* CMD5 */
+       {0, 1},         /* CMD6 */
+       {0, 1},         /* CMD7 */
+       {1, 1},         /* CMD8 */
+       {0, 2},         /* CMD9 */
+       {0, 2},         /* CMD10 */
+       {1, 1},         /* CMD11 */
+       {0, 1},         /* CMD12 */
+       {0, 1},         /* CMD13 */
+       {1, 1},         /* CMD14 */
+       {0, 0},         /* CMD15 */
+       {0, 1},         /* CMD16 */
+       {1, 1},         /* CMD17 */
+       {1, 1},         /* CMD18 */
+       {3, 1},         /* CMD19 */
+       {2, 1},         /* CMD20 */
+       {0, 0},         /* CMD21 */
+       {0, 0},         /* CMD22 */
+       {0, 1},         /* CMD23 */
+       {2, 1},         /* CMD24 */
+       {2, 1},         /* CMD25 */
+       {2, 1},         /* CMD26 */
+       {2, 1},         /* CMD27 */
+       {0, 1},         /* CMD28 */
+       {0, 1},         /* CMD29 */
+       {1, 1},         /* CMD30 */
+       {1, 1},         /* CMD31 */
+       {0, 0},         /* CMD32 */
+       {0, 0},         /* CMD33 */
+       {0, 0},         /* CMD34 */
+       {0, 1},         /* CMD35 */
+       {0, 1},         /* CMD36 */
+       {0, 0},         /* CMD37 */
+       {0, 1},         /* CMD38 */
+       {0, 4},         /* CMD39 */
+       {0, 5},         /* CMD40 */
+       {0, 0},         /* CMD41 */
+       {2, 1},         /* CMD42 */
+       {0, 0},         /* CMD43 */
+       {0, 0},         /* CMD44 */
+       {0, 0},         /* CMD45 */
+       {0, 0},         /* CMD46 */
+       {0, 0},         /* CMD47 */
+       {0, 0},         /* CMD48 */
+       {0, 0},         /* CMD49 */
+       {0, 0},         /* CMD50 */
+       {0, 0},         /* CMD51 */
+       {0, 0},         /* CMD52 */
+       {0, 0},         /* CMD53 */
+       {0, 0},         /* CMD54 */
+       {0, 1},         /* CMD55 */
+       {0xff, 0xff},   /* CMD56 */
+       {0, 0},         /* CMD57 */
+       {0, 0},         /* CMD58 */
+       {0, 0},         /* CMD59 */
+       {0, 0},         /* CMD60 */
+       {0, 0},         /* CMD61 */
+       {0, 0},         /* CMD62 */
+       {0, 0}          /* CMD63 */
+};
+
+static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
+{
+       struct cvm_mmc_cr_type *cr;
+       u8 hardware_ctype, hardware_rtype;
+       u8 desired_ctype = 0, desired_rtype = 0;
+       struct cvm_mmc_cr_mods r;
+
+       cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
+       hardware_ctype = cr->ctype;
+       hardware_rtype = cr->rtype;
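+       /* GEN_CMD (CMD56) is a data read when arg bit 0 is set, a write otherwise. */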
+       if (cmd->opcode == MMC_GEN_CMD)
+               hardware_ctype = (cmd->arg & 1) ? 1 : 2;
+
+       switch (mmc_cmd_type(cmd)) {
+       case MMC_CMD_ADTC:
+               desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
+               break;
+       case MMC_CMD_AC:
+       case MMC_CMD_BC:
+       case MMC_CMD_BCR:
+               desired_ctype = 0;
+               break;
+       }
+
+       switch (mmc_resp_type(cmd)) {
+       case MMC_RSP_NONE:
+               desired_rtype = 0;
+               break;
+       case MMC_RSP_R1: /* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
+       case MMC_RSP_R1B:
+               desired_rtype = 1;
+               break;
+       case MMC_RSP_R2:
+               desired_rtype = 2;
+               break;
+       case MMC_RSP_R3: /* MMC_RSP_R4 */
+               desired_rtype = 3;
+               break;
+       }
+       r.ctype_xor = desired_ctype ^ hardware_ctype;
+       r.rtype_xor = desired_rtype ^ hardware_rtype;
+       return r;
+}
+
+static void check_switch_errors(struct cvm_mmc_host *host)
+{
+       u64 emm_switch;
+
+       emm_switch = readq(host->base + MIO_EMM_SWITCH(host));
+       if (emm_switch & MIO_EMM_SWITCH_ERR0)
+               dev_err(host->dev, "Switch power class error\n");
+       if (emm_switch & MIO_EMM_SWITCH_ERR1)
+               dev_err(host->dev, "Switch hs timing error\n");
+       if (emm_switch & MIO_EMM_SWITCH_ERR2)
+               dev_err(host->dev, "Switch bus width error\n");
+}
+
+static void clear_bus_id(u64 *reg)
+{
+       u64 bus_id_mask = GENMASK_ULL(61, 60);
+
+       *reg &= ~bus_id_mask;
+}
+
+static void set_bus_id(u64 *reg, int bus_id)
+{
+       clear_bus_id(reg);
+       *reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
+}
+
+static int get_bus_id(u64 reg)
+{
+       return FIELD_GET(GENMASK_ULL(61, 60), reg);
+}
+
+/*
+ * We never set the switch_exe bit since that would interfere
+ * with the commands sent by the MMC core.
+ */
+static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
+{
+       int retries = 100;
+       u64 rsp_sts;
+       int bus_id;
+
+       /*
+        * Mode settings are only taken from slot 0. Work around that hardware
+        * issue by first switching to slot 0.
+        */
+       bus_id = get_bus_id(emm_switch);
+       clear_bus_id(&emm_switch);
+       writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
+
+       set_bus_id(&emm_switch, bus_id);
+       writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));
+
+       /* wait for the switch to finish */
+       do {
+               rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+               if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
+                       break;
+               udelay(10);
+       } while (--retries);
+
+       check_switch_errors(host);
+}
+
+static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
+{
+       /* Match BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
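+       /* (bits 61:60, 48, 42:40, 35:32, 31:16 and 15:0 of MIO_EMM_SWITCH) */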
+       u64 match = 0x3001070fffffffffull;
+
+       return (slot->cached_switch & match) != (new_val & match);
+}
+
+static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
+{
+       u64 timeout;
+
+       if (!slot->clock)
+               return;
+
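+       /* The timeout counts card-clock cycles; default to ~850 ms worth. */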
+       if (ns)
+               timeout = (slot->clock * ns) / NSEC_PER_SEC;
+       else
+               timeout = (slot->clock * 850ull) / 1000ull;
+       writeq(timeout, slot->host->base + MIO_EMM_WDOG(slot->host));
+}
+
+static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
+{
+       struct cvm_mmc_host *host = slot->host;
+       u64 emm_switch, wdog;
+
+       emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
+       emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
+                       MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
+       set_bus_id(&emm_switch, slot->bus_id);
+
+       wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
+       do_switch(slot->host, emm_switch);
+
+       slot->cached_switch = emm_switch;
+
+       msleep(20);
+
+       writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
+}
+
+/* Switch to another slot if needed */
+static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
+{
+       struct cvm_mmc_host *host = slot->host;
+       struct cvm_mmc_slot *old_slot;
+       u64 emm_sample, emm_switch;
+
+       if (slot->bus_id == host->last_slot)
+               return;
+
+       if (host->last_slot >= 0 && host->slot[host->last_slot]) {
+               old_slot = host->slot[host->last_slot];
+               old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
+               old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
+       }
+
+       writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
+       emm_switch = slot->cached_switch;
+       set_bus_id(&emm_switch, slot->bus_id);
+       do_switch(host, emm_switch);
+
+       emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
+                    FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
+       writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));
+
+       host->last_slot = slot->bus_id;
+}
+
+static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
+                   u64 dbuf)
+{
+       struct sg_mapping_iter *smi = &host->smi;
+       int data_len = req->data->blocks * req->data->blksz;
+       int bytes_xfered, shift = -1;
+       u64 dat = 0;
+
+       /* Auto inc from offset zero */
+       writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));
+
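+       /* Each BUF_DAT read returns 64 data bits; unpack them MSB first. */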
+       for (bytes_xfered = 0; bytes_xfered < data_len;) {
+               if (smi->consumed >= smi->length) {
+                       if (!sg_miter_next(smi))
+                               break;
+                       smi->consumed = 0;
+               }
+
+               if (shift < 0) {
+                       dat = readq(host->base + MIO_EMM_BUF_DAT(host));
+                       shift = 56;
+               }
+
+               while (smi->consumed < smi->length && shift >= 0) {
+                       ((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
+                       bytes_xfered++;
+                       smi->consumed++;
+                       shift -= 8;
+               }
+       }
+
+       sg_miter_stop(smi);
+       req->data->bytes_xfered = bytes_xfered;
+       req->data->error = 0;
+}
+
+static void do_write(struct mmc_request *req)
+{
+       req->data->bytes_xfered = req->data->blocks * req->data->blksz;
+       req->data->error = 0;
+}
+
+static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
+                            u64 rsp_sts)
+{
+       u64 rsp_hi, rsp_lo;
+
+       if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
+               return;
+
+       rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));
+
+       switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
+       case 1:
+       case 3:
+               req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
+               req->cmd->resp[1] = 0;
+               req->cmd->resp[2] = 0;
+               req->cmd->resp[3] = 0;
+               break;
+       case 2:
+               req->cmd->resp[3] = rsp_lo & 0xffffffff;
+               req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
+               rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
+               req->cmd->resp[1] = rsp_hi & 0xffffffff;
+               req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
+               break;
+       }
+}
+
+static int get_dma_dir(struct mmc_data *data)
+{
+       return (data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
+
+static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
+{
+       data->bytes_xfered = data->blocks * data->blksz;
+       data->error = 0;
+       return 1;
+}
+
+static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
+{
+       return finish_dma_single(host, data);
+}
+
+static int check_status(u64 rsp_sts)
+{
+       if (rsp_sts & MIO_EMM_RSP_STS_RSP_BAD_STS ||
+           rsp_sts & MIO_EMM_RSP_STS_RSP_CRC_ERR ||
+           rsp_sts & MIO_EMM_RSP_STS_BLK_CRC_ERR)
+               return -EILSEQ;
+       if (rsp_sts & MIO_EMM_RSP_STS_RSP_TIMEOUT ||
+           rsp_sts & MIO_EMM_RSP_STS_BLK_TIMEOUT)
+               return -ETIMEDOUT;
+       if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
+               return -EIO;
+       return 0;
+}
+
+/* Try to clean up failed DMA. */
+static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
+{
+       u64 emm_dma;
+
+       emm_dma = readq(host->base + MIO_EMM_DMA(host));
+       emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
+                  FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
+       set_bus_id(&emm_dma, get_bus_id(rsp_sts));
+       writeq(emm_dma, host->base + MIO_EMM_DMA(host));
+}
+
+irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
+{
+       struct cvm_mmc_host *host = dev_id;
+       struct mmc_request *req;
+       unsigned long flags = 0;
+       u64 emm_int, rsp_sts;
+       bool host_done;
+
+       if (host->need_irq_handler_lock)
+               spin_lock_irqsave(&host->irq_handler_lock, flags);
+       else
+               __acquire(&host->irq_handler_lock);
+
+       /* Clear interrupt bits (write 1 clears). */
+       emm_int = readq(host->base + MIO_EMM_INT(host));
+       writeq(emm_int, host->base + MIO_EMM_INT(host));
+
+       if (emm_int & MIO_EMM_INT_SWITCH_ERR)
+               check_switch_errors(host);
+
+       req = host->current_req;
+       if (!req)
+               goto out;
+
+       rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+       /*
+        * dma_val set means DMA is still in progress. Don't touch
+        * the request and wait for the interrupt indicating that
+        * the DMA is finished.
+        */
+       if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
+               goto out;
+
+       if (!host->dma_active && req->data &&
+           (emm_int & MIO_EMM_INT_BUF_DONE)) {
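+               /* CMD_TYPE in RSP_STS: 1 = data read, 2 = data write. */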
+               unsigned int type = (rsp_sts >> 7) & 3;
+
+               if (type == 1)
+                       do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
+               else if (type == 2)
+                       do_write(req);
+       }
+
+       host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
+                   emm_int & MIO_EMM_INT_DMA_DONE ||
+                   emm_int & MIO_EMM_INT_CMD_ERR  ||
+                   emm_int & MIO_EMM_INT_DMA_ERR;
+
+       if (!(host_done && req->done))
+               goto no_req_done;
+
+       req->cmd->error = check_status(rsp_sts);
+
+       if (host->dma_active && req->data)
+               if (!finish_dma(host, req->data))
+                       goto no_req_done;
+
+       set_cmd_response(host, req, rsp_sts);
+       if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
+           (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
+               cleanup_dma(host, rsp_sts);
+
+       host->current_req = NULL;
+       req->done(req);
+
+no_req_done:
+       if (host->dmar_fixup_done)
+               host->dmar_fixup_done(host);
+       if (host_done)
+               host->release_bus(host);
+out:
+       if (host->need_irq_handler_lock)
+               spin_unlock_irqrestore(&host->irq_handler_lock, flags);
+       else
+               __release(&host->irq_handler_lock);
+       return IRQ_RETVAL(emm_int != 0);
+}
+
+/*
+ * Program DMA_CFG and if needed DMA_ADR.
+ * Returns 0 on error, DMA address otherwise.
+ */
+static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
+{
+       u64 dma_cfg, addr;
+       int count, rw;
+
+       count = dma_map_sg(host->dev, data->sg, data->sg_len,
+                          get_dma_dir(data));
+       if (!count)
+               return 0;
+
+       rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
+       dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
+                 FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
+#ifdef __LITTLE_ENDIAN
+       dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
+#endif
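+       /* The size field is in units of 64-bit words, minus one. */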
+       dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
+                             (sg_dma_len(&data->sg[0]) / 8) - 1);
+
+       addr = sg_dma_address(&data->sg[0]);
+       if (!host->big_dma_addr)
+               dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
+       writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));
+
+       pr_debug("[%s] sg_dma_len: %u  total sg_elem: %d\n",
+                (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);
+
+       if (host->big_dma_addr)
+               writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
+       return addr;
+}
+
+static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
+{
+       return prepare_dma_single(host, data);
+}
+
+static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+       struct cvm_mmc_slot *slot = mmc_priv(mmc);
+       u64 emm_dma;
+
+       emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
+                 FIELD_PREP(MIO_EMM_DMA_SECTOR,
+                            (mrq->data->blksz == 512) ? 1 : 0) |
+                 FIELD_PREP(MIO_EMM_DMA_RW,
+                            (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
+                 FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
+                 FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
+       set_bus_id(&emm_dma, slot->bus_id);
+
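+       /* Use hardware multi-block mode for eMMC, or for SD cards that support CMD23. */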
+       if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
+           (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
+               emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);
+
+       pr_debug("[%s] blocks: %u  multi: %d\n",
+               (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
+                mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
+       return emm_dma;
+}
+
+static void cvm_mmc_dma_request(struct mmc_host *mmc,
+                               struct mmc_request *mrq)
+{
+       struct cvm_mmc_slot *slot = mmc_priv(mmc);
+       struct cvm_mmc_host *host = slot->host;
+       struct mmc_data *data;
+       u64 emm_dma, addr;
+
+       if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
+           !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
+               dev_err(&mmc->card->dev,
+                       "Error: cmv_mmc_dma_request no data\n");
+               goto error;
+       }
+
+       cvm_mmc_switch_to(slot);
+
+       data = mrq->data;
+       pr_debug("DMA request  blocks: %d  block_size: %d  total_size: %d\n",
+                data->blocks, data->blksz, data->blocks * data->blksz);
+       if (data->timeout_ns)
+               set_wdog(slot, data->timeout_ns);
+
+       WARN_ON(host->current_req);
+       host->current_req = mrq;
+
+       emm_dma = prepare_ext_dma(mmc, mrq);
+       addr = prepare_dma(host, data);
+       if (!addr) {
+               dev_err(host->dev, "prepare_dma failed\n");
+               goto error;
+       }
+
+       host->dma_active = true;
+       host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
+                        MIO_EMM_INT_DMA_ERR);
+
+       if (host->dmar_fixup)
+               host->dmar_fixup(host, mrq->cmd, data, addr);
+
+       /*
+        * If we have a valid SD card in the slot, we set the response
+        * bit mask to check for CRC errors and timeouts only.
+        * Otherwise, use the default power reset value.
+        */
+       if (mmc->card && mmc_card_sd(mmc->card))
+               writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
+       else
+               writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
+       writeq(emm_dma, host->base + MIO_EMM_DMA(host));
+       return;
+
+error:
+       mrq->cmd->error = -EINVAL;
+       if (mrq->done)
+               mrq->done(mrq);
+       host->release_bus(host);
+}
+
+static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+{
+       sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
+                      SG_MITER_ATOMIC | SG_MITER_TO_SG);
+}
+
+static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
+{
+       unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
+       struct sg_mapping_iter *smi = &host->smi;
+       unsigned int bytes_xfered;
+       int shift = 56;
+       u64 dat = 0;
+
+       /* Copy data to the xmit buffer before issuing the command. */
+       sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);
+
+       /* Auto inc from offset zero, dbuf zero */
+       writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));
+
+       for (bytes_xfered = 0; bytes_xfered < data_len;) {
+               if (smi->consumed >= smi->length) {
+                       if (!sg_miter_next(smi))
+                               break;
+                       smi->consumed = 0;
+               }
+
+               while (smi->consumed < smi->length && shift >= 0) {
+                       dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
+                       bytes_xfered++;
+                       smi->consumed++;
+                       shift -= 8;
+               }
+
+               if (shift < 0) {
+                       writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
+                       shift = 56;
+                       dat = 0;
+               }
+       }
+       sg_miter_stop(smi);
+}
+
+static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+       struct cvm_mmc_slot *slot = mmc_priv(mmc);
+       struct cvm_mmc_host *host = slot->host;
+       struct mmc_command *cmd = mrq->cmd;
+       struct cvm_mmc_cr_mods mods;
+       u64 emm_cmd, rsp_sts;
+       int retries = 100;
+
+       /*
+        * Note about locking:
+        * All MMC devices share the same bus and controller. Allow only a
+        * single user of the bootbus/MMC bus at a time. The lock is acquired
+        * on all entry points from the MMC layer.
+        *
+        * For requests the lock is only released after the completion
+        * interrupt!
+        */
+       host->acquire_bus(host);
+
+       if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
+           cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+               return cvm_mmc_dma_request(mmc, mrq);
+
+       cvm_mmc_switch_to(slot);
+
+       mods = cvm_mmc_get_cr_mods(cmd);
+
+       WARN_ON(host->current_req);
+       host->current_req = mrq;
+
+       if (cmd->data) {
+               if (cmd->data->flags & MMC_DATA_READ)
+                       do_read_request(host, mrq);
+               else
+                       do_write_request(host, mrq);
+
+               if (cmd->data->timeout_ns)
+                       set_wdog(slot, cmd->data->timeout_ns);
+       } else {
+               set_wdog(slot, 0);
+       }
+
+       host->dma_active = false;
+       host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);
+
+       emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
+                 FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
+                 FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
+                 FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
+                 FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
+       set_bus_id(&emm_cmd, slot->bus_id);
+       if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
+               emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
+                               64 - ((cmd->data->blocks * cmd->data->blksz) / 8));
+
+       writeq(0, host->base + MIO_EMM_STS_MASK(host));
+
+retry:
+       rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
+       if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
+           rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
+           rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
+           rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
+               udelay(10);
+               if (--retries)
+                       goto retry;
+       }
+       if (!retries)
+               dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
+       writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
+}
+
+static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       struct cvm_mmc_slot *slot = mmc_priv(mmc);
+       struct cvm_mmc_host *host = slot->host;
+       int clk_period = 0, power_class = 10, bus_width = 0;
+       u64 clock, emm_switch;
+
+       host->acquire_bus(host);
+       cvm_mmc_switch_to(slot);
+
+       /* Set the power state */
+       switch (ios->power_mode) {
+       case MMC_POWER_ON:
+               break;
+
+       case MMC_POWER_OFF:
+               cvm_mmc_reset_bus(slot);
+               if (host->global_pwr_gpiod)
+                       host->set_shared_power(host, 0);
+               else
+                       mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+               break;
+
+       case MMC_POWER_UP:
+               if (host->global_pwr_gpiod)
+                       host->set_shared_power(host, 1);
+               else
+                       mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+               break;
+       }
+
+       /* Convert bus width to HW definition */
+       switch (ios->bus_width) {
+       case MMC_BUS_WIDTH_8:
+               bus_width = 2;
+               break;
+       case MMC_BUS_WIDTH_4:
+               bus_width = 1;
+               break;
+       case MMC_BUS_WIDTH_1:
+               bus_width = 0;
+               break;
+       }
+
+       /* Change the clock frequency. */
+       clock = ios->clock;
+       if (clock > 52000000)
+               clock = 52000000;
+       slot->clock = clock;
+
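+       /* CLK_HI and CLK_LO each hold half a clock period, in sys_freq cycles. */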
+       if (clock)
+               clk_period = (host->sys_freq + clock - 1) / (2 * clock);
+
+       emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
+                               (ios->timing == MMC_TIMING_MMC_HS)) |
+                    FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
+                    FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
+                    FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
+                    FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
+       set_bus_id(&emm_switch, slot->bus_id);
+
+       if (!switch_val_changed(slot, emm_switch))
+               goto out;
+
+       set_wdog(slot, 0);
+       do_switch(host, emm_switch);
+       slot->cached_switch = emm_switch;
+out:
+       host->release_bus(host);
+}
+
+static const struct mmc_host_ops cvm_mmc_ops = {
+       .request        = cvm_mmc_request,
+       .set_ios        = cvm_mmc_set_ios,
+       .get_ro         = mmc_gpio_get_ro,
+       .get_cd         = mmc_gpio_get_cd,
+};
+
+static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
+{
+       struct mmc_host *mmc = slot->mmc;
+
+       clock = min(clock, mmc->f_max);
+       clock = max(clock, mmc->f_min);
+       slot->clock = clock;
+}
+
+static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
+{
+       struct cvm_mmc_host *host = slot->host;
+       u64 emm_switch;
+
+       /* Enable this bus slot. */
+       host->emm_cfg |= (1ull << slot->bus_id);
+       writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
+       udelay(10);
+
+       /* Program initial clock speed and power. */
+       cvm_mmc_set_clock(slot, slot->mmc->f_min);
+       emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
+       emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
+                                (host->sys_freq / slot->clock) / 2);
+       emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
+                                (host->sys_freq / slot->clock) / 2);
+
+       /* Make the changes take effect on this bus slot. */
+       set_bus_id(&emm_switch, slot->bus_id);
+       do_switch(host, emm_switch);
+
+       slot->cached_switch = emm_switch;
+
+       /*
+        * Set watchdog timeout value and default reset value
+        * for the mask register. Finally, set the CARD_RCA
+        * bit so that we can get the card address relative
+        * to the CMD register for CMD7 transactions.
+        */
+       set_wdog(slot, 0);
+       writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
+       writeq(1, host->base + MIO_EMM_RCA(host));
+       return 0;
+}
+
+static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
+{
+       u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
+       struct device_node *node = dev->of_node;
+       struct mmc_host *mmc = slot->mmc;
+       u64 clock_period;
+       int ret;
+
+       ret = of_property_read_u32(node, "reg", &id);
+       if (ret) {
+               dev_err(dev, "Missing or invalid reg property on %s\n",
+                       of_node_full_name(node));
+               return ret;
+       }
+
+       if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
+               dev_err(dev, "Invalid reg property on %s\n",
+                       of_node_full_name(node));
+               return -EINVAL;
+       }
+
+       mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
+       if (IS_ERR(mmc->supply.vmmc)) {
+               if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               /*
+                * Legacy Octeon firmware has no regulator entry, fall back to
+                * a hard-coded voltage to get a sane OCR.
+                */
+               mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+       } else {
+               ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
+               if (ret > 0)
+                       mmc->ocr_avail = ret;
+       }
+
+       /* Common MMC bindings */
+       ret = mmc_of_parse(mmc);
+       if (ret)
+               return ret;
+
+       /* Set bus width */
+       if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
+               of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
+               if (bus_width == 8)
+                       mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
+               else if (bus_width == 4)
+                       mmc->caps |= MMC_CAP_4_BIT_DATA;
+       }
+
+       /* Set maximum and minimum frequency */
+       if (!mmc->f_max)
+               of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
+       if (!mmc->f_max || mmc->f_max > 52000000)
+               mmc->f_max = 52000000;
+       mmc->f_min = 400000;
+
+       /* Sampling register settings, period in picoseconds */
+       clock_period = 1000000000000ull / slot->host->sys_freq;
+       of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
+       of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
+       slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
+       slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;
+
+       return id;
+}
+
+int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
+{
+       struct cvm_mmc_slot *slot;
+       struct mmc_host *mmc;
+       int ret, id;
+
+       mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
+       if (!mmc)
+               return -ENOMEM;
+
+       slot = mmc_priv(mmc);
+       slot->mmc = mmc;
+       slot->host = host;
+
+       ret = cvm_mmc_of_parse(dev, slot);
+       if (ret < 0)
+               goto error;
+       id = ret;
+
+       /* Set up host parameters */
+       mmc->ops = &cvm_mmc_ops;
+
+       mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+                    MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD;
+
+       mmc->max_segs = 1;
+
+       /* DMA size field can address up to 8 MB */
+       mmc->max_seg_size = 8 * 1024 * 1024;
+       mmc->max_req_size = mmc->max_seg_size;
+       /* External DMA is in 512 byte blocks */
+       mmc->max_blk_size = 512;
+       /* DMA block count field is 15 bits */
+       mmc->max_blk_count = 32767;
+
+       slot->clock = mmc->f_min;
+       slot->bus_id = id;
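+       /* Matches the reset RCA value written to MIO_EMM_RCA in cvm_mmc_init_lowlevel(). */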
+       slot->cached_rca = 1;
+
+       host->acquire_bus(host);
+       host->slot[id] = slot;
+       cvm_mmc_switch_to(slot);
+       cvm_mmc_init_lowlevel(slot);
+       host->release_bus(host);
+
+       ret = mmc_add_host(mmc);
+       if (ret) {
+               dev_err(dev, "mmc_add_host() returned %d\n", ret);
+               slot->host->slot[id] = NULL;
+               goto error;
+       }
+       return 0;
+
+error:
+       mmc_free_host(slot->mmc);
+       return ret;
+}
+
+int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
+{
+       mmc_remove_host(slot->mmc);
+       slot->host->slot[slot->bus_id] = NULL;
+       mmc_free_host(slot->mmc);
+       return 0;
+}
diff --git a/drivers/mmc/host/cavium.h b/drivers/mmc/host/cavium.h
new file mode 100644 (file)
index 0000000..f5d2b61
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ * Driver for MMC and SSD cards for Cavium OCTEON and ThunderX SOCs.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012-2017 Cavium Inc.
+ */
+
+#ifndef _CAVIUM_MMC_H_
+#define _CAVIUM_MMC_H_
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/of.h>
+#include <linux/scatterlist.h>
+#include <linux/semaphore.h>
+
+#define CAVIUM_MAX_MMC         4
+
+/* DMA register addresses */
+#define MIO_EMM_DMA_CFG(x)     (0x00 + x->reg_off_dma)
+#define MIO_EMM_DMA_ADR(x)     (0x08 + x->reg_off_dma) /* used by prepare_dma_single(); offset assumed to follow DMA_CFG */
+
+/* register addresses */
+#define MIO_EMM_CFG(x)         (0x00 + x->reg_off)
+#define MIO_EMM_SWITCH(x)      (0x48 + x->reg_off)
+#define MIO_EMM_DMA(x)         (0x50 + x->reg_off)
+#define MIO_EMM_CMD(x)         (0x58 + x->reg_off)
+#define MIO_EMM_RSP_STS(x)     (0x60 + x->reg_off)
+#define MIO_EMM_RSP_LO(x)      (0x68 + x->reg_off)
+#define MIO_EMM_RSP_HI(x)      (0x70 + x->reg_off)
+#define MIO_EMM_INT(x)         (0x78 + x->reg_off)
+#define MIO_EMM_INT_EN(x)      (0x80 + x->reg_off)
+#define MIO_EMM_WDOG(x)                (0x88 + x->reg_off)
+#define MIO_EMM_SAMPLE(x)      (0x90 + x->reg_off)
+#define MIO_EMM_STS_MASK(x)    (0x98 + x->reg_off)
+#define MIO_EMM_RCA(x)         (0xa0 + x->reg_off)
+#define MIO_EMM_BUF_IDX(x)     (0xe0 + x->reg_off)
+#define MIO_EMM_BUF_DAT(x)     (0xe8 + x->reg_off)
+
+struct cvm_mmc_host {
+       struct device *dev;
+       void __iomem *base;
+       void __iomem *dma_base;
+       int reg_off;
+       int reg_off_dma;
+       u64 emm_cfg;
+       u64 n_minus_one;        /* OCTEON II workaround location */
+       int last_slot;
+       struct clk *clk;
+       int sys_freq;
+
+       struct mmc_request *current_req;
+       struct sg_mapping_iter smi;
+       bool dma_active;
+
+       bool has_ciu3;
+       bool big_dma_addr;
+       bool need_irq_handler_lock;
+       spinlock_t irq_handler_lock;
+       struct semaphore mmc_serializer;
+
+       struct gpio_desc *global_pwr_gpiod;
+       atomic_t shared_power_users;
+
+       struct cvm_mmc_slot *slot[CAVIUM_MAX_MMC];
+       struct platform_device *slot_pdev[CAVIUM_MAX_MMC];
+
+       void (*set_shared_power)(struct cvm_mmc_host *, int);
+       void (*acquire_bus)(struct cvm_mmc_host *);
+       void (*release_bus)(struct cvm_mmc_host *);
+       void (*int_enable)(struct cvm_mmc_host *, u64);
+       /* required on some MIPS models */
+       void (*dmar_fixup)(struct cvm_mmc_host *, struct mmc_command *,
+                          struct mmc_data *, u64);
+       void (*dmar_fixup_done)(struct cvm_mmc_host *);
+};
+
+struct cvm_mmc_slot {
+       struct mmc_host *mmc;           /* slot-level mmc_core object */
+       struct cvm_mmc_host *host;      /* common hw for all slots */
+
+       u64 clock;
+
+       u64 cached_switch;
+       u64 cached_rca;
+
+       unsigned int cmd_cnt;           /* sample delay */
+       unsigned int dat_cnt;           /* sample delay */
+
+       int bus_id;
+};
+
+struct cvm_mmc_cr_type {
+       u8 ctype;
+       u8 rtype;
+};
+
+struct cvm_mmc_cr_mods {
+       u8 ctype_xor;
+       u8 rtype_xor;
+};
+
+/* Bitfield definitions */
+#define MIO_EMM_CMD_SKIP_BUSY          BIT_ULL(62)
+#define MIO_EMM_CMD_BUS_ID             GENMASK_ULL(61, 60)
+#define MIO_EMM_CMD_VAL                        BIT_ULL(59)
+#define MIO_EMM_CMD_DBUF               BIT_ULL(55)
+#define MIO_EMM_CMD_OFFSET             GENMASK_ULL(54, 49)
+#define MIO_EMM_CMD_CTYPE_XOR          GENMASK_ULL(42, 41)
+#define MIO_EMM_CMD_RTYPE_XOR          GENMASK_ULL(40, 38)
+#define MIO_EMM_CMD_IDX                        GENMASK_ULL(37, 32)
+#define MIO_EMM_CMD_ARG                        GENMASK_ULL(31, 0)
+
+#define MIO_EMM_DMA_SKIP_BUSY          BIT_ULL(62)
+#define MIO_EMM_DMA_BUS_ID             GENMASK_ULL(61, 60)
+#define MIO_EMM_DMA_VAL                        BIT_ULL(59)
+#define MIO_EMM_DMA_SECTOR             BIT_ULL(58)
+#define MIO_EMM_DMA_DAT_NULL           BIT_ULL(57)
+#define MIO_EMM_DMA_THRES              GENMASK_ULL(56, 51)
+#define MIO_EMM_DMA_REL_WR             BIT_ULL(50)
+#define MIO_EMM_DMA_RW                 BIT_ULL(49)
+#define MIO_EMM_DMA_MULTI              BIT_ULL(48)
+#define MIO_EMM_DMA_BLOCK_CNT          GENMASK_ULL(47, 32)
+#define MIO_EMM_DMA_CARD_ADDR          GENMASK_ULL(31, 0)
+
+#define MIO_EMM_DMA_CFG_EN             BIT_ULL(63)
+#define MIO_EMM_DMA_CFG_RW             BIT_ULL(62)
+#define MIO_EMM_DMA_CFG_CLR            BIT_ULL(61)
+#define MIO_EMM_DMA_CFG_SWAP32         BIT_ULL(59)
+#define MIO_EMM_DMA_CFG_SWAP16         BIT_ULL(58)
+#define MIO_EMM_DMA_CFG_SWAP8          BIT_ULL(57)
+#define MIO_EMM_DMA_CFG_ENDIAN         BIT_ULL(56)
+#define MIO_EMM_DMA_CFG_SIZE           GENMASK_ULL(55, 36)
+#define MIO_EMM_DMA_CFG_ADR            GENMASK_ULL(35, 0)
+
+#define MIO_EMM_INT_SWITCH_ERR         BIT_ULL(6)
+#define MIO_EMM_INT_SWITCH_DONE                BIT_ULL(5)
+#define MIO_EMM_INT_DMA_ERR            BIT_ULL(4)
+#define MIO_EMM_INT_CMD_ERR            BIT_ULL(3)
+#define MIO_EMM_INT_DMA_DONE           BIT_ULL(2)
+#define MIO_EMM_INT_CMD_DONE           BIT_ULL(1)
+#define MIO_EMM_INT_BUF_DONE           BIT_ULL(0)
+
+#define MIO_EMM_RSP_STS_BUS_ID         GENMASK_ULL(61, 60)
+#define MIO_EMM_RSP_STS_CMD_VAL                BIT_ULL(59)
+#define MIO_EMM_RSP_STS_SWITCH_VAL     BIT_ULL(58)
+#define MIO_EMM_RSP_STS_DMA_VAL                BIT_ULL(57)
+#define MIO_EMM_RSP_STS_DMA_PEND       BIT_ULL(56)
+#define MIO_EMM_RSP_STS_DBUF_ERR       BIT_ULL(28)
+#define MIO_EMM_RSP_STS_DBUF           BIT_ULL(23)
+#define MIO_EMM_RSP_STS_BLK_TIMEOUT    BIT_ULL(22)
+#define MIO_EMM_RSP_STS_BLK_CRC_ERR    BIT_ULL(21)
+#define MIO_EMM_RSP_STS_RSP_BUSYBIT    BIT_ULL(20)
+#define MIO_EMM_RSP_STS_STP_TIMEOUT    BIT_ULL(19)
+#define MIO_EMM_RSP_STS_STP_CRC_ERR    BIT_ULL(18)
+#define MIO_EMM_RSP_STS_STP_BAD_STS    BIT_ULL(17)
+#define MIO_EMM_RSP_STS_STP_VAL                BIT_ULL(16)
+#define MIO_EMM_RSP_STS_RSP_TIMEOUT    BIT_ULL(15)
+#define MIO_EMM_RSP_STS_RSP_CRC_ERR    BIT_ULL(14)
+#define MIO_EMM_RSP_STS_RSP_BAD_STS    BIT_ULL(13)
+#define MIO_EMM_RSP_STS_RSP_VAL                BIT_ULL(12)
+#define MIO_EMM_RSP_STS_RSP_TYPE       GENMASK_ULL(11, 9)
+#define MIO_EMM_RSP_STS_CMD_TYPE       GENMASK_ULL(8, 7)
+#define MIO_EMM_RSP_STS_CMD_IDX                GENMASK_ULL(6, 1)
+#define MIO_EMM_RSP_STS_CMD_DONE       BIT_ULL(0)
+
+#define MIO_EMM_SAMPLE_CMD_CNT         GENMASK_ULL(25, 16)
+#define MIO_EMM_SAMPLE_DAT_CNT         GENMASK_ULL(9, 0)
+
+#define MIO_EMM_SWITCH_BUS_ID          GENMASK_ULL(61, 60)
+#define MIO_EMM_SWITCH_EXE             BIT_ULL(59)
+#define MIO_EMM_SWITCH_ERR0            BIT_ULL(58)
+#define MIO_EMM_SWITCH_ERR1            BIT_ULL(57)
+#define MIO_EMM_SWITCH_ERR2            BIT_ULL(56)
+#define MIO_EMM_SWITCH_HS_TIMING       BIT_ULL(48)
+#define MIO_EMM_SWITCH_BUS_WIDTH       GENMASK_ULL(42, 40)
+#define MIO_EMM_SWITCH_POWER_CLASS     GENMASK_ULL(35, 32)
+#define MIO_EMM_SWITCH_CLK_HI          GENMASK_ULL(31, 16)
+#define MIO_EMM_SWITCH_CLK_LO          GENMASK_ULL(15, 0)
+
+/* Prototypes */
+irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id);
+int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host);
+int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot);
+extern const char *cvm_mmc_irq_names[];
+
+#endif