/* Enable the interrupts in the GINTMSK */
intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT;
- if (hsotg->params.dma_enable <= 0)
+ if (hsotg->params.host_dma <= 0)
intmsk |= GINTSTS_RXFLVL;
if (hsotg->params.external_id_pin_ctl <= 0)
intmsk |= GINTSTS_CONIDSTSCHNG;
break;
}
- dev_dbg(hsotg->dev, "dma_enable:%d dma_desc_enable:%d\n",
- hsotg->params.dma_enable,
+ dev_dbg(hsotg->dev, "host_dma:%d dma_desc_enable:%d\n",
+ hsotg->params.host_dma,
hsotg->params.dma_desc_enable);
- if (hsotg->params.dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (hsotg->params.dma_desc_enable > 0)
	dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n");
else
	dev_dbg(hsotg->dev, "Using Buffer DMA mode\n");
} else {
	dev_dbg(hsotg->dev, "Using Slave mode\n");
	hsotg->params.dma_desc_enable = 0;
}
- if (hsotg->params.dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
ahbcfg |= GAHBCFG_DMA_EN;
dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG);
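
The host_dma parameter keeps dwc2's tri-state convention: a negative value means "not set, autodetect", 0 disables, and a positive value enables, which is why every test reads "> 0" or "<= 0" rather than a plain truth test. A minimal userspace sketch of how that convention drives the AHB DMA bit, assuming GAHBCFG_DMA_EN is bit 5 as in the driver's hw.h (everything else here is illustrative):

	#include <stdio.h>
	#include <stdint.h>

	#define GAHBCFG_DMA_EN	(1 << 5)	/* bit value per dwc2 hw.h */

	/* Tri-state: <0 = not set (autodetect), 0 = off, >0 = on.
	 * Only a positive value may set the DMA enable bit.
	 */
	static uint32_t apply_host_dma(int host_dma, uint32_t ahbcfg)
	{
		if (host_dma > 0)
			ahbcfg |= GAHBCFG_DMA_EN;
		return ahbcfg;
	}

	int main(void)
	{
		printf("host_dma=-1 -> GAHBCFG 0x%02x\n", (unsigned)apply_host_dma(-1, 0));
		printf("host_dma= 1 -> GAHBCFG 0x%02x\n", (unsigned)apply_host_dma(1, 0));
		return 0;
	}
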
{
u32 intmsk;
- if (hsotg->params.dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_hc_enable_dma_ints(hsotg, chan);
}
hcchar |= HCCHAR_CHDIS;
- if (hsotg->params.dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA not enabled\n");
hcchar |= HCCHAR_CHENA;
dev_vdbg(hsotg->dev, "%s()\n", __func__);
if (chan->do_ping) {
- if (hsotg->params.dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "ping, no DMA\n");
dwc2_hc_do_ping(hsotg, chan);
TSIZ_SC_MC_PID_SHIFT);
}
- if (hsotg->params.dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
dwc2_writel((u32)chan->xfer_dma,
hsotg->regs + HCDMA(chan->hc_num));
if (dbg_hc(chan))
chan->xfer_started = 1;
chan->requests++;
- if (hsotg->params.dma_enable <= 0 &&
+ if (hsotg->params.host_dma <= 0 &&
!chan->ep_is_in && chan->xfer_len > 0)
/* Load OUT packet into the appropriate Tx FIFO */
dwc2_hc_write_packet(hsotg, chan);
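
In buffer DMA mode the controller fetches OUT data itself once HCDMA holds the bus address; in slave mode the CPU must push each OUT packet into the Tx FIFO by hand, which is what dwc2_hc_write_packet does. A rough userspace model of that slave-mode push (the FIFO pointer and packet layout are stand-ins, not the real register map):

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>

	/* Model of a slave-mode FIFO push: copy one packet into a
	 * memory-mapped FIFO window, 32 bits per write. A partial
	 * trailing word is zero-padded.
	 */
	static void push_packet(volatile uint32_t *fifo,
				const uint8_t *buf, size_t len)
	{
		size_t i, words = (len + 3) / 4;

		for (i = 0; i < words; i++) {
			uint32_t w = 0;
			size_t n = len - i * 4;

			memcpy(&w, buf + i * 4, n < 4 ? n : 4);
			*fifo = w;	/* one write per FIFO word */
		}
	}
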
u32 hcchar;
int i;
- if (hsotg->params.dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
/* Flush out any channel requests in slave mode */
for (i = 0; i < num_channels; i++) {
channel = hsotg->hc_ptr_array[i];
chan->do_ping = 0;
chan->ep_is_in = 0;
chan->data_pid_start = DWC2_HC_PID_SETUP;
- if (hsotg->params.dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = urb->setup_dma;
else
chan->xfer_buf = urb->setup_packet;
chan->do_ping = 0;
chan->data_pid_start = DWC2_HC_PID_DATA1;
chan->xfer_len = 0;
- if (hsotg->params.dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = hsotg->status_buf_dma;
else
chan->xfer_buf = hsotg->status_buf;
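
The same split recurs for every stage: with host_dma the channel gets a bus address in xfer_dma, otherwise a CPU pointer in xfer_buf. A compact illustration of the pattern (struct and names invented for the sketch):

	#include <stdint.h>

	/* Either/or addressing for one channel: the controller consumes
	 * dma_addr when DMA is on; the interrupt path consumes cpu_ptr
	 * when it is off. Only one of the two is meaningful at a time.
	 */
	struct xfer_addr {
		int is_dma;
		uint64_t dma_addr;
		void *cpu_ptr;
	};

	static void pick_addr(struct xfer_addr *x, int host_dma,
			      uint64_t dma_addr, void *cpu_ptr)
	{
		x->is_dma = host_dma > 0;
		if (x->is_dma)
			x->dma_addr = dma_addr;
		else
			x->cpu_ptr = cpu_ptr;
	}
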
frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
frame_desc->status = 0;
- if (hsotg->params.dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
chan->xfer_dma = urb->dma;
chan->xfer_dma += frame_desc->offset +
qtd->isoc_split_offset;
!dwc2_hcd_is_pipe_in(&urb->pipe_info))
urb->actual_length = urb->length;
- if (hsotg->params.dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
chan->xfer_dma = urb->dma + urb->actual_length;
else
chan->xfer_buf = (u8 *)urb->buf + urb->actual_length;
list_move_tail(&chan->split_order_list_entry,
&hsotg->split_order);
- if (hsotg->params.dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (hsotg->params.dma_desc_enable > 0) {
if (!chan->xfer_started ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
* The flag prevents any halts from getting into the request queue
* while multiple high-bandwidth packets are being queued.
*/
- if (hsotg->params.dma_enable <= 0 &&
+ if (hsotg->params.host_dma <= 0 &&
qh->channel->multi_count > 1)
hsotg->queuing_high_bandwidth = 1;
* controller automatically handles multiple packets for
* high-bandwidth transfers.
*/
- if (hsotg->params.dma_enable > 0 || status == 0 ||
+ if (hsotg->params.host_dma > 0 || status == 0 ||
qh->channel->requests == qh->channel->multi_count) {
qh_ptr = qh_ptr->next;
/*
exit:
if (no_queue_space || no_fifo_space ||
- (hsotg->params.dma_enable <= 0 &&
+ (hsotg->params.host_dma <= 0 &&
!list_empty(&hsotg->periodic_sched_assigned))) {
/*
* May need to queue more transactions as the request
tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
- if (hsotg->params.dma_enable <= 0 && qspcavail == 0) {
+ if (hsotg->params.host_dma <= 0 && qspcavail == 0) {
no_queue_space = 1;
break;
}
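
TXSTS_QSPCAVAIL is a classic mask-and-shift bitfield read: the request-queue free count lives in one byte of GNPTXSTS, and slave mode must stall when it reaches zero because the CPU, not a DMA engine, refills the queue. The idiom in isolation (the shift and mask mirror an 8-bit field at bit 16, but treat the values as illustrative rather than the authoritative register layout):

	#include <stdio.h>
	#include <stdint.h>

	#define TXSTS_QSPCAVAIL_SHIFT	16
	#define TXSTS_QSPCAVAIL_MASK	(0xff << TXSTS_QSPCAVAIL_SHIFT)

	/* Extract "queue slots available" from a raw status word. */
	static unsigned int qspcavail(uint32_t tx_status)
	{
		return (tx_status & TXSTS_QSPCAVAIL_MASK) >>
			TXSTS_QSPCAVAIL_SHIFT;
	}

	int main(void)
	{
		printf("%u\n", qspcavail(0x00000000));	/* 0: stop queuing */
		printf("%u\n", qspcavail(0x00040000));	/* 4 slots free */
		return 0;
	}
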
hsotg->non_periodic_qh_ptr->next;
} while (hsotg->non_periodic_qh_ptr != orig_qh_ptr);
- if (hsotg->params.dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
tx_status = dwc2_readl(hsotg->regs + GNPTXSTS);
qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
TXSTS_QSPCAVAIL_SHIFT;
}
}
- if (hsotg->params.dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (hsotg->status_buf) {
dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE,
hsotg->status_buf,
hsotg->last_frame_num = HFNUM_MAX_FRNUM;
/* Check if the bus driver or platform code has set up a dma_mask */
- if (hsotg->params.dma_enable > 0 &&
+ if (hsotg->params.host_dma > 0 &&
hsotg->dev->dma_mask == NULL) {
dev_warn(hsotg->dev,
"dma_mask not set, disabling DMA\n");
- hsotg->params.dma_enable = 0;
+ hsotg->params.host_dma = 0;
hsotg->params.dma_desc_enable = 0;
}
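
The downgrade above is defensive: rather than fail probe when platform code never provided a dma_mask, the driver falls back to slave mode, and dma_desc_enable is cleared too since descriptor DMA is meaningless without host DMA. The same shape as a self-contained check (the struct is a stand-in for dwc2_core_params):

	#include <stdio.h>
	#include <stdbool.h>

	struct params {
		int host_dma;
		int dma_desc_enable;
	};

	/* Downgrade to slave mode when the DMA addressing prerequisite
	 * is missing, instead of erroring out.
	 */
	static void sanitize_dma_params(struct params *p, bool have_dma_mask)
	{
		if (p->host_dma > 0 && !have_dma_mask) {
			fprintf(stderr, "dma_mask not set, disabling DMA\n");
			p->host_dma = 0;
			p->dma_desc_enable = 0;	/* depends on host_dma */
		}
	}
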
/* Set device flags indicating whether the HCD supports DMA */
- if (hsotg->params.dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
dev_warn(hsotg->dev, "can't set DMA mask\n");
if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
if (!hcd)
goto error1;
- if (hsotg->params.dma_enable <= 0)
+ if (hsotg->params.host_dma <= 0)
hcd->self.uses_dma = 0;
hcd->has_tt = 1;
* done after usb_add_hcd since that function allocates the DMA buffer
* pool.
*/
- if (hsotg->params.dma_enable > 0)
+ if (hsotg->params.host_dma > 0)
hsotg->status_buf = dma_alloc_coherent(hsotg->dev,
DWC2_HCD_STATUS_BUF_SIZE,
&hsotg->status_buf_dma, GFP_KERNEL);
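
Allocation and teardown must agree: the dma_free_coherent call in the earlier cleanup hunk releases exactly what this dma_alloc_coherent created, with the same size and handle, while the slave-mode path pairs kmalloc with kfree instead. A toy version of the dual path (malloc stands in for both kernel allocators; the fake handle marks the DMA case):

	#include <stdlib.h>
	#include <stdint.h>

	#define STATUS_BUF_SIZE	64	/* stand-in for DWC2_HCD_STATUS_BUF_SIZE */

	/* Two allocation paths with matching teardown obligations: the
	 * DMA path also yields a bus handle the controller will use.
	 */
	static void *alloc_status_buf(int host_dma, uint64_t *dma_handle)
	{
		*dma_handle = 0;
		if (host_dma > 0) {
			/* kernel: dma_alloc_coherent(), paired with
			 * dma_free_coherent() on teardown */
			*dma_handle = 0x1000;	/* fake bus address */
			return malloc(STATUS_BUF_SIZE);
		}
		/* kernel: kmalloc(), paired with kfree() */
		return malloc(STATUS_BUF_SIZE);
	}
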
/* Skip whole frame */
if (chan->qh->do_split &&
chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
- hsotg->params.dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
}
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- if (hsotg->params.dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "DMA enabled\n");
dwc2_release_channel(hsotg, chan, qtd, halt_status);
/* Handle xfer complete on CSPLIT */
if (chan->qh->do_split) {
if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
- hsotg->params.dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
if (qtd->complete_split &&
dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
qtd))
switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
- if (hsotg->params.dma_enable > 0 && chan->ep_is_in) {
+ if (hsotg->params.host_dma > 0 && chan->ep_is_in) {
/*
* NAK interrupts are enabled on bulk/control IN
* transfers in DMA mode for the sole purpose of
*/
if (chan->do_split && chan->complete_split) {
if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
- hsotg->params.dma_enable > 0) {
+ hsotg->params.host_dma > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
qtd->isoc_frame_index++;
dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
chnum);
- if (hsotg->params.dma_enable > 0) {
+ if (hsotg->params.host_dma > 0) {
dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
} else {
if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
qtd_list_entry);
- if (hsotg->params.dma_enable <= 0) {
+ if (hsotg->params.host_dma <= 0) {
if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
hcint &= ~HCINTMSK_CHHLTD;
}
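
The slave-mode filter above deals with a quirk: a Channel Halted bit that arrives together with any other status bit is spurious there, so it is stripped from the interrupt snapshot before the rest of the handler dispatches on it. Reduced to its essence (HCINTMSK_CHHLTD taken as bit 1, per the driver's hw.h):

	#include <stdint.h>

	#define HCINTMSK_CHHLTD	(1 << 1)

	/* Keep CHHLTD only when it is the sole bit set; otherwise it is
	 * noise in slave mode and must not trigger halt handling.
	 */
	static uint32_t filter_slave_hcint(uint32_t hcint)
	{
		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
			hcint &= ~HCINTMSK_CHHLTD;
		return hcint;
	}
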
static const struct dwc2_core_params params_hi6220 = {
.otg_cap = 2, /* No HNP/SRP capable */
.otg_ver = 0, /* 1.3 */
- .dma_enable = 1,
+ .host_dma = 1,
.dma_desc_enable = 0,
.dma_desc_fs_enable = 0,
.speed = 0, /* High Speed */
static const struct dwc2_core_params params_bcm2835 = {
.otg_cap = 0, /* HNP/SRP capable */
.otg_ver = 0, /* 1.3 */
- .dma_enable = 1,
+ .host_dma = 1,
.dma_desc_enable = 0,
.dma_desc_fs_enable = 0,
.speed = 0, /* High Speed */
static const struct dwc2_core_params params_rk3066 = {
.otg_cap = 2, /* non-HNP/non-SRP */
.otg_ver = -1,
- .dma_enable = -1,
+ .host_dma = -1,
.dma_desc_enable = 0,
.dma_desc_fs_enable = 0,
.speed = -1,
static const struct dwc2_core_params params_ltq = {
.otg_cap = 2, /* non-HNP/non-SRP */
.otg_ver = -1,
- .dma_enable = -1,
+ .host_dma = -1,
.dma_desc_enable = -1,
.dma_desc_fs_enable = -1,
.speed = -1,
static const struct dwc2_core_params params_amlogic = {
.otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
.otg_ver = -1,
- .dma_enable = 1,
+ .host_dma = 1,
.dma_desc_enable = 0,
.dma_desc_fs_enable = 0,
.speed = DWC2_SPEED_PARAM_HIGH,
static const struct dwc2_core_params params_default = {
.otg_cap = -1,
.otg_ver = -1,
- .dma_enable = -1,
+ .host_dma = -1,
/*
* Disable descriptor dma mode by default as the HW can support
hsotg->params.otg_cap = val;
}
-static void dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
+static void dwc2_set_param_host_dma(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;

if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
	valid = 0;
if (val < 0)
	valid = 0;

if (!valid) {
if (val >= 0)
dev_err(hsotg->dev,
- "%d invalid for dma_enable parameter. Check HW configuration.\n",
+ "%d invalid for host_dma parameter. Check HW configuration.\n",
val);
val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
- dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
+ dev_dbg(hsotg->dev, "Setting host_dma to %d\n", val);
}
- hsotg->params.dma_enable = val;
+ hsotg->params.host_dma = val;
}
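
Every dwc2_set_param_* setter follows this validate-then-fallback shape: reject values the synthesized hardware cannot honor, warn only when the caller asked for something explicit, and substitute an autodetected default. A compilable model of the host_dma case (the enum stands in for the GHWCFG2_*_ARCH values):

	#include <stdio.h>

	enum ghwcfg2_arch { ARCH_SLAVE_ONLY, ARCH_INT_DMA };

	/* Validate-then-fallback: -1 (not set) and impossible requests
	 * both resolve to what the hardware architecture supports.
	 */
	static int set_host_dma(enum ghwcfg2_arch arch, int val)
	{
		int valid = 1;

		if (val > 0 && arch == ARCH_SLAVE_ONLY)
			valid = 0;	/* DMA requested on a slave-only core */
		if (val < 0)
			valid = 0;	/* not set: autodetect */

		if (!valid) {
			if (val >= 0)	/* warn only on explicit bad requests */
				fprintf(stderr,
					"%d invalid for host_dma. Check HW configuration.\n",
					val);
			val = (arch != ARCH_SLAVE_ONLY);
		}
		return val;
	}
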
static void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
- if (val > 0 && (hsotg->params.dma_enable <= 0 ||
+ if (val > 0 && (hsotg->params.host_dma <= 0 ||
!hsotg->hw_params.dma_desc_enable))
valid = 0;
if (val < 0)
	valid = 0;

if (!valid) {
	if (val >= 0)
		dev_err(hsotg->dev,
			"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
			val);
-	val = (hsotg->params.dma_enable > 0 &&
+	val = (hsotg->params.host_dma > 0 &&
		hsotg->hw_params.dma_desc_enable);
	dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
}

hsotg->params.dma_desc_enable = val;
}
{
int valid = 1;
- if (val > 0 && (hsotg->params.dma_enable <= 0 ||
+ if (val > 0 && (hsotg->params.host_dma <= 0 ||
!hsotg->hw_params.dma_desc_enable))
valid = 0;
if (val < 0)
	valid = 0;

if (!valid) {
	if (val >= 0)
		dev_err(hsotg->dev,
			"%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n",
			val);
-	val = (hsotg->params.dma_enable > 0 &&
+	val = (hsotg->params.host_dma > 0 &&
		hsotg->hw_params.dma_desc_enable);
}

hsotg->params.dma_desc_fs_enable = val;
dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val);
}
struct dwc2_core_params *p = &hsotg->params;
dwc2_set_param_otg_cap(hsotg, params->otg_cap);
- dwc2_set_param_dma_enable(hsotg, params->dma_enable);
+ dwc2_set_param_host_dma(hsotg, params->host_dma);
dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable);
dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable);
dwc2_set_param_host_support_fs_ls_low_power(hsotg,