U1 = U + offset;
V = U + ((s0_params->width * s0_params->height) >> s);
V1 = V + offset;
- __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
- __raw_writel(V1, pxp->base + HW_PXP_PS_VBUF);
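+ /* PXP_PIX_FMT_YVU420P (YV12) stores the V plane before the U plane,
+ * so swap the U/V buffer addresses before programming the registers.
+ */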
+ if (s0_params->pixel_fmt == PXP_PIX_FMT_YVU420P) {
+ __raw_writel(V1, pxp->base + HW_PXP_PS_UBUF);
+ __raw_writel(U1, pxp->base + HW_PXP_PS_VBUF);
+ } else {
+ __raw_writel(U1, pxp->base + HW_PXP_PS_UBUF);
+ __raw_writel(V1, pxp->base + HW_PXP_PS_VBUF);
+ }
} else if ((s0_params->pixel_fmt == PXP_PIX_FMT_NV12) ||
(s0_params->pixel_fmt == PXP_PIX_FMT_NV21) ||
(s0_params->pixel_fmt == PXP_PIX_FMT_NV16) ||
struct pxp_tx_desc *desc = NULL;
spin_lock_irqsave(&pxp->lock, flags);
- if (list_empty(&head)) {
- pxp->pxp_ongoing = 0;
- spin_unlock_irqrestore(&pxp->lock, flags);
- return;
- }
desc = list_entry(head.next, struct pxp_tx_desc, list);
pxp_chan = to_pxp_channel(desc->txd.chan);
spin_unlock_irqrestore(&pxp->lock, flags);
}
-static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct list_head *list)
+static void pxpdma_dequeue(struct pxp_channel *pxp_chan, struct pxps *pxp)
{
+ unsigned long flags;
struct pxp_tx_desc *desc = NULL;
+
do {
desc = pxpdma_first_queued(pxp_chan);
- list_move_tail(&desc->list, list);
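+ /* the global "head" work list is shared with the PxP worker thread;
+ * guard every splice onto it with pxp->lock.
+ */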
+ spin_lock_irqsave(&pxp->lock, flags);
+ list_move_tail(&desc->list, &head);
+ spin_unlock_irqrestore(&pxp->lock, flags);
} while (!list_empty(&pxp_chan->queue));
}
struct pxp_tx_desc *desc = to_tx_desc(tx);
struct pxp_channel *pxp_chan = to_pxp_channel(tx->chan);
dma_cookie_t cookie;
- unsigned long flags;
dev_dbg(&pxp_chan->dma_chan.dev->device, "received TX\n");
- mutex_lock(&pxp_chan->chan_mutex);
+ /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
+ spin_lock(&pxp_chan->lock);
cookie = pxp_chan->dma_chan.cookie;
if (++cookie < 0)
	cookie = 1;
tx->cookie = cookie;
pxp_chan->dma_chan.cookie = cookie;
- /* pxp_chan->lock can be taken under ichan->lock, but not v.v. */
- spin_lock_irqsave(&pxp_chan->lock, flags);
-
/* Here we add the tx descriptor to our PxP task queue. */
list_add_tail(&desc->list, &pxp_chan->queue);
- spin_unlock_irqrestore(&pxp_chan->lock, flags);
+ spin_unlock(&pxp_chan->lock);
dev_dbg(&pxp_chan->dma_chan.dev->device, "done TX\n");
- mutex_unlock(&pxp_chan->chan_mutex);
return cookie;
}
struct pxp_tx_desc *desc = NULL;
struct pxp_tx_desc *first = NULL, *prev = NULL;
struct scatterlist *sg;
- unsigned long flags;
dma_addr_t phys_addr;
int i;
if (unlikely(sg_len < 2))
return NULL;
- spin_lock_irqsave(&pxp_chan->lock, flags);
for_each_sg(sgl, sg, sg_len, i) {
desc = pxpdma_desc_alloc(pxp_chan);
if (!desc) {
- spin_unlock_irqrestore(&pxp_chan->lock, flags);
dev_err(chan->device->dev, "not enough memory to allocate tx descriptor\n");
return NULL;
}
prev = desc;
}
- spin_unlock_irqrestore(&pxp_chan->lock, flags);
pxp->pxp_conf_state.layer_nr = sg_len;
first->txd.flags = tx_flags;
struct pxp_channel *pxp_chan = to_pxp_channel(chan);
struct pxp_dma *pxp_dma = to_pxp_dma(chan->device);
struct pxps *pxp = to_pxp(pxp_dma);
- unsigned long flags0, flags;
- spin_lock_irqsave(&pxp->lock, flags0);
- spin_lock_irqsave(&pxp_chan->lock, flags);
+ spin_lock(&pxp_chan->lock);
- if (!list_empty(&pxp_chan->queue)) {
- pxpdma_dequeue(pxp_chan, &head);
- pxp_chan->status = PXP_CHANNEL_READY;
- } else {
- spin_unlock_irqrestore(&pxp_chan->lock, flags);
- spin_unlock_irqrestore(&pxp->lock, flags0);
+ if (list_empty(&pxp_chan->queue)) {
+ spin_unlock(&pxp_chan->lock);
return;
}
- spin_unlock_irqrestore(&pxp_chan->lock, flags);
- spin_unlock_irqrestore(&pxp->lock, flags0);
+
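+ /* pxpdma_dequeue() acquires pxp->lock while pxp_chan->lock is held,
+ * establishing the lock order pxp_chan->lock -> pxp->lock.
+ */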
+ pxpdma_dequeue(pxp_chan, pxp);
+ pxp_chan->status = PXP_CHANNEL_READY;
+
+ spin_unlock(&pxp_chan->lock);
pxp_clk_enable(pxp);
wake_up_interruptible(&pxp->thread_waitq);
if (cmd != DMA_TERMINATE_ALL)
return -ENXIO;
- mutex_lock(&pxp_chan->chan_mutex);
+ spin_lock(&pxp_chan->lock);
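+ /* __pxp_terminate_all() must be called with pxp_chan->lock held */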
__pxp_terminate_all(chan);
- mutex_unlock(&pxp_chan->chan_mutex);
+ spin_unlock(&pxp_chan->lock);
return 0;
}
{
struct pxp_channel *pxp_chan = to_pxp_channel(chan);
- mutex_lock(&pxp_chan->chan_mutex);
+ spin_lock(&pxp_chan->lock);
__pxp_terminate_all(chan);
pxp_chan->status = PXP_CHANNEL_FREE;
- mutex_unlock(&pxp_chan->chan_mutex);
+ spin_unlock(&pxp_chan->lock);
}
static enum dma_status pxp_tx_status(struct dma_chan *chan,
struct dma_chan *dma_chan = &pxp_chan->dma_chan;
spin_lock_init(&pxp_chan->lock);
- mutex_init(&pxp_chan->chan_mutex);
/* Only one EOF IRQ for PxP, shared by all channels */
pxp_chan->eof_irq = pxp->irq;
if (signal_pending(current))
continue;
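+ /* exit cleanly once kthread_stop() is called on driver removal */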
+ if (kthread_should_stop())
+ break;
+
spin_lock_irqsave(&pxp->lock, flags);
pxp->pxp_ongoing = 1;
spin_unlock_irqrestore(&pxp->lock, flags);