After commit ffcad666548417ef21937e0a755d85ab922313a9 was pushed,
adding this support in the PXP device driver is also necessary. This
change allows users to submit more than one PXP task followed by
only one wait-for-completion ioctl. It means that users can wait for
more than one task to finish by calling a single PXP_IOC_WAIT4CMPLT ioctl.
Signed-off-by: Fancy Fang <B47543@freescale.com>
};
struct pxp_chan_info {
};
/*
 * Per-channel bookkeeping node: ties an allocated DMA channel to the
 * driver's channel list (list manipulation elsewhere in this file is
 * done under pxp_chan_lock via list_add_tail/list_for_each_entry).
 */
struct pxp_chan_info {
struct dma_chan *dma_chan;   /* underlying dmaengine channel for this node */
struct list_head list;       /* linkage into the driver's channel list */
};
struct dma_chan *dma_chan;
struct list_head list;
};
struct dma_chan *chan = tx_desc->txd.chan;
struct pxp_channel *pxp_chan = to_pxp_channel(chan);
int chan_id = pxp_chan->dma_chan.chan_id;
struct dma_chan *chan = tx_desc->txd.chan;
struct pxp_channel *pxp_chan = to_pxp_channel(chan);
int chan_id = pxp_chan->dma_chan.chan_id;
pr_debug("DMA Done ISR, chan_id %d\n", chan_id);
pr_debug("DMA Done ISR, chan_id %d\n", chan_id);
- irq_info[chan_id].irq_pending++;
+ spin_lock_irqsave(&(irq_info[chan_id].lock), flags);
+ irq_info[chan_id].irq_pending--;
irq_info[chan_id].hist_status = tx_desc->hist_status;
irq_info[chan_id].hist_status = tx_desc->hist_status;
+ spin_unlock_irqrestore(&(irq_info[chan_id].lock), flags);
wake_up_interruptible(&(irq_info[chan_id].waitq));
}
wake_up_interruptible(&(irq_info[chan_id].waitq));
}
dma_cookie_t cookie;
int chan_id;
int i, length, ret;
dma_cookie_t cookie;
int chan_id;
int i, length, ret;
ret = copy_from_user(&pxp_conf,
(struct pxp_config_data *)arg,
ret = copy_from_user(&pxp_conf,
(struct pxp_config_data *)arg,
if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
return -ENODEV;
if (chan_id < 0 || chan_id >= NR_PXP_VIRT_CHANNEL)
return -ENODEV;
- init_waitqueue_head(&(irq_info[chan_id].waitq));
-
/* find the channel */
spin_lock(&pxp_chan_lock);
list_for_each_entry(info, &list, list) {
/* find the channel */
spin_lock(&pxp_chan_lock);
list_for_each_entry(info, &list, list) {
+ spin_lock_irqsave(&(irq_info[chan_id].lock), flags);
+ irq_info[chan_id].irq_pending++;
+ spin_unlock_irqrestore(&(irq_info[chan_id].lock), flags);
+
list_add_tail(&info->list, &list);
spin_unlock(&pxp_chan_lock);
list_add_tail(&info->list, &list);
spin_unlock(&pxp_chan_lock);
+ init_waitqueue_head(&(irq_info[info->dma_chan->chan_id].waitq));
if (put_user
(info->dma_chan->chan_id, (u32 __user *) arg))
return -EFAULT;
if (put_user
(info->dma_chan->chan_id, (u32 __user *) arg))
return -EFAULT;
}
case PXP_IOC_CONFIG_CHAN:
{
}
case PXP_IOC_CONFIG_CHAN:
{
int ret;
ret = pxp_ioc_config_chan(arg);
int ret;
ret = pxp_ioc_config_chan(arg);
kfree(rec);
printk(KERN_ERR
"Physical memory allocation error!\n");
kfree(rec);
printk(KERN_ERR
"Physical memory allocation error!\n");
}
ret = copy_to_user((void __user *)arg, &(rec->mem),
sizeof(struct pxp_mem_desc));
if (ret) {
kfree(rec);
ret = -EFAULT;
}
ret = copy_to_user((void __user *)arg, &(rec->mem),
sizeof(struct pxp_mem_desc));
if (ret) {
kfree(rec);
ret = -EFAULT;
}
spin_lock(&pxp_mem_lock);
}
spin_lock(&pxp_mem_lock);
ret = wait_event_interruptible
(irq_info[chan_id].waitq,
ret = wait_event_interruptible
(irq_info[chan_id].waitq,
- (irq_info[chan_id].irq_pending != 0));
+ (irq_info[chan_id].irq_pending == 0));
if (ret < 0) {
printk(KERN_WARNING
if (ret < 0) {
printk(KERN_WARNING
- "pxp interrupt received.\n");
+ "WAIT4CMPLT: signal received.\n");
- } else
- irq_info[chan_id].irq_pending--;
chan_handle.hist_status = irq_info[chan_id].hist_status;
ret = copy_to_user((struct pxp_chan_handle *)arg,
chan_handle.hist_status = irq_info[chan_id].hist_status;
ret = copy_to_user((struct pxp_chan_handle *)arg,
int register_pxp_device(void)
{
int register_pxp_device(void)
{
ret = misc_register(&pxp_device_miscdev);
if (ret)
return ret;
ret = misc_register(&pxp_device_miscdev);
if (ret)
return ret;
+ for (i = 0; i < NR_PXP_VIRT_CHANNEL; i++)
+ spin_lock_init(&(irq_info[i].lock));
+
pr_debug("PxP_Device registered Successfully\n");
return 0;
}
pr_debug("PxP_Device registered Successfully\n");
return 0;
}
wait_queue_head_t waitq;
int irq_pending;
int hist_status;
wait_queue_head_t waitq;
int irq_pending;
int hist_status;
};
/* Map an embedded dma_async_tx_descriptor (member .txd) back to its
 * enclosing struct pxp_tx_desc. */
#define to_tx_desc(tx) container_of(tx, struct pxp_tx_desc, txd)
};
#define to_tx_desc(tx) container_of(tx, struct pxp_tx_desc, txd)