struct list_head ep_list;
int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
- u32 *registers;
+ u32 __iomem *registers;
int fatal_error;
struct mutex register_mutex;
struct xilly_endpoint_hardware {
struct module *owner;
- void (*sync_single_for_cpu)(struct xilly_endpoint *,
+ void (*hw_sync_sgl_for_cpu)(struct xilly_endpoint *,
dma_addr_t,
size_t,
int);
- void (*sync_single_for_device)(struct xilly_endpoint *,
+ void (*hw_sync_sgl_for_device)(struct xilly_endpoint *,
dma_addr_t,
size_t,
int);
static LIST_HEAD(list_of_endpoints);
static struct mutex ep_list_lock;
-struct workqueue_struct *xillybus_wq;
+static struct workqueue_struct *xillybus_wq;
/*
* Locking scheme: Mutexes protect invocations of character device methods.
buf_size = ep->msg_buf_size/sizeof(u32);
- ep->ephw->sync_single_for_cpu(ep,
+ ep->ephw->hw_sync_sgl_for_cpu(ep,
ep->msgbuf_dma_addr,
ep->msg_buf_size,
DMA_FROM_DEVICE);
pr_err("xillybus: Lost sync with "
"interrupt messages. Stopping.\n");
else {
- ep->ephw->sync_single_for_device(
+ ep->ephw->hw_sync_sgl_for_device(
ep,
ep->msgbuf_dma_addr,
ep->msg_buf_size,
}
}
- ep->ephw->sync_single_for_device(ep,
+ ep->ephw->hw_sync_sgl_for_device(ep,
ep->msgbuf_dma_addr,
ep->msg_buf_size,
DMA_FROM_DEVICE);
return rc;
}
- endpoint->ephw->sync_single_for_cpu(
+ endpoint->ephw->hw_sync_sgl_for_cpu(
channel->endpoint,
channel->wr_buffers[0]->dma_addr,
channel->wr_buf_size,
return 0; /* Success */
}
-static ssize_t xillybus_read(struct file *filp, char *userbuf, size_t count,
- loff_t *f_pos)
+static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *f_pos)
{
ssize_t rc;
unsigned long flags;
if (!empty) { /* Go on, now without the spinlock */
if (bufpos == 0) /* Position zero means it's virgin */
- channel->endpoint->ephw->sync_single_for_cpu(
+ channel->endpoint->ephw->hw_sync_sgl_for_cpu(
channel->endpoint,
channel->wr_buffers[bufidx]->dma_addr,
channel->wr_buf_size,
if (bufferdone) {
channel->endpoint->ephw->
- sync_single_for_device
+ hw_sync_sgl_for_device
(
channel->endpoint,
channel->wr_buffers[bufidx]->
else
channel->rd_host_buf_idx++;
- channel->endpoint->ephw->sync_single_for_device(
+ channel->endpoint->ephw->hw_sync_sgl_for_device(
channel->endpoint,
channel->rd_buffers[bufidx]->dma_addr,
channel->rd_buf_size,
}
-static ssize_t xillybus_write(struct file *filp, const char *userbuf,
+static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
size_t count, loff_t *f_pos)
{
ssize_t rc;
if ((bufpos == 0) || /* Zero means it's virgin */
(channel->rd_leftovers[3] != 0)) {
- channel->endpoint->ephw->sync_single_for_cpu(
+ channel->endpoint->ephw->hw_sync_sgl_for_cpu(
channel->endpoint,
channel->rd_buffers[bufidx]->dma_addr,
channel->rd_buf_size,
if (bufferdone) {
channel->endpoint->ephw->
- sync_single_for_device(
+ hw_sync_sgl_for_device(
channel->endpoint,
channel->rd_buffers[bufidx]->
dma_addr,
return 0;
}
-loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
+static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
{
struct xilly_channel *channel = filp->private_data;
loff_t pos = filp->f_pos;
return addr;
}
-void xilly_unmap_single_of(struct xilly_dma *entry)
+static void xilly_unmap_single_of(struct xilly_dma *entry)
{
dma_unmap_single(entry->dev,
entry->dma_addr,
static struct xilly_endpoint_hardware of_hw = {
.owner = THIS_MODULE,
- .sync_single_for_cpu = xilly_dma_sync_single_for_cpu_of,
- .sync_single_for_device = xilly_dma_sync_single_for_device_of,
+ .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of,
+ .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of,
.map_single = xilly_map_single_of,
.unmap_single = xilly_unmap_single_of
};
return addr;
}
-void xilly_unmap_single_pci(struct xilly_dma *entry)
+static void xilly_unmap_single_pci(struct xilly_dma *entry)
{
pci_unmap_single(entry->pdev,
entry->dma_addr,
static struct xilly_endpoint_hardware pci_hw = {
.owner = THIS_MODULE,
- .sync_single_for_cpu = xilly_dma_sync_single_for_cpu_pci,
- .sync_single_for_device = xilly_dma_sync_single_for_device_pci,
+ .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
+ .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
.map_single = xilly_map_single_pci,
.unmap_single = xilly_unmap_single_pci
};