Currently only a few of the API calls are exported. This creates a
problem when the knav* modules are built as modules and another user
module, such as netcp_core, which is also built as a module, tries to
use these API calls. This patch exports these APIs to address the issue.
This is needed to support allmodconfig for ARM.
Signed-off-by: Murali Karicheri <m-karicheri2@ti.com>
Signed-off-by: Santosh Shilimkar <ssantosh@kernel.org>
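With these symbols exported as EXPORT_SYMBOL_GPL(), a consumer such as
netcp_core can resolve them at module load time even when both sides are
built as modules. Below is a minimal, hypothetical consumer sketch (not
part of this patch) showing the kind of calls that would otherwise fail
to link; the header path, pool name, descriptor count and region index
are assumptions for illustration only.

/*
 * Hypothetical consumer module, in the spirit of netcp_core.
 * Assumes the prototypes declared in include/linux/soc/ti/knav_qmss.h.
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/soc/ti/knav_qmss.h>

static void *demo_pool;	/* hypothetical descriptor pool handle */

static int __init knav_demo_init(void)
{
	void *desc;
	dma_addr_t dma;

	/* carve a small pool of descriptors out of region 0 */
	demo_pool = knav_pool_create("demo-pool", 32, 0);
	if (IS_ERR(demo_pool))
		return PTR_ERR(demo_pool);

	/* each of these calls resolves against an exported symbol */
	desc = knav_pool_desc_get(demo_pool);
	if (!IS_ERR(desc)) {
		dma = knav_pool_desc_virt_to_dma(demo_pool, desc);
		pr_info("desc %p -> dma %pad, %d descriptors left\n",
			desc, &dma, knav_pool_count(demo_pool));
		knav_pool_desc_put(demo_pool, desc);
	}
	return 0;
}

static void __exit knav_demo_exit(void)
{
	knav_pool_destroy(demo_pool);
}

module_init(knav_demo_init);
module_exit(knav_demo_exit);
/* EXPORT_SYMBOL_GPL() exports require a GPL-compatible module license */
MODULE_LICENSE("GPL");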
atomic_inc(&qh->stats.pushes);
return 0;
}
+EXPORT_SYMBOL_GPL(knav_queue_push);
/**
* knav_queue_pop() - pop data (or descriptor) from the head of a queue
atomic_inc(&qh->stats.pops);
return dma;
}
+EXPORT_SYMBOL_GPL(knav_queue_pop);
/* carve out descriptors and push into queue */
static void kdesc_fill_pool(struct knav_pool *pool)
struct knav_pool *pool = ph;
return pool->region->dma_start + (virt - pool->region->virt_start);
}
+EXPORT_SYMBOL_GPL(knav_pool_desc_virt_to_dma);
void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma)
{
struct knav_pool *pool = ph;
return pool->region->virt_start + (dma - pool->region->dma_start);
}
+EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
/**
* knav_pool_create() - Create a pool of descriptors
data = knav_pool_desc_dma_to_virt(pool, dma);
return data;
}
+EXPORT_SYMBOL_GPL(knav_pool_desc_get);
/**
* knav_pool_desc_put() - return a descriptor to the pool
dma = knav_pool_desc_virt_to_dma(pool, desc);
knav_queue_push(pool->queue, dma, pool->region->desc_size, 0);
}
+EXPORT_SYMBOL_GPL(knav_pool_desc_put);
/**
* knav_pool_desc_map() - Map descriptor for DMA transfer
+EXPORT_SYMBOL_GPL(knav_pool_desc_map);
/**
* knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
prefetch(desc);
return desc;
}
+EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
/**
* knav_pool_count() - Get the number of descriptors in pool.
struct knav_pool *pool = ph;
return knav_queue_get_count(pool->queue);
}
+EXPORT_SYMBOL_GPL(knav_pool_count);
static void knav_queue_setup_region(struct knav_device *kdev,
struct knav_region *region)