/*
-
* drivers/staging/android/ion/compat_ion.h
*
* Copyright (C) 2013 Google, Inc.
/*
-
+ *
* drivers/staging/android/ion/ion.c
*
* Copyright (C) 2011 Google, Inc.
buffer->size = len;
INIT_LIST_HEAD(&buffer->vmas);
mutex_init(&buffer->lock);
- /* this will set up dma addresses for the sglist -- it is not
- technically correct as per the dma api -- a specific
- device isn't really taking ownership here. However, in practice on
- our systems the only dma_address space is physical addresses.
- Additionally, we can't afford the overhead of invalidating every
- allocation via dma_map_sg. The implicit contract here is that
- memory coming from the heaps is ready for dma, ie if it has a
- cached mapping that mapping has been invalidated */
+ /*
+ * this will set up dma addresses for the sglist -- it is not
+ * technically correct as per the dma api -- a specific
+ * device isn't really taking ownership here. However, in practice on
+ * our systems the only dma_address space is physical addresses.
+ * Additionally, we can't afford the overhead of invalidating every
+ * allocation via dma_map_sg. The implicit contract here is that
+ * memory coming from the heaps is ready for dma, i.e. if it has a
+ * cached mapping, that mapping has been invalidated
+ */
for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
sg_dma_address(sg) = sg_phys(sg);
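For contrast, the DMA-API-correct path the comment alludes to would look roughly like the sketch below. It is a hypothetical helper, not part of this patch: it assumes a real struct device is taking ownership of the buffer, which is exactly the per-allocation cache maintenance cost ion is avoiding here.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper (not in this patch): map the buffer for a specific
 * device as the dma api expects.  dma_map_sg() performs the cache
 * maintenance and fills in sg_dma_address() for every entry.
 */
static int example_map_buffer_for_dma(struct device *dev,
				      struct sg_table *table)
{
	int nents = dma_map_sg(dev, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	return nents ? 0 : -ENOMEM;
}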
mutex_lock(&dev->buffer_lock);
get_task_struct(current->group_leader);
task_lock(current->group_leader);
pid = task_pid_nr(current->group_leader);
- /* don't bother to store task struct for kernel threads,
- they can't be killed anyway */
+ /*
+ * don't bother to store the task struct for kernel threads;
+ * they can't be killed anyway
+ */
if (current->group_leader->flags & PF_KTHREAD) {
put_task_struct(current->group_leader);
task = NULL;
heap->dev = dev;
down_write(&dev->lock);
- /* use negative heap->id to reverse the priority -- when traversing
- the list later attempt higher id numbers first */
+ /*
+ * use negative heap->id to reverse the priority -- when traversing
+ * the list later, attempt higher id numbers first
+ */
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
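To illustrate the traversal order this produces, here is a hedged sketch (example_find_heap() is hypothetical; it only mirrors how the allocator walks dev->heaps): because every node is initialised with -heap->id, plist keeps the list sorted so the highest heap id is visited first.

/* Hypothetical sketch -- the caller is assumed to hold dev->lock. */
static struct ion_heap *example_find_heap(struct ion_device *dev,
					  unsigned int heap_id_mask)
{
	struct ion_heap *heap;

	plist_for_each_entry(heap, &dev->heaps, node) {
		if ((1 << heap->id) & heap_id_mask)
			return heap;	/* highest matching id wins */
	}

	return NULL;
}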
debug_file = debugfs_create_file(heap->name, 0664,
struct ion_client;
struct ion_buffer;
-/* This should be removed some day when phys_addr_t's are fully
- plumbed in the kernel, and all instances of ion_phys_addr_t should
- be converted to phys_addr_t. For the time being many kernel interfaces
- do not accept phys_addr_t's that would have to */
+/*
+ * This should be removed some day when phys_addr_t's are fully
+ * plumbed in the kernel, and all instances of ion_phys_addr_t should
+ * be converted to phys_addr_t. For the time being many kernel interfaces
+ * do not accept phys_addr_t's that would have to be converted
+ */
#define ion_phys_addr_t unsigned long
/**
return ERR_PTR(-ENOMEM);
cma_heap->heap.ops = &ion_cma_ops;
- /* get device from private heaps data, later it will be
- * used to make the link with reserved CMA memory */
+ /*
+ * get the device from the heap's private data; later it will be
+ * used to make the link with the reserved CMA memory
+ */
cma_heap->dev = data->priv;
cma_heap->heap.type = ION_HEAP_TYPE_DMA;
return &cma_heap->heap;
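A rough sketch of how the stored device is used later follows. The helper is hypothetical; the assumption is that allocations go through the coherent DMA API, which draws from the CMA area reserved for that device.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical sketch: cma_heap->dev ties allocations to the reserved
 * CMA region via the coherent DMA API.
 */
static void *example_cma_alloc(struct device *dev, size_t len,
			       dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, len, handle,
				  GFP_HIGHUSER | __GFP_ZERO);
}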
* to keep a pool of pre allocated memory to use from your heap. Keeping
* a pool of memory that is ready for dma, ie any cached mapping have been
* invalidated from the cache, provides a significant performance benefit on
- * many systems */
+ * many systems
+ */
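As a hedged sketch of the intended usage (assuming the ion_page_pool_* helpers declared further down in this header), a heap would create a pool once and then recycle dma-ready pages through it instead of going back to the page allocator:

/* Hypothetical round-trip, not part of this patch. */
static void example_pool_roundtrip(void)
{
	struct ion_page_pool *pool;
	struct page *page;

	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
	if (!pool)
		return;

	page = ion_page_pool_alloc(pool);	/* page is already dma ready */
	if (page)
		ion_page_pool_free(pool, page);	/* recycle instead of freeing */

	ion_page_pool_destroy(pool);
}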
/**
* struct ion_page_pool - pagepool struct
struct scatterlist *sg;
int i;
- /* uncached pages come from the page pools, zero them before returning
- for security purposes (other allocations are zerod at alloc time */
+ /*
+ * uncached pages come from the page pools, zero them before returning
+ * for security purposes (other allocations are zeroed at alloc time)
+ */
if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
ion_heap_buffer_zero(buffer);
ION_HEAP_TYPE_CARVEOUT,
ION_HEAP_TYPE_CHUNK,
ION_HEAP_TYPE_DMA,
- ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
- are at the end of this enum */
+ ION_HEAP_TYPE_CUSTOM, /*
+ * must be last so device specific heaps
+ * are always at the end of this enum
+ */
ION_NUM_HEAPS = 16,
};
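A hypothetical illustration (not from this patch) of why ION_HEAP_TYPE_CUSTOM has to stay last: device specific heaps define their own types above it, so anything inserted after it in the core enum would collide with them.

/* Hypothetical vendor heap types, only valid because ION_HEAP_TYPE_CUSTOM
 * marks the end of the core types.
 */
enum example_vendor_heap_type {
	EXAMPLE_HEAP_TYPE_SECURE = ION_HEAP_TYPE_CUSTOM + 1,
	EXAMPLE_HEAP_TYPE_MULTIMEDIA,
};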
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
-#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
- cached, ion will do cache
- maintenance when the buffer is
- mapped for dma */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will created
- at mmap time, if this is set
- caches must be managed manually */
+#define ION_FLAG_CACHED 1 /*
+ * mappings of this buffer should be
+ * cached; ion will do cache
+ * maintenance when the buffer is
+ * mapped for dma
+ */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2 /*
+ * mappings of this buffer will be
+ * created at mmap time; if this is
+ * set, caches must be managed
+ * manually
+ */
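For reference, a hedged userspace sketch of how these flags are passed to the driver. The heap_id_mask value and the header path are assumptions; real heap ids depend on how the board registers its heaps.

#include <sys/ioctl.h>
#include <linux/ion.h>	/* assumed install path of the uapi header */

/* Hypothetical example: request a cached buffer so that ion does the
 * cache maintenance when the buffer is mapped for dma.
 */
static int example_alloc_cached(int ion_fd)
{
	struct ion_allocation_data data = {
		.len = 4096,
		.align = 4096,
		.heap_id_mask = 1 << 0,	/* assumed heap id */
		.flags = ION_FLAG_CACHED,
	};

	if (ioctl(ion_fd, ION_IOC_ALLOC, &data) < 0)
		return -1;

	return data.handle;	/* ion_user_handle_t handle to the buffer */
}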
/**
* DOC: Ion Userspace API