#include <linux/pm_runtime.h>
+#if gcdANDROID_NATIVE_FENCE_SYNC
+#include <linux/file.h>
+#include "gc_hal_kernel_sync.h"
+#endif
+
+
#define _GC_OBJ_ZONE gcvZONE_OS
/*******************************************************************************
{
struct idr idr;
spinlock_t lock;
+ gctINT curr;
}
gcsINTEGER_DB;
/* signal id database. */
gcsINTEGER_DB signalDB;
+#if gcdANDROID_NATIVE_FENCE_SYNC
+ /* Lock. */
+ gctPOINTER syncPointMutex;
+
+ /* sync point id database. */
+ gcsINTEGER_DB syncPointDB;
+#endif
+
gcsUSER_MAPPING_PTR userMap;
gctPOINTER debugLock;
}
gcsSIGNAL;
+#if gcdANDROID_NATIVE_FENCE_SYNC
+typedef struct _gcsSYNC_POINT * gcsSYNC_POINT_PTR;
+/* Sync point record backing an Android native fence; instances are
+** looked up by integer id in the OS object's syncPointDB. */
+typedef struct _gcsSYNC_POINT
+{
+ /* The reference counter (dropped in gckOS_DestroySyncPoint). */
+ atomic_t ref;
+
+ /* Signaled state: 0 until gckOS_SignalSyncPoint sets it to gcvTRUE. */
+ atomic_t state;
+
+ /* timeline the fence point was attached to by gckOS_CreateNativeFence;
+ ** NOTE(review): creation path should set this to gcvNULL — confirm. */
+ struct sync_timeline * timeline;
+
+ /* ID allocated from syncPointDB. */
+ gctUINT32 id;
+}
+gcsSYNC_POINT;
+#endif
+
typedef struct _gcsPageInfo * gcsPageInfo_PTR;
typedef struct _gcsPageInfo
{
mdlMap->pid = ProcessID;
mdlMap->vmaAddr = gcvNULL;
mdlMap->vma = gcvNULL;
+ mdlMap->count = 0;
mdlMap->next = Mdl->maps;
Mdl->maps = mdlMap;
)
{
int result;
+ gctINT next;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
- spin_lock(&Database->lock);
- /* Try to get a id greater than 0. */
- result = idr_alloc(&Database->idr, KernelPointer, 1, 0,
- GFP_KERNEL | gcdNOWARN);
- spin_unlock(&Database->lock);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+ idr_preload(GFP_KERNEL | gcdNOWARN);
+
+ spin_lock(&Database->lock);
+
+ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
+ result = idr_alloc(&Database->idr, KernelPointer, next, 0, GFP_ATOMIC);
- if (result < 0)
- return gcvSTATUS_OUT_OF_RESOURCES;
+ if (!result)
+ {
+ Database->curr = *Id;
+ }
+
+ spin_unlock(&Database->lock);
+
+ idr_preload_end();
- *Id = result;
+ if (result < 0)
+ {
+ return gcvSTATUS_OUT_OF_RESOURCES;
+ }
+
+ *Id = result;
#else
again:
if (idr_pre_get(&Database->idr, GFP_KERNEL | gcdNOWARN) == 0)
spin_lock(&Database->lock);
- /* Try to get a id greater than 0. */
- result = idr_get_new_above(&Database->idr, KernelPointer, 1, Id);
+ next = (Database->curr + 1 <= 0) ? 1 : Database->curr + 1;
+
+ /* Try to get a id greater than current id. */
+ result = idr_get_new_above(&Database->idr, KernelPointer, next, Id);
+
+ if (!result)
+ {
+ Database->curr = *Id;
+ }
spin_unlock(&Database->lock);
/* Initialize signal id database. */
idr_init(&os->signalDB.idr);
+#if gcdANDROID_NATIVE_FENCE_SYNC
+ /*
+ * Initialize the sync point manager.
+ */
+
+ /* Initialize mutex. */
+ gcmkONERROR(gckOS_CreateMutex(os, &os->syncPointMutex));
+
+ /* Initialize sync point id database lock. */
+ spin_lock_init(&os->syncPointDB.lock);
+
+ /* Initialize sync point id database. */
+ idr_init(&os->syncPointDB.idr);
+#endif
+
#if gcdUSE_NON_PAGED_MEMORY_CACHE
os->cacheSize = 0;
os->cacheHead = gcvNULL;
return gcvSTATUS_OK;
OnError:
+
+#if gcdANDROID_NATIVE_FENCE_SYNC
+ if (os->syncPointMutex != gcvNULL)
+ {
+ gcmkVERIFY_OK(
+ gckOS_DeleteMutex(os, os->syncPointMutex));
+ }
+#endif
+
if (os->signalMutex != gcvNULL)
{
gcmkVERIFY_OK(
_FreeAllNonPagedMemoryCache(Os);
#endif
+#if gcdANDROID_NATIVE_FENCE_SYNC
+ /*
+ * Destroy the sync point manager.
+ */
+
+ /* Destroy the mutex. */
+ gcmkVERIFY_OK(gckOS_DeleteMutex(Os, Os->syncPointMutex));
+#endif
+
/*
* Destroy the signal manager.
*/
&mdl->dmaHandle,
GFP_KERNEL | gcdNOWARN);
}
+#if gcdUSE_NON_PAGED_MEMORY_CACHE
+ if(addr == gcvNULL)
+ {
+ MEMORY_UNLOCK(Os);
+ locked = gcvFALSE;
+ /*Free all cache and try again*/
+ _FreeAllNonPagedMemoryCache(Os);
+ MEMORY_LOCK(Os);
+ locked = gcvTRUE;
+ addr = dma_alloc_coherent(gcvNULL,
+ mdl->numPages * PAGE_SIZE,
+ &mdl->dmaHandle,
+ GFP_KERNEL | gcdNOWARN);
+ }
+#endif
#else
size = mdl->numPages * PAGE_SIZE;
order = get_order(size);
gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
}
- if ((Os->device->baseAddress & 0x80000000) != (mdl->dmaHandle & 0x80000000))
- {
- mdl->dmaHandle = (mdl->dmaHandle & ~0x80000000)
- | (Os->device->baseAddress & 0x80000000);
- }
-
mdl->addr = addr;
/* Return allocated memory. */
/* Verify the arguments. */
gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
+ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
gcmkVERIFY_ARGUMENT(Data != gcvNULL);
*Data = readl((gctUINT8 *)Os->device->registerBases[Core] + Address);
{
gcmkHEADER_ARG("Os=0x%X Core=%d Address=0x%X Data=0x%08x", Os, Core, Address, Data);
+ gcmkVERIFY_ARGUMENT(Address < Os->device->requestedRegisterMemSizes[Core]);
+
writel(Data, (gctUINT8 *)Os->device->registerBases[Core] + Address);
/* Success. */
if (Delay > 0)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
- ktime_t delay = ktime_set(0, Delay * NSEC_PER_MSEC);
+ ktime_t delay = ktime_set(Delay/1000, (Delay%1000) * NSEC_PER_MSEC);
__set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
#else
if (Contiguous)
{
- /* Get contiguous pages, and suppress warning (stack dump) from kernel when
- we run out of memory. */
+ gctUINT32 order = get_order(bytes);
+
+ if (order >= MAX_ORDER)
+ {
+ gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
+ }
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
addr =
alloc_pages_exact(numPages * PAGE_SIZE, GFP_KERNEL | gcdNOWARN | __GFP_NORETRY);
mdl->exact = gcvTRUE;
#else
mdl->u.contiguousPages =
- alloc_pages(GFP_KERNEL | gcdNOWARN | __GFP_NORETRY, GetOrder(numPages));
+ alloc_pages(GFP_KERNEL | gcdNOWARN | __GFP_NORETRY, order);
#endif
if (mdl->u.contiguousPages == gcvNULL)
{
mdl->u.contiguousPages =
- alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN, GetOrder(numPages));
+ alloc_pages(GFP_KERNEL | __GFP_HIGHMEM | gcdNOWARN, order);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
mdl->exact = gcvFALSE;
}
mdlMap->vma->vm_flags |= gcdVM_FLAGS;
-#if !gcdPAGED_MEMORY_CACHEABLE
+
if (Cacheable == gcvFALSE)
{
/* Make this mapping non-cached. */
mdlMap->vma->vm_page_prot = gcmkPAGED_MEMROY_PROT(mdlMap->vma->vm_page_prot);
}
-#endif
+
addr = mdl->addr;
/* Now map all the vmalloc pages to this user address. */
up_write(¤t->mm->mmap_sem);
}
- else
- {
- /* mdlMap->vmaAddr != gcvNULL means current process has already locked this node. */
- MEMORY_UNLOCK(Os);
- gcmkFOOTER_ARG("*status=%d, mdlMap->vmaAddr=%x", gcvSTATUS_MEMORY_LOCKED, mdlMap->vmaAddr);
- return gcvSTATUS_MEMORY_LOCKED;
- }
+ mdlMap->count++;
/* Convert pointer to MDL. */
*Logical = mdlMap->vmaAddr;
{
if ((mdlMap->vmaAddr != gcvNULL) && (_GetProcessID() == mdlMap->pid))
{
- _UnmapUserLogical(mdlMap->pid, mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
- mdlMap->vmaAddr = gcvNULL;
+ if (--mdlMap->count == 0)
+ {
+ _UnmapUserLogical(mdlMap->pid, mdlMap->vmaAddr, mdl->numPages * PAGE_SIZE);
+ mdlMap->vmaAddr = gcvNULL;
+ }
}
mdlMap = mdlMap->next;
{
/* Get the user pages. */
down_read(¤t->mm->mmap_sem);
+
result = get_user_pages(current,
current->mm,
memory & PAGE_MASK,
pages,
gcvNULL
);
+
up_read(¤t->mm->mmap_sem);
if (result <=0 || result < pageCount)
{
struct vm_area_struct *vma;
- /* Free the page table. */
- if (pages != gcvNULL)
+ /* Release the pages if any. */
+ if (result > 0)
{
- /* Release the pages if any. */
- if (result > 0)
+ for (i = 0; i < result; i++)
{
- for (i = 0; i < result; i++)
+ if (pages[i] == gcvNULL)
{
- if (pages[i] == gcvNULL)
- {
- break;
- }
-
- page_cache_release(pages[i]);
+ break;
}
+
+ page_cache_release(pages[i]);
+ pages[i] = gcvNULL;
}
- kfree(pages);
- pages = gcvNULL;
+ result = 0;
}
vma = find_vma(current->mm, memory);
- if (vma && (vma->vm_flags & VM_PFNMAP) )
+ if (vma && (vma->vm_flags & VM_PFNMAP))
{
pte_t * pte;
spinlock_t * ptl;
- unsigned long pfn;
+ gctUINTPTR_T logical = memory;
- pgd_t * pgd = pgd_offset(current->mm, memory);
- pud_t * pud = pud_offset(pgd, memory);
- if (pud)
+ for (i = 0; i < pageCount; i++)
{
- pmd_t * pmd = pmd_offset(pud, memory);
- pte = pte_offset_map_lock(current->mm, pmd, memory, &ptl);
- if (!pte)
+ pgd_t * pgd = pgd_offset(current->mm, logical);
+ pud_t * pud = pud_offset(pgd, logical);
+
+ if (pud)
+ {
+ pmd_t * pmd = pmd_offset(pud, logical);
+ pte = pte_offset_map_lock(current->mm, pmd, logical, &ptl);
+ if (!pte)
+ {
+ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
+ }
+ }
+ else
{
gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
}
+
+ pages[i] = pte_page(*pte);
+ pte_unmap_unlock(pte, ptl);
+
+ /* Advance to next. */
+ logical += PAGE_SIZE;
}
- else
+ }
+ else
+ {
+ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
+ }
+
+ /* Check if this memory is contiguous for old mmu. */
+ if (Os->device->kernels[Core]->hardware->mmuVersion == 0)
+ {
+ for (i = 1; i < pageCount; i++)
{
- gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
+ if (pages[i] != nth_page(pages[0], i))
+ {
+ /* Non-contiguous. */
+ break;
+ }
}
- pfn = pte_pfn(*pte);
-
- physical = (pfn << PAGE_SHIFT) | (memory & ~PAGE_MASK);
+ if (i == pageCount)
+ {
+ /* Contiguous memory. */
+ physical = page_to_phys(pages[0]) | (memory & ~PAGE_MASK);
- pte_unmap_unlock(pte, ptl);
+ if (!((physical - Os->device->baseAddress) & 0x80000000))
+ {
+ kfree(pages);
+ pages = gcvNULL;
- if ((Os->device->kernels[Core]->hardware->mmuVersion == 0)
- && !((physical - Os->device->baseAddress) & 0x80000000))
- {
- info->pages = gcvNULL;
- info->pageTable = gcvNULL;
+ info->pages = gcvNULL;
+ info->pageTable = gcvNULL;
- MEMORY_MAP_UNLOCK(Os);
+ MEMORY_MAP_UNLOCK(Os);
- *Address = physical - Os->device->baseAddress;
- *Info = info;
+ *Address = physical - Os->device->baseAddress;
+ *Info = info;
- gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x",
- *Info, *Address);
+ gcmkFOOTER_ARG("*Info=0x%X *Address=0x%08x",
+ *Info, *Address);
- return gcvSTATUS_OK;
+ return gcvSTATUS_OK;
+ }
}
}
- else
+
+ /* Reference pages. */
+ for (i = 0; i < pageCount; i++)
{
- gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
+ get_page(pages[i]);
}
}
}
- if (pages)
- {
- for (i = 0; i < pageCount; i++)
- {
- /* Flush(clean) the data cache. */
- gcmkONERROR(gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
- (gctPOINTER)(gctUINTPTR_T)page_to_phys(pages[i]),
- (gctPOINTER)(memory & PAGE_MASK) + i*PAGE_SIZE,
- PAGE_SIZE));
- }
- }
- else
+ for (i = 0; i < pageCount; i++)
{
+#ifdef CONFIG_ARM
+ gctUINT32 data;
+ get_user(data, (gctUINT32*)((memory & PAGE_MASK) + i * PAGE_SIZE));
+#endif
+
/* Flush(clean) the data cache. */
gcmkONERROR(gckOS_CacheFlush(Os, _GetProcessID(), gcvNULL,
- (gctPOINTER)(gctUINTPTR_T)(physical & PAGE_MASK),
- (gctPOINTER)(memory & PAGE_MASK),
- PAGE_SIZE * pageCount));
+ (gctPOINTER)(gctUINTPTR_T)page_to_phys(pages[i]),
+ (gctPOINTER)(memory & PAGE_MASK) + i*PAGE_SIZE,
+ PAGE_SIZE));
}
#if gcdENABLE_VG
(gctPOINTER *) &pageTable,
&address));
}
+
/* Fill the page table. */
for (i = 0; i < pageCount; i++)
{
gctUINT32 phys;
gctUINT32_PTR tab = pageTable + i * (PAGE_SIZE/4096);
- if (pages)
- {
- phys = page_to_phys(pages[i]);
- }
- else
- {
- phys = (physical & PAGE_MASK) + i * PAGE_SIZE;
- }
+ phys = page_to_phys(pages[i]);
#if gcdENABLE_VG
if (Core == gcvCORE_VG)
#else
dma_sync_single_for_device(
gcvNULL,
- Physical,
+ (dma_addr_t)Physical,
Bytes,
DMA_TO_DEVICE);
#endif
#else
dma_sync_single_for_device(
gcvNULL,
- Physical,
+ (dma_addr_t)Physical,
Bytes,
DMA_FROM_DEVICE);
#endif
#else
dma_sync_single_for_device(
gcvNULL,
- Physical,
+ (dma_addr_t)Physical,
Bytes,
DMA_BIDIRECTIONAL);
#endif
struct clk *clk_2dcore = Os->device->clk_2d_core;
struct clk *clk_2d_axi = Os->device->clk_2d_axi;
struct clk *clk_vg_axi = Os->device->clk_vg_axi;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
+ int ret;
+#endif
gctBOOL oldClockState = gcvFALSE;
gctBOOL oldPowerState = gcvFALSE;
}
if((Power == gcvTRUE) && (oldPowerState == gcvFALSE))
{
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
- if(!IS_ERR(Os->device->gpu_regulator))
- regulator_enable(Os->device->gpu_regulator);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
+ if(!IS_ERR(Os->device->gpu_regulator)) {
+ ret = regulator_enable(Os->device->gpu_regulator);
+ if (ret != 0)
+ gckOS_Print("%s(%d): fail to enable pu regulator %d!\n",
+ __FUNCTION__, __LINE__, ret);
+ }
#else
imx_gpc_power_up_pu(true);
#endif
pm_runtime_put_sync(Os->device->pmdev);
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
if(!IS_ERR(Os->device->gpu_regulator))
regulator_disable(Os->device->gpu_regulator);
#else
else
{
/* Set the event to an unsignaled state. */
- INIT_COMPLETION(signal->obj);
+ reinit_completion(&signal->obj);
}
gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->signalMutex));
if (unlikely(delayed_work_pending(&timer->work)))
{
- cancel_delayed_work(&timer->work);
+ if (unlikely(!cancel_delayed_work(&timer->work)))
+ {
+ cancel_work_sync(&timer->work.work);
+
+ if (unlikely(delayed_work_pending(&timer->work)))
+ {
+ gckOS_Print("gckOS_StartTimer error, the pending worker cannot complete!!!! \n");
+
+ return gcvSTATUS_INVALID_REQUEST;
+ }
+ }
}
queue_delayed_work(Os->workqueue, &timer->work, msecs_to_jiffies(Delay));
return gcvSTATUS_OK;
}
+#if gcdANDROID_NATIVE_FENCE_SYNC
+
+/* Create a sync point object and return its integer id as an opaque
+** gctSYNC_POINT handle.
+**
+** OUTPUT:
+**     SyncPoint - receives the id of the new sync point.
+**
+** Returns gcvSTATUS_OK, gcvSTATUS_OUT_OF_MEMORY on allocation failure, or
+** gcvSTATUS_OUT_OF_RESOURCES if no id could be allocated. */
+gceSTATUS
+gckOS_CreateSyncPoint(
+    IN gckOS Os,
+    OUT gctSYNC_POINT * SyncPoint
+    )
+{
+    gceSTATUS status;
+    gcsSYNC_POINT_PTR syncPoint = gcvNULL;
+
+    gcmkHEADER_ARG("Os=0x%X", Os);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
+
+    /* Create an sync point structure. */
+    syncPoint = (gcsSYNC_POINT_PTR) kmalloc(
+            sizeof(gcsSYNC_POINT), GFP_KERNEL | gcdNOWARN);
+
+    if (syncPoint == gcvNULL)
+    {
+        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
+    }
+
+    /* Initialize the sync point. */
+    atomic_set(&syncPoint->ref, 1);
+    atomic_set(&syncPoint->state, 0);
+
+    /* No timeline attached yet: gckOS_SignalSyncPoint tests this pointer,
+    ** so it must not be left uninitialized. */
+    syncPoint->timeline = gcvNULL;
+
+    /* Allocate an integer id for the sync point. */
+    gcmkONERROR(_AllocateIntegerId(&Os->syncPointDB, syncPoint, &syncPoint->id));
+
+    *SyncPoint = (gctSYNC_POINT)(gctUINTPTR_T)syncPoint->id;
+
+    gcmkFOOTER_ARG("*SyncPoint=%d", syncPoint->id);
+    return gcvSTATUS_OK;
+
+OnError:
+    if (syncPoint != gcvNULL)
+    {
+        kfree(syncPoint);
+    }
+
+    gcmkFOOTER();
+    return status;
+}
+
+/* Take an additional reference on the sync point identified by SyncPoint.
+** The matching release is gckOS_DestroySyncPoint. */
+gceSTATUS
+gckOS_ReferenceSyncPoint(
+    IN gckOS Os,
+    IN gctSYNC_POINT SyncPoint
+    )
+{
+    gceSTATUS status;
+    gcsSYNC_POINT_PTR point;
+
+    gcmkHEADER_ARG("Os=0x%X", Os);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
+    gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
+
+    /* Look the sync point up by its integer id. */
+    gcmkONERROR(
+        _QueryIntegerId(&Os->syncPointDB,
+                        (gctUINT32)(gctUINTPTR_T)SyncPoint,
+                        (gctPOINTER)&point));
+
+    /* Bump the reference counter. */
+    atomic_inc(&point->ref);
+
+    gcmkFOOTER_NO();
+    return gcvSTATUS_OK;
+
+OnError:
+    gcmkFOOTER();
+    return status;
+}
+
+/* Drop one reference on the sync point identified by SyncPoint; when the
+** counter reaches zero, remove its id from syncPointDB and free it.
+** Serialized against other destroy/signal calls by syncPointMutex. */
+gceSTATUS
+gckOS_DestroySyncPoint(
+ IN gckOS Os,
+ IN gctSYNC_POINT SyncPoint
+ )
+{
+ gceSTATUS status;
+ gcsSYNC_POINT_PTR syncPoint;
+ gctBOOL acquired = gcvFALSE;
+
+ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
+
+ /* Verify the arguments. */
+ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
+ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
+
+ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
+ acquired = gcvTRUE;
+
+ /* Look the sync point up by its integer id. */
+ gcmkONERROR(
+ _QueryIntegerId(&Os->syncPointDB,
+ (gctUINT32)(gctUINTPTR_T)SyncPoint,
+ (gctPOINTER)&syncPoint));
+
+ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
+
+ if (atomic_dec_and_test(&syncPoint->ref))
+ {
+ /* Last reference: retire the id, then free the sync point. */
+ gcmkVERIFY_OK(_DestroyIntegerId(&Os->syncPointDB, syncPoint->id));
+
+ /* Free the signal. */
+ syncPoint->timeline = gcvNULL;
+ kfree(syncPoint);
+ }
+
+ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
+ acquired = gcvFALSE;
+
+ /* Success. */
+ gcmkFOOTER_NO();
+ return gcvSTATUS_OK;
+
+OnError:
+ if (acquired)
+ {
+ /* Release the mutex. */
+ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
+ }
+
+ gcmkFOOTER();
+ return status;
+}
+
+/* Mark the sync point identified by SyncPoint as signaled and, if a native
+** fence was attached to it, kick its sync timeline so waiters wake up.
+** Serialized against destroy by syncPointMutex. */
+gceSTATUS
+gckOS_SignalSyncPoint(
+ IN gckOS Os,
+ IN gctSYNC_POINT SyncPoint
+ )
+{
+ gceSTATUS status;
+ gcsSYNC_POINT_PTR syncPoint;
+ gctBOOL acquired = gcvFALSE;
+
+ gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
+
+ /* Verify the arguments. */
+ gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
+ gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
+
+ gcmkONERROR(gckOS_AcquireMutex(Os, Os->syncPointMutex, gcvINFINITE));
+ acquired = gcvTRUE;
+
+ /* Look the sync point up by its integer id. */
+ gcmkONERROR(
+ _QueryIntegerId(&Os->syncPointDB,
+ (gctUINT32)(gctUINTPTR_T)SyncPoint,
+ (gctPOINTER)&syncPoint));
+
+ gcmkASSERT(syncPoint->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
+
+ /* Set the signaled state. */
+ atomic_set(&syncPoint->state, gcvTRUE);
+
+ /* Signal timeline. */
+ if (syncPoint->timeline)
+ {
+ sync_timeline_signal(syncPoint->timeline);
+ }
+
+ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
+ acquired = gcvFALSE;
+
+ /* Success. */
+ gcmkFOOTER_NO();
+ return gcvSTATUS_OK;
+
+OnError:
+ if (acquired)
+ {
+ /* Release the mutex. */
+ gcmkVERIFY_OK(gckOS_ReleaseMutex(Os, Os->syncPointMutex));
+ }
+
+ gcmkFOOTER();
+ return status;
+}
+
+/* Read back the signaled state of the sync point identified by SyncPoint.
+**
+** OUTPUT:
+**     State - gcvTRUE once the sync point has been signaled, gcvFALSE before. */
+gceSTATUS
+gckOS_QuerySyncPoint(
+    IN gckOS Os,
+    IN gctSYNC_POINT SyncPoint,
+    OUT gctBOOL_PTR State
+    )
+{
+    gceSTATUS status;
+    gcsSYNC_POINT_PTR point;
+
+    gcmkHEADER_ARG("Os=0x%X SyncPoint=%d", Os, (gctUINT32)(gctUINTPTR_T)SyncPoint);
+
+    /* Verify the arguments. */
+    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
+    gcmkVERIFY_ARGUMENT(SyncPoint != gcvNULL);
+
+    /* Look the sync point up by its integer id. */
+    gcmkONERROR(
+        _QueryIntegerId(&Os->syncPointDB,
+                        (gctUINT32)(gctUINTPTR_T)SyncPoint,
+                        (gctPOINTER)&point));
+
+    gcmkASSERT(point->id == (gctUINT32)(gctUINTPTR_T)SyncPoint);
+
+    /* Report the current signaled state. */
+    *State = atomic_read(&point->state);
+
+    /* Success. */
+    gcmkFOOTER_ARG("*State=%d", *State);
+    return gcvSTATUS_OK;
+
+OnError:
+    gcmkFOOTER();
+    return status;
+}
+
+/* Create a viv sync timeline and return it as an opaque handle.
+**
+** OUTPUT:
+**     Timeline - receives the new timeline handle.
+**
+** Returns gcvSTATUS_OK or gcvSTATUS_OUT_OF_MEMORY. */
+gceSTATUS
+gckOS_CreateSyncTimeline(
+    IN gckOS Os,
+    OUT gctHANDLE * Timeline
+    )
+{
+    struct viv_sync_timeline * obj;
+
+    /* Construct the viv sync timeline object. */
+    obj = viv_sync_timeline_create("viv timeline", Os);
+
+    if (obj == gcvNULL)
+    {
+        /* Out of memory. */
+        return gcvSTATUS_OUT_OF_MEMORY;
+    }
+
+    /* Hand it back as an opaque handle. */
+    *Timeline = (gctHANDLE) obj;
+
+    return gcvSTATUS_OK;
+}
+
+/* Destroy a timeline previously created by gckOS_CreateSyncTimeline. */
+gceSTATUS
+gckOS_DestroySyncTimeline(
+    IN gckOS Os,
+    IN gctHANDLE Timeline
+    )
+{
+    struct viv_sync_timeline * obj;
+
+    gcmkASSERT(Timeline != gcvNULL);
+
+    /* Recover the timeline object from the handle and tear it down. */
+    obj = (struct viv_sync_timeline *) Timeline;
+    sync_timeline_destroy(&obj->obj);
+
+    return gcvSTATUS_OK;
+}
+
+/* Create an Android native fence fd for the given sync point on the given
+** timeline.
+**
+** OUTPUT:
+**     FenceFD - receives the installed fence file descriptor.
+**
+** On failure any created sync_pt and reserved fd are rolled back. */
+gceSTATUS
+gckOS_CreateNativeFence(
+    IN gckOS Os,
+    IN gctHANDLE Timeline,
+    IN gctSYNC_POINT SyncPoint,
+    OUT gctINT * FenceFD
+    )
+{
+    int fd = -1;
+    struct viv_sync_timeline *timeline;
+    struct sync_pt * pt = gcvNULL;
+    struct sync_fence * fence;
+    char name[32];
+    gcsSYNC_POINT_PTR syncPoint;
+    gceSTATUS status;
+
+    gcmkHEADER_ARG("Os=0x%X Timeline=0x%X SyncPoint=%d",
+                   Os, Timeline, (gctUINT)(gctUINTPTR_T)SyncPoint);
+
+    /* Look the sync point up by its integer id. */
+    gcmkONERROR(
+        _QueryIntegerId(&Os->syncPointDB,
+                        (gctUINT32)(gctUINTPTR_T)SyncPoint,
+                        (gctPOINTER)&syncPoint));
+
+    /* Cast timeline. */
+    timeline = (struct viv_sync_timeline *) Timeline;
+
+    /* Reserve a file descriptor for the fence. */
+    fd = get_unused_fd();
+
+    if (fd < 0)
+    {
+        /* Out of resources. */
+        gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
+    }
+
+    /* Create viv_sync_pt. */
+    pt = viv_sync_pt_create(timeline, SyncPoint);
+
+    if (pt == gcvNULL)
+    {
+        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
+    }
+
+    /* Remember the timeline so gckOS_SignalSyncPoint can kick it. */
+    syncPoint->timeline = &timeline->obj;
+
+    /* Build fence name. */
+    snprintf(name, 32, "viv sync_fence-%u", (gctUINT)(gctUINTPTR_T)SyncPoint);
+
+    /* Create sync_fence; on success it owns the sync_pt. */
+    fence = sync_fence_create(name, pt);
+
+    if (fence == NULL)
+    {
+        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
+    }
+
+    /* Install fence to fd. */
+    sync_fence_install(fence, fd);
+
+    *FenceFD = fd;
+    gcmkFOOTER_ARG("*FenceFD=%d", fd);
+    return gcvSTATUS_OK;
+
+OnError:
+    /* Error roll back. */
+    if (pt)
+    {
+        sync_pt_free(pt);
+    }
+
+    /* fd 0 is a valid descriptor, so release any reserved fd (>= 0). */
+    if (fd >= 0)
+    {
+        put_unused_fd(fd);
+    }
+
+    gcmkFOOTER();
+    return status;
+}
+#endif