/****************************************************************************
*
-* Copyright (C) 2005 - 2013 by Vivante Corp.
+* Copyright (C) 2005 - 2014 by Vivante Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#define gcdMMU_TABLE_DUMP 0
-#define gcdUSE_MMU_EXCEPTION 0
+#define gcdUSE_MMU_EXCEPTION 1
/*
gcdMMU_CLEAR_VALUE
# define gcdMMU_CLEAR_VALUE 0x00000ABC
#endif
-/* VIV: Start GPU address for gcvSURF_VERTEX. */
#define gcdVERTEX_START (128 << 10)
typedef struct _gcsMMU_STLB *gcsMMU_STLB_PTR;
else
{
/* Address page table. */
- gctUINT32_PTR pageTable = Mmu->pageTableLogical;
+ gctUINT32_PTR map = Mmu->mapLogical;
/* Dispatch on node type. */
- switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[Index])))
+ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[Index])))
{
case gcvMMU_SINGLE:
/* Set single index. */
- _WritePageEntry(&pageTable[Index], (Next << 8) | gcvMMU_SINGLE);
+ _WritePageEntry(&map[Index], (Next << 8) | gcvMMU_SINGLE);
break;
case gcvMMU_FREE:
/* Set index. */
- _WritePageEntry(&pageTable[Index + 1], Next);
+ _WritePageEntry(&map[Index + 1], Next);
break;
default:
IN gctUINT32 Count
)
{
- gctUINT32_PTR pageTable = Mmu->pageTableLogical;
+ gctUINT32_PTR map = Mmu->mapLogical;
if (Count == 1)
{
/* Initialize a single page node. */
- _WritePageEntry(pageTable + Node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
+ _WritePageEntry(map + Node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
}
else
{
/* Initialize the node. */
- _WritePageEntry(pageTable + Node + 0, (Count << 8) | gcvMMU_FREE);
- _WritePageEntry(pageTable + Node + 1, ~0U);
+ _WritePageEntry(map + Node + 0, (Count << 8) | gcvMMU_FREE);
+ _WritePageEntry(map + Node + 1, ~0U);
}
/* Append the node. */
IN gckMMU Mmu
)
{
- gctUINT32_PTR pageTable = Mmu->pageTableLogical;
+ gctUINT32_PTR map = Mmu->mapLogical;
gceSTATUS status;
gctUINT32 i, previous, start = 0, count = 0;
for (i = 0; i < Mmu->pageTableEntries; ++i)
{
/* Dispatch based on type of page. */
- switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[i])))
+ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[i])))
{
case gcvMMU_USED:
/* Used page, so close any open node. */
}
/* Advance the count. */
- count += _ReadPageEntry(&pageTable[i]) >> 8;
+ count += _ReadPageEntry(&map[i]) >> 8;
/* Advance the index into the page table. */
- i += (_ReadPageEntry(&pageTable[i]) >> 8) - 1;
+ i += (_ReadPageEntry(&map[i]) >> 8) - 1;
break;
default:
| (1 << 0);
}
+#if gcdPROCESS_ADDRESS_SPACE
+/* Convert a GPU virtual address into a linear index into the address-space
+** management map (Mmu->mapLogical).  Assumes 4K page mode and that the
+** address lies at or beyond Mmu->dynamicMappingStart's MTLB slot. */
+gctUINT32
+_AddressToIndex(
+    IN gckMMU Mmu,
+    IN gctUINT32 Address
+    )
+{
+    /* Master TLB slot selected by the high address bits. */
+    gctUINT32 mtlbOffset = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
+    /* Slave TLB slot (4K pages) selected by the middle address bits. */
+    gctUINT32 stlbOffset = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
+
+    /* Each MTLB slot past dynamicMappingStart owns gcdMMU_STLB_4K_ENTRY_NUM
+    ** map entries; linearize (mtlb, stlb) into a single index. */
+    return (mtlbOffset - Mmu->dynamicMappingStart) * gcdMMU_STLB_4K_ENTRY_NUM + stlbOffset;
+}
+
+/* Return the master TLB (MTLB) slot index for a GPU virtual address. */
+gctUINT32
+_MtlbOffset(
+    gctUINT32 Address
+    )
+{
+    return (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
+}
+
+/* Return the slave TLB (STLB) slot index for a GPU virtual address,
+** assuming 4K page mode. */
+gctUINT32
+_StlbOffset(
+    gctUINT32 Address
+    )
+{
+    return (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
+}
+
+/* Allocate and initialize one slave TLB (4K page mode).
+**
+** On success, *Stlb receives the new record; the caller owns both the
+** record and its contiguous entry table.  On failure, every partial
+** allocation is rolled back so nothing is leaked. */
+static gceSTATUS
+_AllocateStlb(
+    IN gckOS Os,
+    OUT gcsMMU_STLB_PTR *Stlb
+    )
+{
+    gceSTATUS status;
+    gcsMMU_STLB_PTR stlb = gcvNULL;
+    gctPOINTER pointer;
+
+    /* Allocate slave TLB record. */
+    gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsMMU_STLB), &pointer));
+    stlb = pointer;
+
+    /* Mark the entry table as not yet allocated for the rollback path. */
+    stlb->logical = gcvNULL;
+
+    stlb->size = gcdMMU_STLB_4K_SIZE;
+
+    /* Allocate slave TLB entries (read by the GPU, so contiguous). */
+    gcmkONERROR(gckOS_AllocateContiguous(
+        Os,
+        gcvFALSE,
+        &stlb->size,
+        &stlb->physical,
+        (gctPOINTER)&stlb->logical
+        ));
+
+    gcmkONERROR(gckOS_GetPhysicalAddress(Os, stlb->logical, &stlb->physBase));
+
+#if gcdUSE_MMU_EXCEPTION
+    /* Point every entry at the exception value so stray accesses fault. */
+    _FillPageTable(stlb->logical, stlb->size / 4, gcdMMU_STLB_EXCEPTION);
+#else
+    gckOS_ZeroMemory(stlb->logical, stlb->size);
+#endif
+
+    *Stlb = stlb;
+
+    return gcvSTATUS_OK;
+
+OnError:
+    /* Roll back partial allocations; the original code leaked the record
+    ** (and possibly the entry table) on this path. */
+    if (stlb != gcvNULL)
+    {
+        if (stlb->logical != gcvNULL)
+        {
+            gcmkVERIFY_OK(gckOS_FreeContiguous(
+                Os, stlb->physical, stlb->logical, stlb->size));
+        }
+
+        gcmkOS_SAFE_FREE(Os, stlb);
+    }
+
+    return status;
+}
+
+/* Set up the per-process GPU address-space management map.
+**
+** Builds the CPU-side bookkeeping map (Mmu->mapLogical) that tracks
+** free/used 4K-page entries covering gcdPROCESS_ADDRESS_SPACE_SIZE bytes
+** of GPU address space, initialized as one large free node.  Hardware
+** slave TLBs are allocated lazily (see gckMMU_GetPageEntry). */
+gceSTATUS
+_SetupProcessAddressSpace(
+    IN gckMMU Mmu
+    )
+{
+    gceSTATUS status;
+    gctINT numEntries = 0;
+    gctUINT32_PTR map;
+
+    /* Number of MTLB slots needed for the whole process address space. */
+    numEntries = gcdPROCESS_ADDRESS_SPACE_SIZE
+               /* Address space mapped by one MTLB entry. */
+               / (1 << gcdMMU_MTLB_SHIFT);
+
+    /* The entire space is dynamically mapped. */
+    Mmu->dynamicMappingStart = 0;
+
+    /* 4096 bytes of map per MTLB slot (one 32-bit entry per 4K page). */
+    Mmu->pageTableSize = numEntries * 4096;
+
+    Mmu->pageTableEntries = Mmu->pageTableSize / gcmSIZEOF(gctUINT32);
+
+    /* Allocate the management map (never read by the GPU). */
+    gcmkONERROR(gckOS_Allocate(Mmu->os,
+                               Mmu->pageTableSize,
+                               (void **)&Mmu->mapLogical));
+
+    /* Initialization: one free node spanning every entry. */
+    map = Mmu->mapLogical;
+    _WritePageEntry(map, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
+    _WritePageEntry(map + 1, ~0U);
+    Mmu->heapList = 0;
+    Mmu->freeNodes = gcvFALSE;
+
+    return gcvSTATUS_OK;
+
+OnError:
+    return status;
+}
+#else
static gceSTATUS
_FillFlatMapping(
IN gckMMU Mmu,
gctUINT32 mEnd = end >> gcdMMU_MTLB_SHIFT;
gctUINT32 sStart = (start & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
gctUINT32 sEnd = (end & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
+ gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE);
/* Grab the mutex. */
gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
gcsMMU_STLB_PTR stlb;
gctPOINTER pointer = gcvNULL;
gctUINT32 last = (mStart == mEnd) ? sEnd : (gcdMMU_STLB_64K_ENTRY_NUM - 1);
+ gctUINT32 mtlbEntry;
gcmkONERROR(gckOS_Allocate(Mmu->os, sizeof(struct _gcsMMU_STLB), &pointer));
stlb = pointer;
gcmkONERROR(gcvSTATUS_NOT_ALIGNED);
}
- _WritePageEntry(Mmu->mtlbLogical + mStart,
- stlb->physBase
- /* 64KB page size */
- | (1 << 2)
- /* Ignore exception */
- | (0 << 1)
- /* Present */
- | (1 << 0)
- );
+ mtlbEntry = stlb->physBase
+ /* 64KB page size */
+ | (1 << 2)
+ /* Ignore exception */
+ | (0 << 1)
+ /* Present */
+ | (1 << 0);
+
+ if (ace)
+ {
+ mtlbEntry = mtlbEntry
+ /* Secure */
+ | (1 << 4);
+ }
+
+ _WritePageEntry(Mmu->mtlbLogical + mStart, mtlbEntry);
+
#if gcdMMU_TABLE_DUMP
gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
__FUNCTION__, __LINE__,
gctINT i, nodeArraySize = 0;
gctUINT32 physical;
gctINT numEntries = 0;
- gctUINT32_PTR pageTable;
+ gctUINT32_PTR map;
gctBOOL acquired = gcvFALSE;
+ gctUINT32 mtlbEntry;
+ gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE);
/* Find all the dynamic address space. */
gcmkONERROR(_FindDynamicSpace(Mmu, &nodeArray, &nodeArraySize));
Mmu->pageTableSize = numEntries * 4096;
- Mmu->pageTableEntries = Mmu->pageTableSize / gcmSIZEOF(gctUINT32);
+ gcmkSAFECASTSIZET(Mmu->pageTableEntries, Mmu->pageTableSize / gcmSIZEOF(gctUINT32));
+
+ gcmkONERROR(gckOS_Allocate(Mmu->os,
+ Mmu->pageTableSize,
+ (void **)&Mmu->mapLogical));
/* Construct Slave TLB. */
gcmkONERROR(gckOS_AllocateContiguous(Mmu->os,
#endif
/* Initilization. */
- pageTable = Mmu->pageTableLogical;
- _WritePageEntry(pageTable, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
- _WritePageEntry(pageTable + 1, ~0U);
+ map = Mmu->mapLogical;
+ _WritePageEntry(map, (Mmu->pageTableEntries << 8) | gcvMMU_FREE);
+ _WritePageEntry(map + 1, ~0U);
Mmu->heapList = 0;
Mmu->freeNodes = gcvFALSE;
i < (gctINT)Mmu->dynamicMappingStart + numEntries;
i++)
{
- _WritePageEntry(Mmu->mtlbLogical + i,
- physical
- /* 4KB page size */
- | (0 << 2)
- /* Ignore exception */
- | (0 << 1)
- /* Present */
- | (1 << 0)
- );
+ mtlbEntry = physical
+ /* 4KB page size */
+ | (0 << 2)
+ /* Ignore exception */
+ | (0 << 1)
+ /* Present */
+ | (1 << 0);
+
+ if (ace)
+ {
+ mtlbEntry = mtlbEntry
+ /* Secure */
+ | (1 << 4);
+ }
+
+ _WritePageEntry(Mmu->mtlbLogical + i, mtlbEntry);
+
#if gcdMMU_TABLE_DUMP
gckOS_Print("%s(%d): insert MTLB[%d]: %08x\n",
__FUNCTION__, __LINE__,
return gcvSTATUS_OK;
OnError:
- if (Mmu->pageTableLogical)
+ if (Mmu->mapLogical)
{
- /* Free the page table. */
gcmkVERIFY_OK(
- gckOS_FreeContiguous(Mmu->os,
- Mmu->pageTablePhysical,
- (gctPOINTER) Mmu->pageTableLogical,
- Mmu->pageTableSize));
+ gckOS_Free(Mmu->os, (gctPOINTER) Mmu->mapLogical));
+
+
+ gcmkVERIFY_OK(
+ gckOS_FreeContiguous(Mmu->os,
+ Mmu->pageTablePhysical,
+ (gctPOINTER) Mmu->pageTableLogical,
+ Mmu->pageTableSize));
}
if (acquired)
return status;
}
+#endif
/*******************************************************************************
**
gckHARDWARE hardware;
gceSTATUS status;
gckMMU mmu = gcvNULL;
- gctUINT32_PTR pageTable;
+ gctUINT32_PTR map;
gctPOINTER pointer = gcvNULL;
+#if gcdPROCESS_ADDRESS_SPACE
+ gctUINT32 i;
+ gctUINT32 physical;
+#endif
+ gctUINT32 physBase;
+ gctUINT32 physSize;
+ gctUINT32 gpuAddress;
gcmkHEADER_ARG("Kernel=0x%x MmuSize=%lu", Kernel, MmuSize);
mmu->mtlbLogical = gcvNULL;
mmu->staticSTLB = gcvNULL;
mmu->enabled = gcvFALSE;
-#ifdef __QNXNTO__
- mmu->nodeList = gcvNULL;
- mmu->nodeMutex = gcvNULL;
-#endif
+ mmu->mapLogical = gcvNULL;
/* Create the page table mutex. */
gcmkONERROR(gckOS_CreateMutex(os, &mmu->pageTableMutex));
-#ifdef __QNXNTO__
- /* Create the node list mutex. */
- gcmkONERROR(gckOS_CreateMutex(os, &mmu->nodeMutex));
-#endif
-
if (hardware->mmuVersion == 0)
{
mmu->pageTableSize = MmuSize;
- gcmkONERROR(
- gckOS_AllocateContiguous(os,
- gcvFALSE,
- &mmu->pageTableSize,
- &mmu->pageTablePhysical,
- &pointer));
+ /* Construct address space management table. */
+ gcmkONERROR(gckOS_Allocate(mmu->os,
+ mmu->pageTableSize,
+ &pointer));
+
+ mmu->mapLogical = pointer;
+
+ /* Construct page table read by GPU. */
+ gcmkONERROR(gckOS_AllocateContiguous(mmu->os,
+ gcvFALSE,
+ &mmu->pageTableSize,
+ &mmu->pageTablePhysical,
+ (gctPOINTER)&mmu->pageTableLogical));
- mmu->pageTableLogical = pointer;
/* Compute number of entries in page table. */
- mmu->pageTableEntries = mmu->pageTableSize / sizeof(gctUINT32);
+ gcmkSAFECASTSIZET(mmu->pageTableEntries, mmu->pageTableSize / sizeof(gctUINT32));
/* Mark all pages as free. */
- pageTable = mmu->pageTableLogical;
+ map = mmu->mapLogical;
#if gcdMMU_CLEAR_VALUE
- _FillPageTable(pageTable, mmu->pageTableEntries, gcdMMU_CLEAR_VALUE);
+ _FillPageTable(mmu->pageTableLogical, mmu->pageTableEntries, gcdMMU_CLEAR_VALUE);
#endif
- _WritePageEntry(pageTable, (mmu->pageTableEntries << 8) | gcvMMU_FREE);
- _WritePageEntry(pageTable + 1, ~0U);
+ _WritePageEntry(map, (mmu->pageTableEntries << 8) | gcvMMU_FREE);
+ _WritePageEntry(map + 1, ~0U);
mmu->heapList = 0;
mmu->freeNodes = gcvFALSE;
-
- /* Set page table address. */
- gcmkONERROR(
- gckHARDWARE_SetMMU(hardware, (gctPOINTER) mmu->pageTableLogical));
}
else
{
mmu->mtlbLogical = pointer;
+#if gcdPROCESS_ADDRESS_SPACE
+ _FillPageTable(pointer, mmu->mtlbSize / 4, gcdMMU_MTLB_EXCEPTION);
+
+    /* Allocate an array to store STLB pointers, one per MTLB slot. */
+ gcmkONERROR(gckOS_Allocate(os, mmu->mtlbSize, &mmu->stlbs));
+
+ gckOS_ZeroMemory(mmu->stlbs, mmu->mtlbSize);
+
+ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
+ {
+ gcmkONERROR(gckOS_AtomConstruct(os, &mmu->pageTableDirty[i]));
+ }
+
+ _SetupProcessAddressSpace(mmu);
+
+ /* Map kernel command buffer in MMU. */
+ for (i = 0; i < gcdCOMMAND_QUEUES; i++)
+ {
+ gcmkONERROR(gckOS_GetPhysicalAddress(
+ mmu->os,
+ Kernel->command->queues[i].logical,
+ &physical
+ ));
+
+ gcmkONERROR(gckMMU_FlatMapping(mmu, physical));
+ }
+#else
/* Invalid all the entries. */
gcmkONERROR(
gckOS_ZeroMemory(pointer, mmu->mtlbSize));
+
+ gcmkONERROR(
+ gckOS_QueryOption(mmu->os, "physBase", &physBase));
+
+ gcmkONERROR(
+ gckOS_QueryOption(mmu->os, "physSize", &physSize));
+
+ gcmkONERROR(
+ gckOS_CPUPhysicalToGPUPhysical(mmu->os, physBase, &gpuAddress));
+
+ /* Setup [physBase - physSize) flat mapping. */
+ gcmkONERROR(_FillFlatMapping(
+ mmu,
+ gpuAddress,
+ physSize
+ ));
+
+ gcmkONERROR(_SetupDynamicSpace(mmu));
+#endif
}
/* Return the gckMMU object pointer. */
/* Roll back. */
if (mmu != gcvNULL)
{
- if (mmu->pageTableLogical != gcvNULL)
+ if (mmu->mapLogical != gcvNULL)
{
- /* Free the page table. */
+ gcmkVERIFY_OK(
+ gckOS_Free(os, (gctPOINTER) mmu->mapLogical));
+
+
gcmkVERIFY_OK(
gckOS_FreeContiguous(os,
mmu->pageTablePhysical,
(gctPOINTER) mmu->pageTableLogical,
mmu->pageTableSize));
-
}
if (mmu->mtlbLogical != gcvNULL)
gckOS_DeleteMutex(os, mmu->pageTableMutex));
}
-#ifdef __QNXNTO__
- if (mmu->nodeMutex != gcvNULL)
- {
- /* Delete the mutex. */
- gcmkVERIFY_OK(
- gckOS_DeleteMutex(os, mmu->nodeMutex));
- }
-#endif
-
/* Mark the gckMMU object as unknown. */
mmu->object.type = gcvOBJ_UNKNOWN;
IN gckMMU Mmu
)
{
-#ifdef __QNXNTO__
- gcuVIDMEM_NODE_PTR node, next;
+#if gcdPROCESS_ADDRESS_SPACE
+ gctUINT32 i;
#endif
-
gcmkHEADER_ARG("Mmu=0x%x", Mmu);
/* Verify the arguments. */
gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
-#ifdef __QNXNTO__
- /* Free all associated virtual memory. */
- for (node = Mmu->nodeList; node != gcvNULL; node = next)
- {
- next = node->Virtual.next;
- gcmkVERIFY_OK(gckVIDMEM_Free(node));
- }
-#endif
-
while (Mmu->staticSTLB != gcvNULL)
{
gcsMMU_STLB_PTR pre = Mmu->staticSTLB;
Mmu->mtlbSize));
}
- /* Free the page table. */
- gcmkVERIFY_OK(
- gckOS_FreeContiguous(Mmu->os,
- Mmu->pageTablePhysical,
- (gctPOINTER) Mmu->pageTableLogical,
- Mmu->pageTableSize));
+ /* Free address space management table. */
+ if (Mmu->mapLogical != gcvNULL)
+ {
+ gcmkVERIFY_OK(
+ gckOS_Free(Mmu->os, (gctPOINTER) Mmu->mapLogical));
+ }
-#ifdef __QNXNTO__
- /* Delete the node list mutex. */
- gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->nodeMutex));
-#endif
+ if (Mmu->pageTableLogical != gcvNULL)
+ {
+ /* Free page table. */
+ gcmkVERIFY_OK(
+ gckOS_FreeContiguous(Mmu->os,
+ Mmu->pageTablePhysical,
+ (gctPOINTER) Mmu->pageTableLogical,
+ Mmu->pageTableSize));
+ }
/* Delete the page table mutex. */
gcmkVERIFY_OK(gckOS_DeleteMutex(Mmu->os, Mmu->pageTableMutex));
+#if gcdPROCESS_ADDRESS_SPACE
+ for (i = 0; i < Mmu->mtlbSize / 4; i++)
+ {
+ struct _gcsMMU_STLB *stlb = ((struct _gcsMMU_STLB **)Mmu->stlbs)[i];
+
+ if (stlb)
+ {
+ gcmkVERIFY_OK(gckOS_FreeContiguous(
+ Mmu->os,
+ stlb->physical,
+ stlb->logical,
+ stlb->size));
+
+ gcmkOS_SAFE_FREE(Mmu->os, stlb);
+ }
+ }
+
+ gcmkOS_SAFE_FREE(Mmu->os, Mmu->stlbs);
+#endif
+
/* Mark the gckMMU object as unknown. */
Mmu->object.type = gcvOBJ_UNKNOWN;
{
gceSTATUS status;
gctUINT32 index = Index;
- gctUINT32_PTR map = Mmu->pageTableLogical;
+ gctUINT32_PTR map = Mmu->mapLogical;
gcmkHEADER();
gcmkONERROR(_Construct(Kernel, MmuSize, &sharedPageTable->mmu));
}
- else if (Kernel->hardware->mmuVersion == 0)
- {
- /* Set page table address. */
- gcmkONERROR(
- gckHARDWARE_SetMMU(Kernel->hardware, (gctPOINTER) sharedPageTable->mmu->pageTableLogical));
- }
*Mmu = sharedPageTable->mmu;
)
{
#if gcdSHARED_PAGETABLE
+ gckOS os = Mmu->os;
+
sharedPageTable->reference--;
if (sharedPageTable->reference == 0)
gcmkVERIFY_OK(_Destroy(Mmu));
}
- gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Mmu->os, sharedPageTable));
+ gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, sharedPageTable));
}
return gcvSTATUS_OK;
gceSTATUS status;
gctBOOL mutex = gcvFALSE;
gctUINT32 index = 0, previous = ~0U, left;
- gctUINT32_PTR pageTable;
+ gctUINT32_PTR map;
gctBOOL gotIt;
gctUINT32 address;
+ gctUINT32 pageCount;
gcmkHEADER_ARG("Mmu=0x%x PageCount=%lu", Mmu, PageCount);
if (PageCount > Mmu->pageTableEntries)
{
- gcmkPRINT("[galcore]: %s(%d): Run out of free page entry.",
- __FUNCTION__, __LINE__);
-
/* Not enough pages avaiable. */
gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
}
+ gcmkSAFECASTSIZET(pageCount, PageCount);
+
/* Grab the mutex. */
gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
mutex = gcvTRUE;
/* Cast pointer to page table. */
- for (pageTable = Mmu->pageTableLogical, gotIt = gcvFALSE; !gotIt;)
+ for (map = Mmu->mapLogical, gotIt = gcvFALSE; !gotIt;)
{
index = Mmu->heapList;
gcmkONERROR(_AdjustIndex(
Mmu,
index,
- PageCount,
+ pageCount,
gcdVERTEX_START / gcmSIZEOF(gctUINT32),
&index
));
for (; !gotIt && (index < Mmu->pageTableEntries);)
{
/* Check the node type. */
- switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[index])))
+ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[index])))
{
case gcvMMU_SINGLE:
/* Single odes are valid if we only need 1 page. */
- if (PageCount == 1)
+ if (pageCount == 1)
{
gotIt = gcvTRUE;
}
{
/* Move to next node. */
previous = index;
- index = _ReadPageEntry(&pageTable[index]) >> 8;
+ index = _ReadPageEntry(&map[index]) >> 8;
}
break;
case gcvMMU_FREE:
/* Test if the node has enough space. */
- if (PageCount <= (_ReadPageEntry(&pageTable[index]) >> 8))
+ if (pageCount <= (_ReadPageEntry(&map[index]) >> 8))
{
gotIt = gcvTRUE;
}
{
/* Move to next node. */
previous = index;
- index = _ReadPageEntry(&pageTable[index + 1]);
+ index = _ReadPageEntry(&map[index + 1]);
}
break;
}
else
{
- gcmkPRINT("[galcore]: %s(%d): Run out of free page entry.",
- __FUNCTION__, __LINE__);
-
/* Out of resources. */
gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
}
}
}
- switch (gcmENTRY_TYPE(_ReadPageEntry(&pageTable[index])))
+ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[index])))
{
case gcvMMU_SINGLE:
/* Unlink single node from free list. */
gcmkONERROR(
- _Link(Mmu, previous, _ReadPageEntry(&pageTable[index]) >> 8));
+ _Link(Mmu, previous, _ReadPageEntry(&map[index]) >> 8));
break;
case gcvMMU_FREE:
/* Check how many pages will be left. */
- left = (_ReadPageEntry(&pageTable[index]) >> 8) - PageCount;
+ left = (_ReadPageEntry(&map[index]) >> 8) - pageCount;
switch (left)
{
case 0:
/* The entire node is consumed, just unlink it. */
gcmkONERROR(
- _Link(Mmu, previous, _ReadPageEntry(&pageTable[index + 1])));
+ _Link(Mmu, previous, _ReadPageEntry(&map[index + 1])));
break;
case 1:
/* One page will remain. Convert the node to a single node and
** advance the index. */
- _WritePageEntry(&pageTable[index], (_ReadPageEntry(&pageTable[index + 1]) << 8) | gcvMMU_SINGLE);
+ _WritePageEntry(&map[index], (_ReadPageEntry(&map[index + 1]) << 8) | gcvMMU_SINGLE);
index ++;
break;
default:
/* Enough pages remain for a new node. However, we will just adjust
** the size of the current node and advance the index. */
- _WritePageEntry(&pageTable[index], (left << 8) | gcvMMU_FREE);
+ _WritePageEntry(&map[index], (left << 8) | gcvMMU_FREE);
index += left;
break;
}
}
/* Mark node as used. */
- gcmkONERROR(_FillPageTable(&pageTable[index], PageCount, gcvMMU_USED));
+ gcmkONERROR(_FillPageTable(&map[index], pageCount, gcvMMU_USED));
/* Return pointer to page table. */
- *PageTable = &pageTable[index];
+ *PageTable = &Mmu->pageTableLogical[index];
/* Build virtual address. */
if (Mmu->hardware->mmuVersion == 0)
IN gctSIZE_T PageCount
)
{
- gctUINT32_PTR pageTable;
+ gctUINT32_PTR node;
gceSTATUS status;
gctBOOL acquired = gcvFALSE;
+ gctUINT32 pageCount;
gcmkHEADER_ARG("Mmu=0x%x PageTable=0x%x PageCount=%lu",
Mmu, PageTable, PageCount);
gcmkVERIFY_ARGUMENT(PageTable != gcvNULL);
gcmkVERIFY_ARGUMENT(PageCount > 0);
- /* Convert the pointer. */
- pageTable = (gctUINT32_PTR) PageTable;
+ gcmkSAFECASTSIZET(pageCount, PageCount);
+
+ /* Get the node by index. */
+ node = Mmu->mapLogical + ((gctUINT32_PTR)PageTable - Mmu->pageTableLogical);
gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
acquired = gcvTRUE;
#if gcdMMU_CLEAR_VALUE
if (Mmu->hardware->mmuVersion == 0)
{
- _FillPageTable(pageTable, PageCount, gcdMMU_CLEAR_VALUE);
+ _FillPageTable(PageTable, pageCount, gcdMMU_CLEAR_VALUE);
}
#endif
if (PageCount == 1)
{
- /* Single page node. */
- _WritePageEntry(pageTable,
- (~((1U<<8)-1)) | gcvMMU_SINGLE
+ /* Single page node. */
+ _WritePageEntry(node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
#if gcdUSE_MMU_EXCEPTION
- /* Enable exception */
- | 1 << 1
+ /* Enable exception */
+ _WritePageEntry(PageTable, (1 << 1));
#endif
- );
}
else
{
/* Mark the node as free. */
- _WritePageEntry(pageTable,
- (PageCount << 8) | gcvMMU_FREE
-#if gcdUSE_MMU_EXCEPTION
- /* Enable exception */
- | 1 << 1
-#endif
- );
- _WritePageEntry(pageTable + 1, ~0U);
+ _WritePageEntry(node, (pageCount << 8) | gcvMMU_FREE);
+ _WritePageEntry(node + 1, ~0U);
#if gcdUSE_MMU_EXCEPTION
/* Enable exception */
- gcmkVERIFY_OK(_FillPageTable(pageTable + 2, PageCount - 2, 1 << 1));
+ gcmkVERIFY_OK(_FillPageTable(PageTable, pageCount, 1 << 1));
#endif
}
)
{
return gckMMU_AllocatePagesEx(
- Mmu, PageCount, gcvSURF_UNKNOWN, PageTable, Address);
+ Mmu, PageCount, gcvSURF_TYPE_UNKNOWN, PageTable, Address);
}
gceSTATUS
#endif
}
-gceSTATUS
-gckMMU_Enable(
- IN gckMMU Mmu,
- IN gctUINT32 PhysBaseAddr,
- IN gctUINT32 PhysSize
- )
-{
- gceSTATUS status;
-#if gcdSHARED_PAGETABLE
- gckHARDWARE hardware;
- gctINT i;
-#endif
-
- gcmkHEADER_ARG("Mmu=0x%x", Mmu);
-
- /* Verify the arguments. */
- gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
-
-#if gcdSHARED_PAGETABLE
- if (Mmu->enabled)
- {
- gcmkFOOTER_ARG("Status=%d", gcvSTATUS_SKIP);
- return gcvSTATUS_SKIP;
- }
-#endif
-
- if (Mmu->hardware->mmuVersion == 0)
- {
- /* Success. */
- gcmkFOOTER_ARG("Status=%d", gcvSTATUS_SKIP);
- return gcvSTATUS_SKIP;
- }
- else
- {
- if (PhysSize != 0)
- {
- gcmkONERROR(_FillFlatMapping(
- Mmu,
- PhysBaseAddr,
- PhysSize
- ));
- }
-
- gcmkONERROR(_SetupDynamicSpace(Mmu));
-
-#if gcdSHARED_PAGETABLE
- for(i = 0; i < gcdMAX_GPU_COUNT; i++)
- {
- hardware = sharedPageTable->hardwares[i];
- if (hardware != gcvNULL)
- {
- gcmkONERROR(
- gckHARDWARE_SetMMUv2(
- hardware,
- gcvTRUE,
- Mmu->mtlbLogical,
- gcvMMU_MODE_4K,
- (gctUINT8_PTR)Mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
- gcvFALSE
- ));
- }
- }
-#else
- gcmkONERROR(
- gckHARDWARE_SetMMUv2(
- Mmu->hardware,
- gcvTRUE,
- Mmu->mtlbLogical,
- gcvMMU_MODE_4K,
- (gctUINT8_PTR)Mmu->mtlbLogical + gcdMMU_MTLB_SIZE,
- gcvFALSE
- ));
-#endif
-
- Mmu->enabled = gcvTRUE;
-
- /* Success. */
- gcmkFOOTER_NO();
- return gcvSTATUS_OK;
- }
-
-OnError:
- /* Return the status. */
- gcmkFOOTER();
- return status;
-}
-
gceSTATUS
gckMMU_SetPage(
IN gckMMU Mmu,
gctUINT32 offset = (gctUINT32)PageEntry - (gctUINT32)Mmu->pageTableLogical;
#endif
- gctUINT32 data;
gcmkHEADER_ARG("Mmu=0x%x", Mmu);
/* Verify the arguments. */
if (Mmu->hardware->mmuVersion == 0)
{
- data = PageAddress;
+ _WritePageEntry(PageEntry, PageAddress);
}
else
{
- data = _SetPage(PageAddress);
+ _WritePageEntry(PageEntry, _SetPage(PageAddress));
}
- _WritePageEntry(PageEntry, data);
-
#if gcdMIRROR_PAGETABLE
for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
{
}
#endif
+
/* Success. */
gcmkFOOTER_NO();
return gcvSTATUS_OK;
}
-#ifdef __QNXNTO__
+#if gcdPROCESS_ADDRESS_SPACE
gceSTATUS
-gckMMU_InsertNode(
+gckMMU_GetPageEntry(
IN gckMMU Mmu,
- IN gcuVIDMEM_NODE_PTR Node)
+ IN gctUINT32 Address,
+ IN gctUINT32_PTR *PageTable
+ )
{
gceSTATUS status;
- gctBOOL mutex = gcvFALSE;
+ struct _gcsMMU_STLB *stlb;
+ struct _gcsMMU_STLB **stlbs = Mmu->stlbs;
+ gctUINT32 offset = _MtlbOffset(Address);
+ gctUINT32 mtlbEntry;
+ gctBOOL ace = gckHARDWARE_IsFeatureAvailable(Mmu->hardware, gcvFEATURE_ACE);
- gcmkHEADER_ARG("Mmu=0x%x Node=0x%x", Mmu, Node);
+ gcmkHEADER_ARG("Mmu=0x%x", Mmu);
+ /* Verify the arguments. */
gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
+ gcmkVERIFY_ARGUMENT((Address & 0xFFF) == 0);
- gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
- mutex = gcvTRUE;
+ stlb = stlbs[offset];
- Node->Virtual.next = Mmu->nodeList;
- Mmu->nodeList = Node;
+ if (stlb == gcvNULL)
+ {
+ gcmkONERROR(_AllocateStlb(Mmu->os, &stlb));
- gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
+ mtlbEntry = stlb->physBase
+ | gcdMMU_MTLB_4K_PAGE
+ | gcdMMU_MTLB_PRESENT
+ ;
- gcmkFOOTER();
+ if (ace)
+ {
+ mtlbEntry = mtlbEntry
+ /* Secure */
+ | (1 << 4);
+ }
+
+ /* Insert Slave TLB address to Master TLB entry.*/
+ _WritePageEntry(Mmu->mtlbLogical + offset, mtlbEntry);
+
+ /* Record stlb. */
+ stlbs[offset] = stlb;
+ }
+
+ *PageTable = &stlb->logical[_StlbOffset(Address)];
+
+ /* Success. */
+ gcmkFOOTER_NO();
return gcvSTATUS_OK;
OnError:
- if (mutex)
+ gcmkFOOTER();
+ return status;
+}
+
+/* Walk the free-node list of the management map and verify its
+** integrity.  Returns gcvSTATUS_OK when every node reachable from
+** heapList is a valid SINGLE/FREE node, an error status otherwise. */
+gceSTATUS
+_CheckMap(
+    IN gckMMU Mmu
+    )
+{
+    gceSTATUS status;
+    gctUINT32_PTR map = Mmu->mapLogical;
+    gctUINT32 index;
+
+    for (index = Mmu->heapList; index < Mmu->pageTableEntries;)
    {
-        gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
+        /* Check the node type. */
+        switch (gcmENTRY_TYPE(_ReadPageEntry(&map[index])))
+        {
+        case gcvMMU_SINGLE:
+            /* Move to next node. */
+            index = _ReadPageEntry(&map[index]) >> 8;
+            break;
+
+        case gcvMMU_FREE:
+            /* Move to next node. */
+            index = _ReadPageEntry(&map[index + 1]);
+            break;
+
+        default:
+            gcmkFATAL("MMU table corrupted at index [%u] = %x!", index, map[index]);
+            gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
+        }
    }
-    gcmkFOOTER();
+    return gcvSTATUS_OK;
+
+OnError:
+    return status;
}
gceSTATUS
-gckMMU_RemoveNode(
+gckMMU_FlatMapping(
IN gckMMU Mmu,
- IN gcuVIDMEM_NODE_PTR Node)
+ IN gctUINT32 Physical
+ )
{
gceSTATUS status;
- gctBOOL mutex = gcvFALSE;
- gcuVIDMEM_NODE_PTR *iter;
+ gctUINT32 index = _AddressToIndex(Mmu, Physical);
+ gctUINT32 i;
+ gctBOOL gotIt = gcvFALSE;
+ gctUINT32_PTR map = Mmu->mapLogical;
+ gctUINT32 previous = ~0U;
+ gctUINT32_PTR pageTable;
- gcmkHEADER_ARG("Mmu=0x%x Node=0x%x", Mmu, Node);
+ gckMMU_GetPageEntry(Mmu, Physical, &pageTable);
- gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
+ _WritePageEntry(pageTable, _SetPage(Physical));
- gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
- mutex = gcvTRUE;
-
- for (iter = &Mmu->nodeList; *iter; iter = &(*iter)->Virtual.next)
+ if (map)
{
- if (*iter == Node)
+ /* Find node which contains index. */
+ for (i = 0; !gotIt && (i < Mmu->pageTableEntries);)
+ {
+ gctUINT32 numPages;
+
+ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[i])))
+ {
+ case gcvMMU_SINGLE:
+ if (i == index)
+ {
+ gotIt = gcvTRUE;
+ }
+ else
+ {
+ previous = i;
+ i = _ReadPageEntry(&map[i]) >> 8;
+ }
+ break;
+
+ case gcvMMU_FREE:
+ numPages = _ReadPageEntry(&map[i]) >> 8;
+ if (index >= i && index < i + numPages)
+ {
+ gotIt = gcvTRUE;
+ }
+ else
+ {
+ previous = i;
+ i = _ReadPageEntry(&map[i + 1]);
+ }
+ break;
+
+ default:
+ gcmkFATAL("MMU table correcupted at index %u!", index);
+ gcmkONERROR(gcvSTATUS_OUT_OF_RESOURCES);
+ }
+ }
+
+ switch (gcmENTRY_TYPE(_ReadPageEntry(&map[i])))
{
- *iter = Node->Virtual.next;
+ case gcvMMU_SINGLE:
+ /* Unlink single node from free list. */
+ gcmkONERROR(
+ _Link(Mmu, previous, _ReadPageEntry(&map[i]) >> 8));
+ break;
+
+ case gcvMMU_FREE:
+ /* Split the node. */
+ {
+ gctUINT32 start;
+ gctUINT32 next = _ReadPageEntry(&map[i+1]);
+ gctUINT32 total = _ReadPageEntry(&map[i]) >> 8;
+ gctUINT32 countLeft = index - i;
+ gctUINT32 countRight = total - countLeft - 1;
+
+ if (countLeft)
+ {
+ start = i;
+ _AddFree(Mmu, previous, start, countLeft);
+ previous = start;
+ }
+
+ if (countRight)
+ {
+ start = index + 1;
+ _AddFree(Mmu, previous, start, countRight);
+ previous = start;
+ }
+
+ _Link(Mmu, previous, next);
+ }
break;
}
}
- gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
-
- gcmkFOOTER();
return gcvSTATUS_OK;
OnError:
- if (mutex)
- {
- gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
- }
- gcmkFOOTER();
+ /* Roll back. */
return status;
}
+
+
gceSTATUS
-gckMMU_FreeHandleMemory(
- IN gckKERNEL Kernel,
+gckMMU_FreePagesEx(
IN gckMMU Mmu,
- IN gctUINT32 Pid
+ IN gctUINT32 Address,
+ IN gctSIZE_T PageCount
)
{
+ gctUINT32_PTR node;
gceSTATUS status;
- gctBOOL acquired = gcvFALSE;
- gcuVIDMEM_NODE_PTR curr, next;
- gcmkHEADER_ARG("Kernel=0x%x, Mmu=0x%x Pid=%u", Kernel, Mmu, Pid);
+#if gcdUSE_MMU_EXCEPTION
+ gctUINT32 i;
+ struct _gcsMMU_STLB *stlb;
+ struct _gcsMMU_STLB **stlbs = Mmu->stlbs;
+#endif
+
+ gcmkHEADER_ARG("Mmu=0x%x Address=0x%x PageCount=%lu",
+ Mmu, Address, PageCount);
- gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
+ /* Verify the arguments. */
gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
+ gcmkVERIFY_ARGUMENT(PageCount > 0);
- gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->nodeMutex, gcvINFINITE));
- acquired = gcvTRUE;
+ /* Get the node by index. */
+ node = Mmu->mapLogical + _AddressToIndex(Mmu, Address);
- for (curr = Mmu->nodeList; curr != gcvNULL; curr = next)
+ gcmkONERROR(gckOS_AcquireMutex(Mmu->os, Mmu->pageTableMutex, gcvINFINITE));
+
+ if (PageCount == 1)
{
- next = curr->Virtual.next;
+ /* Single page node. */
+ _WritePageEntry(node, (~((1U<<8)-1)) | gcvMMU_SINGLE);
+ }
+ else
+ {
+ /* Mark the node as free. */
+ _WritePageEntry(node, (PageCount << 8) | gcvMMU_FREE);
+ _WritePageEntry(node + 1, ~0U);
+ }
- if (curr->Virtual.processID == Pid)
- {
- while (curr->Virtual.unlockPendings[Kernel->core] == 0 && curr->Virtual.lockeds[Kernel->core] > 0)
- {
- gcmkONERROR(gckVIDMEM_Unlock(Kernel, curr, gcvSURF_TYPE_UNKNOWN, gcvNULL));
- }
+ /* We have free nodes. */
+ Mmu->freeNodes = gcvTRUE;
- gcmkVERIFY_OK(gckVIDMEM_Free(curr));
- }
+#if gcdUSE_MMU_EXCEPTION
+ for (i = 0; i < PageCount; i++)
+ {
+        /* Get the slave TLB covering this address. */
+ stlb = stlbs[_MtlbOffset(Address)];
+
+ /* Enable exception */
+ stlb->logical[_StlbOffset(Address)] = gcdMMU_STLB_EXCEPTION;
}
+#endif
- gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
+ gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->pageTableMutex));
- gcmkFOOTER();
+
+ /* Success. */
+ gcmkFOOTER_NO();
return gcvSTATUS_OK;
OnError:
- if (acquired)
- {
- gcmkVERIFY_OK(gckOS_ReleaseMutex(Mmu->os, Mmu->nodeMutex));
- }
-
gcmkFOOTER();
return status;
}
gceSTATUS
gckMMU_Flush(
- IN gckMMU Mmu
+ IN gckMMU Mmu,
+ IN gceSURF_TYPE Type
)
{
gckHARDWARE hardware;
-#if gcdSHARED_PAGETABLE
+ gctUINT32 mask;
gctINT i;
+
+ if (Type == gcvSURF_VERTEX || Type == gcvSURF_INDEX)
+ {
+ mask = gcvPAGE_TABLE_DIRTY_BIT_FE;
+ }
+ else
+ {
+ mask = gcvPAGE_TABLE_DIRTY_BIT_OTHER;
+ }
+
+#if gcdPROCESS_ADDRESS_SPACE
+ for (i = 0; i < gcdMAX_GPU_COUNT; i++)
+ {
+ gcmkVERIFY_OK(
+ gckOS_AtomSetMask(Mmu->pageTableDirty[i], mask));
+ }
+#else
+#if gcdSHARED_PAGETABLE
for (i = 0; i < gcdMAX_GPU_COUNT; i++)
{
-#if gcdENABLE_VG
- if (i == gcvCORE_VG)
- {
- continue;
- }
-#endif
hardware = sharedPageTable->hardwares[i];
if (hardware)
{
- /* Notify cores who use this page table. */
- gcmkVERIFY_OK(
- gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
+ gcmkVERIFY_OK(gckOS_AtomSetMask(hardware->pageTableDirty, mask));
}
}
#elif gcdMIRROR_PAGETABLE
- gctINT i;
- for (i = 0; i < mirrorPageTable->reference; i++)
+ for (i = 0; i < (gctINT)mirrorPageTable->reference; i++)
{
hardware = mirrorPageTable->hardwares[i];
/* Notify cores who use this page table. */
gcmkVERIFY_OK(
- gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
+ gckOS_AtomSetMask(hardware->pageTableDirty, mask));
}
#else
hardware = Mmu->hardware;
gcmkVERIFY_OK(
- gckOS_AtomSet(hardware->os, hardware->pageTableDirty, 1));
+ gckOS_AtomSetMask(hardware->pageTableDirty, mask));
+#endif
#endif
return gcvSTATUS_OK;
IN gctUINT32 Address
)
{
+#if gcdPROCESS_ADDRESS_SPACE
+ gcsMMU_STLB_PTR *stlbs = Mmu->stlbs;
+ gcsMMU_STLB_PTR stlbDesc = stlbs[_MtlbOffset(Address)];
+#else
gctUINT32_PTR pageTable;
gctUINT32 index;
gctUINT32 mtlb, stlb;
+#endif
gcmkHEADER_ARG("Mmu=0x%08X Address=0x%08X", Mmu, Address);
gcmkVERIFY_OBJECT(Mmu, gcvOBJ_MMU);
gcmkASSERT(Mmu->hardware->mmuVersion > 0);
+#if gcdPROCESS_ADDRESS_SPACE
+ if (stlbDesc)
+ {
+ gcmkPRINT(" STLB entry = 0x%08X",
+ _ReadPageEntry(&stlbDesc->logical[_StlbOffset(Address)]));
+ }
+ else
+ {
+ gcmkPRINT(" MTLB entry is empty.");
+ }
+#else
mtlb = (Address & gcdMMU_MTLB_MASK) >> gcdMMU_MTLB_SHIFT;
- stlb = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
- if (Address >= 0x80000000)
+ if (mtlb >= Mmu->dynamicMappingStart)
{
+ stlb = (Address & gcdMMU_STLB_4K_MASK) >> gcdMMU_STLB_4K_SHIFT;
+
pageTable = Mmu->pageTableLogical;
index = (mtlb - Mmu->dynamicMappingStart)
gcmkPRINT(" Page table entry = 0x%08X", _ReadPageEntry(pageTable + index));
}
+ else
+ {
+ gcsMMU_STLB_PTR stlbObj = Mmu->staticSTLB;
+ gctUINT32 entry = Mmu->mtlbLogical[mtlb];
+
+ stlb = (Address & gcdMMU_STLB_64K_MASK) >> gcdMMU_STLB_64K_SHIFT;
+
+ entry &= 0xFFFFFFF0;
+
+ while (stlbObj)
+ {
+
+ if (entry == stlbObj->physBase)
+ {
+ gcmkPRINT(" Page table entry = 0x%08X", stlbObj->logical[stlb]);
+ break;
+ }
+
+ stlbObj = stlbObj->next;
+ }
+ }
+#endif
gcmkFOOTER_NO();
return gcvSTATUS_OK;