1 /****************************************************************************
3 * Copyright (C) 2005 - 2013 by Vivante Corp.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the license, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 *****************************************************************************/
22 #include "gc_hal_kernel_precomp.h"
24 #define _GC_OBJ_ZONE gcvZONE_VIDMEM
26 /******************************************************************************\
27 ******************************* Private Functions ******************************
28 \******************************************************************************/
30 /*******************************************************************************
34 ** Split a node on the required byte boundary.
39 ** Pointer to an gckOS object.
41 ** gcuVIDMEM_NODE_PTR Node
42 ** Pointer to the node to split.
45 ** Number of bytes to keep in the node.
54 ** gcvTRUE if the node was split successfully, or gcvFALSE if there is an
61 IN gcuVIDMEM_NODE_PTR Node,
65 gcuVIDMEM_NODE_PTR node;
66 gctPOINTER pointer = gcvNULL;
68 /* Make sure the byte boundary makes sense. */
69 if ((Bytes <= 0) || (Bytes > Node->VidMem.bytes))
74 /* Allocate a new gcuVIDMEM_NODE object. */
75 if (gcmIS_ERROR(gckOS_Allocate(Os,
76 gcmSIZEOF(gcuVIDMEM_NODE),
85 /* Initialize gcuVIDMEM_NODE structure. */
86 node->VidMem.offset = Node->VidMem.offset + Bytes;
87 node->VidMem.bytes = Node->VidMem.bytes - Bytes;
88 node->VidMem.alignment = 0;
89 node->VidMem.locked = 0;
90 node->VidMem.memory = Node->VidMem.memory;
91 node->VidMem.pool = Node->VidMem.pool;
92 node->VidMem.physical = Node->VidMem.physical;
94 #if gcdUSE_VIDMEM_PER_PID
95 gcmkASSERT(Node->VidMem.physical != 0);
96 gcmkASSERT(Node->VidMem.logical != gcvNULL);
97 node->VidMem.processID = Node->VidMem.processID;
98 node->VidMem.physical = Node->VidMem.physical + Bytes;
99 node->VidMem.logical = Node->VidMem.logical + Bytes;
101 node->VidMem.processID = 0;
102 node->VidMem.logical = gcvNULL;
106 /* Insert node behind specified node. */
107 node->VidMem.next = Node->VidMem.next;
108 node->VidMem.prev = Node;
109 Node->VidMem.next = node->VidMem.next->VidMem.prev = node;
111 /* Insert free node behind specified node. */
112 node->VidMem.nextFree = Node->VidMem.nextFree;
113 node->VidMem.prevFree = Node;
114 Node->VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
116 /* Adjust size of specified node. */
117 Node->VidMem.bytes = Bytes;
123 /*******************************************************************************
127 ** Merge two adjacent nodes together.
132 ** Pointer to an gckOS object.
134 ** gcuVIDMEM_NODE_PTR Node
135 ** Pointer to the first of the two nodes to merge.
/* NOTE(review): extract is missing lines here (signature line, braces); the
** merge absorbs Node->VidMem.next into Node and frees the second node. */
145 IN gcuVIDMEM_NODE_PTR Node
148 gcuVIDMEM_NODE_PTR node;
151 /* Save pointer to next node. */
152 node = Node->VidMem.next;
153 #if gcdUSE_VIDMEM_PER_PID
154 /* Check if the nodes are adjacent physically. */
155 if ( ((Node->VidMem.physical + Node->VidMem.bytes) != node->VidMem.physical) ||
156 ((Node->VidMem.logical + Node->VidMem.bytes) != node->VidMem.logical) )
163 /* This is a good time to make sure the heap is not corrupted. */
/* Nodes in a heap must be contiguous by offset; a gap or overlap means the
** linked list no longer describes the heap. */
164 if (Node->VidMem.offset + Node->VidMem.bytes != node->VidMem.offset)
166 /* Corrupted heap. */
168 Node->VidMem.offset + Node->VidMem.bytes == node->VidMem.offset);
169 return gcvSTATUS_HEAP_CORRUPTED;
173 /* Adjust byte count. */
174 Node->VidMem.bytes += node->VidMem.bytes;
176 /* Unlink next node from linked list. */
177 Node->VidMem.next = node->VidMem.next;
178 Node->VidMem.nextFree = node->VidMem.nextFree;
180 Node->VidMem.next->VidMem.prev =
181 Node->VidMem.nextFree->VidMem.prevFree = Node;
183 /* Free next node. */
184 status = gcmkOS_SAFE_FREE(Os, node);
188 /******************************************************************************\
189 ******************************* gckVIDMEM API Code ******************************
190 \******************************************************************************/
192 /*******************************************************************************
194 ** gckVIDMEM_ConstructVirtual
196 ** Construct a new gcuVIDMEM_NODE union for virtual memory.
201 ** Pointer to an gckKERNEL object.
204 ** Number of byte to allocate.
208 ** gcuVIDMEM_NODE_PTR * Node
209 ** Pointer to a variable that receives the gcuVIDMEM_NODE union pointer.
/* NOTE(review): several original lines are elided in this extract (function
** return type, braces, gcmkONERROR wrappers); do not restructure from here. */
212 gckVIDMEM_ConstructVirtual(
214 IN gctBOOL Contiguous,
216 OUT gcuVIDMEM_NODE_PTR * Node
221 gcuVIDMEM_NODE_PTR node = gcvNULL;
222 gctPOINTER pointer = gcvNULL;
225 gcmkHEADER_ARG("Kernel=0x%x Contiguous=%d Bytes=%lu", Kernel, Contiguous, Bytes);
227 /* Verify the arguments. */
228 gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
229 gcmkVERIFY_ARGUMENT(Bytes > 0);
230 gcmkVERIFY_ARGUMENT(Node != gcvNULL);
232 /* Extract the gckOS object pointer. */
234 gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
236 /* Allocate an gcuVIDMEM_NODE union. */
237 gcmkONERROR(gckOS_Allocate(os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
241 /* Initialize gcuVIDMEM_NODE union for virtual memory. */
242 node->Virtual.kernel = Kernel;
243 node->Virtual.contiguous = Contiguous;
244 node->Virtual.logical = gcvNULL;
/* Per-GPU lock/page-table state starts cleared for every possible core. */
246 for (i = 0; i < gcdMAX_GPU_COUNT; i++)
248 node->Virtual.lockeds[i] = 0;
249 node->Virtual.pageTables[i] = gcvNULL;
250 node->Virtual.lockKernels[i] = gcvNULL;
253 node->Virtual.mutex = gcvNULL;
255 gcmkONERROR(gckOS_GetProcessID(&node->Virtual.processID));
258 node->Virtual.next = gcvNULL;
259 node->Virtual.freePending = gcvFALSE;
260 for (i = 0; i < gcdMAX_GPU_COUNT; i++)
262 node->Virtual.unlockPendings[i] = gcvFALSE;
266 node->Virtual.freed = gcvFALSE;
268 gcmkONERROR(gckOS_ZeroMemory(&node->Virtual.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
270 /* Create the mutex. */
272 gckOS_CreateMutex(os, &node->Virtual.mutex));
274 /* Allocate the virtual memory. */
/* Note: bytes is recorded on the node inside the call's argument list. */
276 gckOS_AllocatePagedMemoryEx(os,
277 node->Virtual.contiguous,
278 node->Virtual.bytes = Bytes,
279 &node->Virtual.physical));
284 if (Kernel->core != gcvCORE_VG)
287 gckMMU_InsertNode(Kernel->mmu, node);
291 /* Return pointer to the gcuVIDMEM_NODE union. */
294 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
295 "Created virtual node 0x%x for %u bytes @ 0x%x",
296 node, Bytes, node->Virtual.physical);
299 gcmkFOOTER_ARG("*Node=0x%x", *Node);
/* Error path (OnError label elided): roll back mutex and node allocation. */
306 if (node->Virtual.mutex != gcvNULL)
308 /* Destroy the mutex. */
309 gcmkVERIFY_OK(gckOS_DeleteMutex(os, node->Virtual.mutex));
312 /* Free the structure. */
313 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, node));
316 /* Return the status. */
321 /*******************************************************************************
323 ** gckVIDMEM_DestroyVirtual
325 ** Destroy an gcuVIDMEM_NODE union for virtual memory.
329 ** gcuVIDMEM_NODE_PTR Node
330 ** Pointer to a gcuVIDMEM_NODE union.
/* NOTE(review): braces and some statements are elided in this extract. */
337 gckVIDMEM_DestroyVirtual(
338 IN gcuVIDMEM_NODE_PTR Node
344 gcmkHEADER_ARG("Node=0x%x", Node);
346 /* Verify the arguments. */
347 gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
349 /* Extract the gckOS object pointer. */
350 os = Node->Virtual.kernel->os;
351 gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
356 if (Node->Virtual.kernel->core != gcvCORE_VG)
360 gckMMU_RemoveNode(Node->Virtual.kernel->mmu, Node));
364 /* Delete the mutex. */
365 gcmkVERIFY_OK(gckOS_DeleteMutex(os, Node->Virtual.mutex));
/* Release any per-core GPU page-table mappings still held by this node. */
367 for (i = 0; i < gcdMAX_GPU_COUNT; i++)
369 if (Node->Virtual.pageTables[i] != gcvNULL)
374 /* Free the pages. */
375 gcmkVERIFY_OK(gckVGMMU_FreePages(Node->Virtual.lockKernels[i]->vg->mmu,
376 Node->Virtual.pageTables[i],
377 Node->Virtual.pageCount));
/* NOTE(review): the VG/non-VG selection (#if/#else) around these two
** FreePages calls is elided here — confirm against the full source. */
382 /* Free the pages. */
383 gcmkVERIFY_OK(gckMMU_FreePages(Node->Virtual.lockKernels[i]->mmu,
384 Node->Virtual.pageTables[i],
385 Node->Virtual.pageCount));
390 /* Delete the gcuVIDMEM_NODE union. */
391 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(os, Node));
398 /*******************************************************************************
400 ** gckVIDMEM_Construct
402 ** Construct a new gckVIDMEM object.
407 ** Pointer to an gckOS object.
409 ** gctUINT32 BaseAddress
410 ** Base address for the video memory heap.
413 ** Number of bytes in the video memory heap.
415 ** gctSIZE_T Threshold
416 ** Minimum number of bytes beyond an allocation before the node is
417 ** split. Can be used as a minimum alignment requirement.
419 ** gctSIZE_T BankSize
420 ** Number of bytes per physical memory bank. Used by bank
425 ** gckVIDMEM * Memory
426 ** Pointer to a variable that will hold the pointer to the gckVIDMEM
/* NOTE(review): this extract is missing many original lines (signature
** opener, braces, loop bodies around the bank computation); treat the code
** below as a reference skeleton, not as rebuildable source. */
432 IN gctUINT32 BaseAddress,
434 IN gctSIZE_T Threshold,
435 IN gctSIZE_T BankSize,
436 OUT gckVIDMEM * Memory
439 gckVIDMEM memory = gcvNULL;
441 gcuVIDMEM_NODE_PTR node;
443 gctPOINTER pointer = gcvNULL;
445 gcmkHEADER_ARG("Os=0x%x BaseAddress=%08x Bytes=%lu Threshold=%lu "
447 Os, BaseAddress, Bytes, Threshold, BankSize);
449 /* Verify the arguments. */
450 gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
451 gcmkVERIFY_ARGUMENT(Bytes > 0);
452 gcmkVERIFY_ARGUMENT(Memory != gcvNULL);
454 /* Allocate the gckVIDMEM object. */
455 gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(struct _gckVIDMEM), &pointer));
459 /* Initialize the gckVIDMEM object. */
460 memory->object.type = gcvOBJ_VIDMEM;
463 /* Set video memory heap information. */
464 memory->baseAddress = BaseAddress;
465 memory->bytes = Bytes;
466 memory->freeBytes = Bytes;
467 memory->threshold = Threshold;
468 memory->mutex = gcvNULL;
469 #if gcdUSE_VIDMEM_PER_PID
470 gcmkONERROR(gckOS_GetProcessID(&memory->pid));
475 /* Walk all possible banks. */
476 for (i = 0; i < gcmCOUNTOF(memory->sentinel); ++i)
482 /* Use all bytes for the first bank. */
487 /* Compute number of bytes for this bank. */
488 bytes = gcmALIGN(BaseAddress + 1, BankSize) - BaseAddress;
492 /* Make sure we don't exceed the total number of bytes. */
499 /* Mark heap is not used. */
/* A sentinel with all-NULL links denotes an unused bank heap. */
500 memory->sentinel[i].VidMem.next =
501 memory->sentinel[i].VidMem.prev =
502 memory->sentinel[i].VidMem.nextFree =
503 memory->sentinel[i].VidMem.prevFree = gcvNULL;
507 /* Allocate one gcuVIDMEM_NODE union. */
508 gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcuVIDMEM_NODE), &pointer));
512 /* Initialize gcuVIDMEM_NODE union. */
513 node->VidMem.memory = memory;
517 node->VidMem.nextFree =
518 node->VidMem.prevFree = &memory->sentinel[i];
520 node->VidMem.offset = BaseAddress;
521 node->VidMem.bytes = bytes;
522 node->VidMem.alignment = 0;
523 node->VidMem.physical = 0;
524 node->VidMem.pool = gcvPOOL_UNKNOWN;
526 node->VidMem.locked = 0;
528 #if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
529 node->VidMem.kernelVirtual = gcvNULL;
532 gcmkONERROR(gckOS_ZeroMemory(&node->VidMem.sharedInfo, gcmSIZEOF(gcsVIDMEM_NODE_SHARED_INFO)));
535 #if gcdUSE_VIDMEM_PER_PID
536 node->VidMem.processID = memory->pid;
537 node->VidMem.physical = memory->baseAddress + BaseAddress;
538 gcmkONERROR(gckOS_GetLogicalAddressProcess(Os,
539 node->VidMem.processID,
540 node->VidMem.physical,
541 &node->VidMem.logical));
/* NOTE(review): the #else of gcdUSE_VIDMEM_PER_PID above is elided. */
543 node->VidMem.processID = 0;
544 node->VidMem.logical = gcvNULL;
548 /* Initialize the linked list of nodes. */
/* The sentinel points at the single free node covering the whole bank. */
549 memory->sentinel[i].VidMem.next =
550 memory->sentinel[i].VidMem.prev =
551 memory->sentinel[i].VidMem.nextFree =
552 memory->sentinel[i].VidMem.prevFree = node;
555 memory->sentinel[i].VidMem.bytes = 0;
557 /* Adjust address for next bank. */
558 BaseAddress += bytes;
563 /* Assign all the bank mappings. */
/* Surface types are spread from the highest bank downward; with one bank
** everything maps to bank 0. */
564 memory->mapping[gcvSURF_RENDER_TARGET] = banks - 1;
565 memory->mapping[gcvSURF_BITMAP] = banks - 1;
566 if (banks > 1) --banks;
567 memory->mapping[gcvSURF_DEPTH] = banks - 1;
568 memory->mapping[gcvSURF_HIERARCHICAL_DEPTH] = banks - 1;
569 if (banks > 1) --banks;
570 memory->mapping[gcvSURF_TEXTURE] = banks - 1;
571 if (banks > 1) --banks;
572 memory->mapping[gcvSURF_VERTEX] = banks - 1;
573 if (banks > 1) --banks;
574 memory->mapping[gcvSURF_INDEX] = banks - 1;
575 if (banks > 1) --banks;
576 memory->mapping[gcvSURF_TILE_STATUS] = banks - 1;
577 if (banks > 1) --banks;
578 memory->mapping[gcvSURF_TYPE_UNKNOWN] = 0;
581 memory->mapping[gcvSURF_IMAGE] = 0;
582 memory->mapping[gcvSURF_MASK] = 0;
583 memory->mapping[gcvSURF_SCISSOR] = 0;
586 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
587 "[GALCORE] INDEX: bank %d",
588 memory->mapping[gcvSURF_INDEX]);
589 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
590 "[GALCORE] VERTEX: bank %d",
591 memory->mapping[gcvSURF_VERTEX]);
592 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
593 "[GALCORE] TEXTURE: bank %d",
594 memory->mapping[gcvSURF_TEXTURE]);
595 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
596 "[GALCORE] RENDER_TARGET: bank %d",
597 memory->mapping[gcvSURF_RENDER_TARGET]);
598 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
599 "[GALCORE] DEPTH: bank %d",
600 memory->mapping[gcvSURF_DEPTH]);
601 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
602 "[GALCORE] TILE_STATUS: bank %d",
603 memory->mapping[gcvSURF_TILE_STATUS]);
605 /* Allocate the mutex. */
606 gcmkONERROR(gckOS_CreateMutex(Os, &memory->mutex));
608 /* Return pointer to the gckVIDMEM object. */
612 gcmkFOOTER_ARG("*Memory=0x%x", *Memory);
/* Error path (OnError label elided): free mutex, per-bank nodes, object. */
617 if (memory != gcvNULL)
619 if (memory->mutex != gcvNULL)
621 /* Delete the mutex. */
622 gcmkVERIFY_OK(gckOS_DeleteMutex(Os, memory->mutex));
625 for (i = 0; i < banks; ++i)
628 gcmkASSERT(memory->sentinel[i].VidMem.next != gcvNULL);
629 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory->sentinel[i].VidMem.next));
632 /* Free the object. */
633 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Os, memory));
636 /* Return the status. */
641 /*******************************************************************************
645 ** Destroy an gckVIDMEM object.
650 ** Pointer to an gckVIDMEM object to destroy.
/* NOTE(review): function name/signature lines are elided in this extract. */
661 gcuVIDMEM_NODE_PTR node, next;
664 gcmkHEADER_ARG("Memory=0x%x", Memory);
666 /* Verify the arguments. */
667 gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
669 /* Walk all sentinels. */
670 for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
672 /* Bail out if the heap is not used. */
673 if (Memory->sentinel[i].VidMem.next == gcvNULL)
678 /* Walk all the nodes until we reach the sentinel. */
/* The sentinel is recognized by bytes == 0 (set in gckVIDMEM_Construct). */
679 for (node = Memory->sentinel[i].VidMem.next;
680 node->VidMem.bytes != 0;
683 /* Save pointer to the next node. */
684 next = node->VidMem.next;
687 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, node));
691 /* Free the mutex. */
692 gcmkVERIFY_OK(gckOS_DeleteMutex(Memory->os, Memory->mutex));
694 /* Mark the object as unknown. */
695 Memory->object.type = gcvOBJ_UNKNOWN;
697 /* Free the gckVIDMEM object. */
698 gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Memory->os, Memory));
705 /*******************************************************************************
707 ** gckVIDMEM_Allocate
709 ** Allocate rectangular memory from the gckVIDMEM object.
714 ** Pointer to an gckVIDMEM object.
717 ** Width of rectangle to allocate. Make sure the width is properly
721 ** Height of rectangle to allocate. Make sure the height is properly
725 ** Depth of rectangle to allocate. This equals to the number of
726 ** rectangles to allocate contiguously (i.e., for cubic maps and volume
729 ** gctUINT BytesPerPixel
730 ** Number of bytes per pixel.
732 ** gctUINT32 Alignment
733 ** Byte alignment for allocation.
736 ** Type of surface to allocate (use by bank optimization).
740 ** gcuVIDMEM_NODE_PTR * Node
741 ** Pointer to a variable that will hold the allocated memory node.
/* NOTE(review): thin wrapper — collapses the rectangle to a linear byte
** count and delegates to gckVIDMEM_AllocateLinear. */
749 IN gctUINT BytesPerPixel,
750 IN gctUINT32 Alignment,
751 IN gceSURF_TYPE Type,
752 OUT gcuVIDMEM_NODE_PTR * Node
758 gcmkHEADER_ARG("Memory=0x%x Width=%u Height=%u Depth=%u BytesPerPixel=%u "
759 "Alignment=%u Type=%d",
760 Memory, Width, Height, Depth, BytesPerPixel, Alignment,
763 /* Verify the arguments. */
764 gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
765 gcmkVERIFY_ARGUMENT(Width > 0);
766 gcmkVERIFY_ARGUMENT(Height > 0);
767 gcmkVERIFY_ARGUMENT(Depth > 0);
768 gcmkVERIFY_ARGUMENT(BytesPerPixel > 0);
769 gcmkVERIFY_ARGUMENT(Node != gcvNULL);
771 /* Compute linear size. */
/* NOTE(review): this multiplication is not overflow-checked; with large
** caller-supplied dimensions it can wrap — verify callers bound these. */
772 bytes = Width * Height * Depth * BytesPerPixel;
774 /* Allocate through linear function. */
776 gckVIDMEM_AllocateLinear(Memory, bytes, Alignment, Type, Node));
779 gcmkFOOTER_ARG("*Node=0x%x", *Node);
783 /* Return the status. */
788 #if gcdENABLE_BANK_ALIGNMENT
790 #if !gcdBANK_BIT_START
791 #error gcdBANK_BIT_START not defined.
795 #error gcdBANK_BIT_END not defined.
797 /*******************************************************************************
798 ** _GetSurfaceBankAlignment
800 ** Return the required offset alignment to make BaseAddress
806 ** Pointer to gcoOS object.
809 ** Type of allocation.
811 ** gctUINT32 BaseAddress
812 ** Base address of current video memory node.
816 ** gctUINT32_PTR AlignmentOffset
817 ** Pointer to a variable that will hold the number of bytes to skip in
818 ** the current video memory node in order to make the alignment bank
/* NOTE(review): several lines elided here (return type, switch opener,
** case labels for the depth branch). Bank is extracted from the address via
** bankMask/byteMask built from gcdBANK_BIT_START..gcdBANK_BIT_END. */
822 _GetSurfaceBankAlignment(
823 IN gceSURF_TYPE Type,
824 IN gctUINT32 BaseAddress,
825 OUT gctUINT32_PTR AlignmentOffset
829 /* To retrieve the bank. */
830 static const gctUINT32 bankMask = (0xFFFFFFFF << gcdBANK_BIT_START)
831 ^ (0xFFFFFFFF << (gcdBANK_BIT_END + 1));
833 /* To retrieve the bank and all the lower bytes. */
834 static const gctUINT32 byteMask = ~(0xFFFFFFFF << (gcdBANK_BIT_END + 1));
836 gcmkHEADER_ARG("Type=%d BaseAddress=0x%x ", Type, BaseAddress);
838 /* Verify the arguments. */
839 gcmkVERIFY_ARGUMENT(AlignmentOffset != gcvNULL);
843 case gcvSURF_RENDER_TARGET:
844 bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
846 /* Align to the first bank. */
847 *AlignmentOffset = (bank == 0) ?
849 ((1 << (gcdBANK_BIT_END + 1)) + 0) - (BaseAddress & byteMask);
/* Depth-type surfaces are steered to a different bank so color and depth
** traffic hit distinct DRAM banks. */
853 bank = (BaseAddress & bankMask) >> (gcdBANK_BIT_START);
855 /* Align to the third bank. */
856 *AlignmentOffset = (bank == 2) ?
858 ((1 << (gcdBANK_BIT_END + 1)) + (2 << gcdBANK_BIT_START)) - (BaseAddress & byteMask);
860 /* Add a channel offset at the channel bit. */
861 *AlignmentOffset += (1 << gcdBANK_CHANNEL_BIT);
865 /* no alignment needed. */
866 *AlignmentOffset = 0;
869 /* Return the status. */
870 gcmkFOOTER_ARG("*AlignmentOffset=%u", *AlignmentOffset);
/* First-fit search of a bank's free list for a node that can hold Bytes
** plus any padding needed for byte (and optionally DRAM-bank) alignment.
** On success *Alignment is rewritten to the concrete number of bytes to
** skip inside the returned node.
** NOTE(review): the function name line, Bank/Bytes parameter lines, braces
** and return statements are elided in this extract. */
875 static gcuVIDMEM_NODE_PTR
880 IN gceSURF_TYPE Type,
881 IN OUT gctUINT32_PTR Alignment
884 gcuVIDMEM_NODE_PTR node;
887 #if gcdENABLE_BANK_ALIGNMENT
888 gctUINT32 bankAlignment;
892 if (Memory->sentinel[Bank].VidMem.nextFree == gcvNULL)
894 /* No free nodes left. */
898 #if gcdENABLE_BANK_ALIGNMENT
899 /* Walk all free nodes until we have one that is big enough or we have
900 ** reached the sentinel. */
901 for (node = Memory->sentinel[Bank].VidMem.nextFree;
902 node->VidMem.bytes != 0;
903 node = node->VidMem.nextFree)
905 gcmkONERROR(_GetSurfaceBankAlignment(
907 node->VidMem.memory->baseAddress + node->VidMem.offset,
910 bankAlignment = gcmALIGN(bankAlignment, *Alignment);
912 /* Compute number of bytes to skip for alignment. */
913 alignment = (*Alignment == 0)
915 : (*Alignment - (node->VidMem.offset % *Alignment));
917 if (alignment == *Alignment)
919 /* Node is already aligned. */
923 if (node->VidMem.bytes >= Bytes + alignment + bankAlignment)
925 /* This node is big enough. */
926 *Alignment = alignment + bankAlignment;
/* Non-bank-aligned variant: same walk, alignment padding only. */
932 /* Walk all free nodes until we have one that is big enough or we have
933 reached the sentinel. */
934 for (node = Memory->sentinel[Bank].VidMem.nextFree;
935 node->VidMem.bytes != 0;
936 node = node->VidMem.nextFree)
939 gctINT modulo = gckMATH_ModuloInt(node->VidMem.offset, *Alignment);
941 /* Compute number of bytes to skip for alignment. */
942 alignment = (*Alignment == 0) ? 0 : (*Alignment - modulo);
944 if (alignment == *Alignment)
946 /* Node is already aligned. */
950 if (node->VidMem.bytes >= Bytes + alignment)
952 /* This node is big enough. */
953 *Alignment = alignment;
958 #if gcdENABLE_BANK_ALIGNMENT
961 /* Not enough memory. */
965 /*******************************************************************************
967 ** gckVIDMEM_AllocateLinear
969 ** Allocate linear memory from the gckVIDMEM object.
974 ** Pointer to an gckVIDMEM object.
977 ** Number of bytes to allocate.
979 ** gctUINT32 Alignment
980 ** Byte alignment for allocation.
983 ** Type of surface to allocate (use by bank optimization).
987 ** gcuVIDMEM_NODE_PTR * Node
988 ** Pointer to a variable that will hold the allocated memory node.
/* NOTE(review): many original lines are elided in this extract (braces,
** #else/#endif arms, goto targets); the control flow below is a skeleton. */
991 gckVIDMEM_AllocateLinear(
994 IN gctUINT32 Alignment,
995 IN gceSURF_TYPE Type,
996 OUT gcuVIDMEM_NODE_PTR * Node
1000 gcuVIDMEM_NODE_PTR node;
1001 gctUINT32 alignment;
1003 gctBOOL acquired = gcvFALSE;
1005 gcmkHEADER_ARG("Memory=0x%x Bytes=%lu Alignment=%u Type=%d",
1006 Memory, Bytes, Alignment, Type);
1008 /* Verify the arguments. */
1009 gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
1010 gcmkVERIFY_ARGUMENT(Bytes > 0);
1011 gcmkVERIFY_ARGUMENT(Node != gcvNULL);
1012 gcmkVERIFY_ARGUMENT(Type < gcvSURF_NUM_TYPES);
1014 /* Acquire the mutex. */
/* All free-list manipulation below happens under Memory->mutex. */
1015 gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
1018 #if !gcdUSE_VIDMEM_PER_PID
1020 if (Bytes > Memory->freeBytes)
1022 /* Not enough memory. */
1023 status = gcvSTATUS_OUT_OF_MEMORY;
1028 #if gcdSMALL_BLOCK_SIZE
/* Reserve the tail of the heap for small allocations once free space
** drops below bytes/gcdRATIO_FOR_SMALL_MEMORY. */
1029 if ((Memory->freeBytes < (Memory->bytes/gcdRATIO_FOR_SMALL_MEMORY))
1030 && (Bytes >= gcdSMALL_BLOCK_SIZE)
1033 /* The left memory is for small memory.*/
1034 status = gcvSTATUS_OUT_OF_MEMORY;
1039 /* Find the default bank for this surface type. */
1040 gcmkASSERT((gctINT) Type < gcmCOUNTOF(Memory->mapping));
1041 bank = Memory->mapping[Type];
1042 alignment = Alignment;
1044 #if gcdUSE_VIDMEM_PER_PID
1045 if (Bytes <= Memory->freeBytes)
1048 /* Find a free node in the default bank. */
1049 node = _FindNode(Memory, bank, Bytes, Type, &alignment);
1051 /* Out of memory? */
1052 if (node == gcvNULL)
1054 /* Walk all lower banks. */
1055 for (i = bank - 1; i >= 0; --i)
1057 /* Find a free node inside the current bank. */
1058 node = _FindNode(Memory, i, Bytes, Type, &alignment);
1059 if (node != gcvNULL)
1066 if (node == gcvNULL)
1068 /* Walk all upper banks. */
1069 for (i = bank + 1; i < gcmCOUNTOF(Memory->sentinel); ++i)
1071 if (Memory->sentinel[i].VidMem.nextFree == gcvNULL)
1073 /* Abort when we reach unused banks. */
1077 /* Find a free node inside the current bank. */
1078 node = _FindNode(Memory, i, Bytes, Type, &alignment);
1079 if (node != gcvNULL)
1085 #if gcdUSE_VIDMEM_PER_PID
1089 if (node == gcvNULL)
1091 /* Out of memory. */
1092 #if gcdUSE_VIDMEM_PER_PID
1093 /* Allocate more memory from shared pool. */
/* Per-PID fallback: grow the heap by a fresh contiguous allocation and
** splice it into the bank's node and free lists as a new free node. */
1095 gctPHYS_ADDR physical_temp;
1099 bytes = gcmALIGN(Bytes, gcdUSE_VIDMEM_PER_PID_SIZE);
1101 gcmkONERROR(gckOS_AllocateContiguous(Memory->os,
1107 /* physical address is returned as 0 for user space. workaround. */
1108 if (physical_temp == gcvNULL)
1110 gcmkONERROR(gckOS_GetPhysicalAddress(Memory->os, logical, &physical));
1113 /* Allocate one gcuVIDMEM_NODE union. */
1115 gckOS_Allocate(Memory->os,
1116 gcmSIZEOF(gcuVIDMEM_NODE),
1117 (gctPOINTER *) &node));
1119 /* Initialize gcuVIDMEM_NODE union. */
1120 node->VidMem.memory = Memory;
1122 node->VidMem.offset = 0;
1123 node->VidMem.bytes = bytes;
1124 node->VidMem.alignment = 0;
1125 node->VidMem.physical = physical;
1126 node->VidMem.pool = gcvPOOL_UNKNOWN;
1128 node->VidMem.locked = 0;
1131 gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
1132 node->VidMem.logical = logical;
1133 gcmkASSERT(logical != gcvNULL);
1136 /* Insert node behind sentinel node. */
1137 node->VidMem.next = Memory->sentinel[bank].VidMem.next;
1138 node->VidMem.prev = &Memory->sentinel[bank];
1139 Memory->sentinel[bank].VidMem.next = node->VidMem.next->VidMem.prev = node;
1141 /* Insert free node behind sentinel node. */
1142 node->VidMem.nextFree = Memory->sentinel[bank].VidMem.nextFree;
1143 node->VidMem.prevFree = &Memory->sentinel[bank];
1144 Memory->sentinel[bank].VidMem.nextFree = node->VidMem.nextFree->VidMem.prevFree = node;
1146 Memory->freeBytes += bytes;
/* NOTE(review): non-per-PID branch — plain out-of-memory. */
1148 status = gcvSTATUS_OUT_OF_MEMORY;
1153 /* Do we have an alignment? */
1156 /* Split the node so it is aligned. */
1157 if (_Split(Memory->os, node, alignment))
1159 /* Successful split, move to aligned node. */
1160 node = node->VidMem.next;
1162 /* Remove alignment. */
1167 /* Do we have enough memory after the allocation to split it? */
1168 if (node->VidMem.bytes - Bytes > Memory->threshold)
1170 /* Adjust the node size. */
1171 _Split(Memory->os, node, Bytes);
1174 /* Remove the node from the free list. */
1175 node->VidMem.prevFree->VidMem.nextFree = node->VidMem.nextFree;
1176 node->VidMem.nextFree->VidMem.prevFree = node->VidMem.prevFree;
1177 node->VidMem.nextFree =
1178 node->VidMem.prevFree = gcvNULL;
1180 /* Fill in the information. */
1181 node->VidMem.alignment = alignment;
1182 node->VidMem.memory = Memory;
1184 #if !gcdUSE_VIDMEM_PER_PID
1185 node->VidMem.logical = gcvNULL;
1186 gcmkONERROR(gckOS_GetProcessID(&node->VidMem.processID));
1188 gcmkASSERT(node->VidMem.logical != gcvNULL);
1192 /* Adjust the number of free bytes. */
1193 Memory->freeBytes -= node->VidMem.bytes;
1195 node->VidMem.freePending = gcvFALSE;
1197 #if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
1198 node->VidMem.kernelVirtual = gcvNULL;
1201 /* Release the mutex. */
1202 gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
1204 /* Return the pointer to the node. */
1207 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1208 "Allocated %u bytes @ 0x%x [0x%08X]",
1209 node->VidMem.bytes, node, node->VidMem.offset);
1212 gcmkFOOTER_ARG("*Node=0x%x", *Node);
1213 return gcvSTATUS_OK;
/* Error path (OnError label elided): drop the mutex before returning. */
1218 /* Release the mutex. */
1219 gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
1222 /* Return the status. */
1227 /*******************************************************************************
1231 ** Free an allocated video memory node.
1235 ** gcuVIDMEM_NODE_PTR Node
1236 ** Pointer to a gcuVIDMEM_NODE object.
/* NOTE(review): extract is missing the signature opener, braces, gotos and
** labels; the function handles both heap (VidMem) and virtual nodes. */
1244 IN gcuVIDMEM_NODE_PTR Node
1248 gckKERNEL kernel = gcvNULL;
1249 gckVIDMEM memory = gcvNULL;
1250 gcuVIDMEM_NODE_PTR node;
1251 gctBOOL mutexAcquired = gcvFALSE;
1253 gctBOOL acquired = gcvFALSE;
1254 gctINT32 i, totalLocked;
1256 gcmkHEADER_ARG("Node=0x%x", Node);
1258 /* Verify the arguments. */
1259 if ((Node == gcvNULL)
1260 || (Node->VidMem.memory == gcvNULL)
1263 /* Invalid object. */
1264 gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
1267 /**************************** Video Memory ********************************/
1269 if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
/* Locked nodes cannot be reclaimed yet; mark them and let the final
** unlock perform the deferred free. */
1271 if (Node->VidMem.locked > 0)
1273 /* Client still has a lock, defer free op 'till when lock reaches 0. */
1274 Node->VidMem.freePending = gcvTRUE;
1276 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1277 "Node 0x%x is locked (%d)... deferring free.",
1278 Node, Node->VidMem.locked);
1281 return gcvSTATUS_OK;
1284 /* Extract pointer to gckVIDMEM object owning the node. */
1285 memory = Node->VidMem.memory;
1287 /* Acquire the mutex. */
1289 gckOS_AcquireMutex(memory->os, memory->mutex, gcvINFINITE));
1291 mutexAcquired = gcvTRUE;
1294 #if !gcdUSE_VIDMEM_PER_PID
1296 Node->VidMem.processID = 0;
1297 Node->VidMem.logical = gcvNULL;
1300 /* Don't try to re-free an already freed node. */
/* A node on the free list has non-NULL nextFree/prevFree. */
1301 if ((Node->VidMem.nextFree == gcvNULL)
1302 && (Node->VidMem.prevFree == gcvNULL)
1306 #if gcdDYNAMIC_MAP_RESERVED_MEMORY && gcdENABLE_VG
1307 if (Node->VidMem.kernelVirtual)
1309 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1310 "%s(%d) Unmap %x from kernel space.",
1311 __FUNCTION__, __LINE__,
1312 Node->VidMem.kernelVirtual);
1315 gckOS_UnmapPhysical(memory->os,
1316 Node->VidMem.kernelVirtual,
1317 Node->VidMem.bytes));
1319 Node->VidMem.kernelVirtual = gcvNULL;
1323 /* Check if Node is already freed. */
1324 if (Node->VidMem.nextFree)
1326 /* Node is already freed. */
1327 gcmkONERROR(gcvSTATUS_INVALID_DATA);
1330 /* Update the number of free bytes. */
1331 memory->freeBytes += Node->VidMem.bytes;
1333 /* Find the next free node. */
1334 for (node = Node->VidMem.next;
1335 node != gcvNULL && node->VidMem.nextFree == gcvNULL;
1336 node = node->VidMem.next) ;
1338 /* Insert this node in the free list. */
1339 Node->VidMem.nextFree = node;
1340 Node->VidMem.prevFree = node->VidMem.prevFree;
1342 Node->VidMem.prevFree->VidMem.nextFree =
1343 node->VidMem.prevFree = Node;
1345 /* Is the next node a free node and not the sentinel? */
/* Coalesce with physically adjacent free neighbors to fight
** fragmentation; the sentinel (bytes == 0) is never merged. */
1346 if ((Node->VidMem.next == Node->VidMem.nextFree)
1347 && (Node->VidMem.next->VidMem.bytes != 0)
1350 /* Merge this node with the next node. */
1351 gcmkONERROR(_Merge(memory->os, node = Node));
1352 gcmkASSERT(node->VidMem.nextFree != node);
1353 gcmkASSERT(node->VidMem.prevFree != node);
1356 /* Is the previous node a free node and not the sentinel? */
1357 if ((Node->VidMem.prev == Node->VidMem.prevFree)
1358 && (Node->VidMem.prev->VidMem.bytes != 0)
1361 /* Merge this node with the previous node. */
1362 gcmkONERROR(_Merge(memory->os, node = Node->VidMem.prev));
1363 gcmkASSERT(node->VidMem.nextFree != node);
1364 gcmkASSERT(node->VidMem.prevFree != node);
1368 /* Release the mutex. */
1369 gcmkVERIFY_OK(gckOS_ReleaseMutex(memory->os, memory->mutex));
1371 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1372 "Node 0x%x is freed.",
1377 return gcvSTATUS_OK;
1380 /*************************** Virtual Memory *******************************/
1382 /* Get gckKERNEL object. */
1383 kernel = Node->Virtual.kernel;
1385 /* Verify the gckKERNEL object pointer. */
1386 gcmkVERIFY_OBJECT(kernel, gcvOBJ_KERNEL);
1388 /* Get the gckOS object pointer. */
1390 gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
1392 /* Grab the mutex. */
1394 gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
/* A virtual node is only freed once no core holds a lock on it. */
1398 for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
1400 totalLocked += Node->Virtual.lockeds[i];
1403 if (totalLocked > 0)
1405 gcmkTRACE_ZONE(gcvLEVEL_ERROR, gcvZONE_VIDMEM,
1406 "gckVIDMEM_Free: Virtual node 0x%x is locked (%d)",
1410 Node->Virtual.freed = gcvTRUE;
1412 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
1416 /* Free the virtual memory. */
1417 gcmkVERIFY_OK(gckOS_FreePagedMemory(kernel->os,
1418 Node->Virtual.physical,
1419 Node->Virtual.bytes));
1421 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
1423 /* Destroy the gcuVIDMEM_NODE union. */
1424 gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
1429 return gcvSTATUS_OK;
/* Error path (OnError label elided): release whichever mutex was taken. */
1434 /* Release the mutex. */
1435 gcmkVERIFY_OK(gckOS_ReleaseMutex(
1436 memory->os, memory->mutex
1442 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
1445 /* Return the status. */
1452 /*******************************************************************************
1454 ** gcoVIDMEM_FreeHandleMemory
1456 ** Free all allocated video memory nodes for a handle.
1461 ** Pointer to an gcoVIDMEM object..
/* NOTE(review): the Pid parameter line, braces and the restart logic after
** a merge are elided in this extract. */
1468 gckVIDMEM_FreeHandleMemory(
1469 IN gckKERNEL Kernel,
1470 IN gckVIDMEM Memory,
1475 gctBOOL mutex = gcvFALSE;
1476 gcuVIDMEM_NODE_PTR node;
1478 gctUINT32 nodeCount = 0, byteCount = 0;
1481 gcmkHEADER_ARG("Kernel=0x%x, Memory=0x%x Pid=0x%u", Kernel, Memory, Pid);
1483 gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
1484 gcmkVERIFY_OBJECT(Memory, gcvOBJ_VIDMEM);
1486 gcmkONERROR(gckOS_AcquireMutex(Memory->os, Memory->mutex, gcvINFINITE));
1489 /* Walk all sentinels. */
1490 for (i = 0; i < gcmCOUNTOF(Memory->sentinel); ++i)
1492 /* Bail out if the heap is not used. */
1493 if (Memory->sentinel[i].VidMem.next == gcvNULL)
1502 /* Walk all the nodes until we reach the sentinel. */
1503 for (node = Memory->sentinel[i].VidMem.next;
1504 node->VidMem.bytes != 0;
1505 node = node->VidMem.next)
1507 /* Free the node if it was allocated by Handle. */
1508 if (node->VidMem.processID == Pid)
1510 /* Unlock video memory. */
/* Drop every outstanding lock so the free below is not deferred. */
1511 while (node->VidMem.locked > 0)
1513 gckVIDMEM_Unlock(Kernel, node, gcvSURF_TYPE_UNKNOWN, gcvNULL);
1517 byteCount += node->VidMem.bytes;
1519 /* Free video memory. */
1520 gcmkVERIFY_OK(gckVIDMEM_Free(node));
1523 * Freeing may cause a merge which will invalidate our iteration.
1524 * Don't be clever, just restart.
1530 #if gcdUSE_VIDMEM_PER_PID
1533 gcmkASSERT(node->VidMem.processID == Pid);
1541 gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
1543 return gcvSTATUS_OK;
/* Error path (OnError label elided): release the heap mutex. */
1548 gcmkVERIFY_OK(gckOS_ReleaseMutex(Memory->os, Memory->mutex));
1556 /*******************************************************************************
1558 ** _NeedVirtualMapping
1560 ** Determine whether a GPU page table mapping must be set up for the video node.
1564 ** Pointer to an gckKERNEL object.
1566 ** gcuVIDMEM_NODE_PTR Node
1567 ** Pointer to a gcuVIDMEM_NODE union.
1570 ** Id of current GPU.
1573 ** gctBOOL * NeedMapping
1574 ** A pointer to hold the result: whether Node needs a GPU mapping.
/* Decide whether 'Node' must be mapped through the GPU MMU page table.
** Contiguous memory that resolves entirely to a non-virtual pool can be
** addressed directly; non-contiguous memory always needs a mapping.
** NOTE(review): the 'Core' parameter declaration and the local declarations
** for 'phys', 'end', 'pool' and 'offset' fall in gaps of this listing. */
1577 _NeedVirtualMapping(
1578 IN gckKERNEL Kernel,
1580 IN gcuVIDMEM_NODE_PTR Node,
1581 OUT gctBOOL * NeedMapping
1589 gctUINT32 baseAddress;
1591 gcmkHEADER_ARG("Node=0x%X", Node);
1593 /* Verify the arguments. */
1594 gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);
1595 gcmkVERIFY_ARGUMENT(Node != gcvNULL);
1596 gcmkVERIFY_ARGUMENT(NeedMapping != gcvNULL);
1597 gcmkVERIFY_ARGUMENT(Core < gcdMAX_GPU_COUNT);
1599 if (Node->Virtual.contiguous)
/* The VG core addresses contiguous memory directly - no mapping needed. */
1602 if (Core == gcvCORE_VG)
1604 *NeedMapping = gcvFALSE;
1609 /* Convert logical address into a physical address. */
1611 gckOS_GetPhysicalAddress(Kernel->os, Node->Virtual.logical, &phys));
1613 gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
/* Physical address must not be below the GPU's base address. */
1615 gcmkASSERT(phys >= baseAddress);
1617 /* Subtract baseAddress to get a GPU address used for programming. */
1618 phys -= baseAddress;
1620 /* If part of region is belong to gcvPOOL_VIRTUAL,
1621 ** whole region has to be mapped. */
/* Classify the LAST byte of the region; if even the end falls in the
** virtual pool, the whole region must go through the page table. */
1622 end = phys + Node->Virtual.bytes - 1;
1624 gcmkONERROR(gckHARDWARE_SplitMemory(
1625 Kernel->hardware, end, &pool, &offset
1628 *NeedMapping = (pool == gcvPOOL_VIRTUAL);
/* Non-contiguous memory always requires a page table mapping. */
1633 *NeedMapping = gcvTRUE;
1636 gcmkFOOTER_ARG("*NeedMapping=%d", *NeedMapping);
1637 return gcvSTATUS_OK;
1644 /*******************************************************************************
1648 ** Lock a video memory node and return its hardware specific address.
1653 ** Pointer to an gckKERNEL object.
1655 ** gcuVIDMEM_NODE_PTR Node
1656 ** Pointer to a gcuVIDMEM_NODE union.
1660 ** gctUINT32 * Address
1661 ** Pointer to a variable that will hold the hardware specific address.
/* gckVIDMEM_Lock: lock a video memory node and return its hardware-specific
** address in *Address. Linear video memory only has its lock count bumped
** and its physical address computed; virtual memory is page-locked, and on
** the first lock per core either converted directly or mapped through the
** GPU MMU.
** NOTE(review): the function header line itself ('gckVIDMEM_Lock(') and
** several braces/lines fall in gaps of this listing. */
1665 IN gckKERNEL Kernel,
1666 IN gcuVIDMEM_NODE_PTR Node,
1667 IN gctBOOL Cacheable,
1668 OUT gctUINT32 * Address
1672 gctBOOL acquired = gcvFALSE;
1673 gctBOOL locked = gcvFALSE;
1675 gctBOOL needMapping;
1676 gctUINT32 baseAddress;
1678 gcmkHEADER_ARG("Node=0x%x", Node);
1680 /* Verify the arguments. */
1681 gcmkVERIFY_ARGUMENT(Address != gcvNULL);
1683 if ((Node == gcvNULL)
1684 || (Node->VidMem.memory == gcvNULL)
1687 /* Invalid object. */
1688 gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
1691 /**************************** Video Memory ********************************/
1693 if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
/* Linear video memory cannot be made cacheable. */
1695 if (Cacheable == gcvTRUE)
1697 gcmkONERROR(gcvSTATUS_INVALID_REQUEST);
1700 /* Increment the lock count. */
1701 Node->VidMem.locked ++;
1703 /* Return the physical address of the node. */
1704 #if !gcdUSE_VIDMEM_PER_PID
1705 *Address = Node->VidMem.memory->baseAddress
1706 + Node->VidMem.offset
1707 + Node->VidMem.alignment;
/* Per-PID build: the node stores its own physical address. */
1709 *Address = Node->VidMem.physical;
1712 /* Get hardware specific address. */
1714 if (Kernel->vg == gcvNULL)
1717 if (Kernel->hardware->mmuVersion == 0)
1719 /* Convert physical to GPU address for old mmu. */
1720 gcmkONERROR(gckOS_GetBaseAddress(Kernel->os, &baseAddress));
/* NOTE(review): this asserts '>' while _NeedVirtualMapping asserts
** 'phys >= baseAddress' for the same relation; an address exactly equal
** to baseAddress would trip this assert. Confirm whether '>=' was
** intended here. */
1721 gcmkASSERT(*Address > baseAddress);
1722 *Address -= baseAddress;
1726 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1727 "Locked node 0x%x (%d) @ 0x%08X",
1729 Node->VidMem.locked,
1733 /*************************** Virtual Memory *******************************/
1737 /* Verify the gckKERNEL object pointer. */
1738 gcmkVERIFY_OBJECT(Node->Virtual.kernel, gcvOBJ_KERNEL);
1740 /* Extract the gckOS object pointer. */
1741 os = Node->Virtual.kernel->os;
1742 gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
1744 /* Grab the mutex. */
1745 gcmkONERROR(gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
1748 #if gcdPAGED_MEMORY_CACHEABLE
1749 /* Force video memory cacheable. */
1750 Cacheable = gcvTRUE;
/* Lock the paged memory, producing a logical address and the page count.
** NOTE(review): the call line (presumably gckOS_LockPages) falls in a gap
** of this listing. */
1755 Node->Virtual.physical,
1756 Node->Virtual.bytes,
1758 &Node->Virtual.logical,
1759 &Node->Virtual.pageCount));
1761 /* Increment the lock count. */
/* Post-increment: the body below runs only on the first lock per core. */
1762 if (Node->Virtual.lockeds[Kernel->core] ++ == 0)
1764 /* Is this node pending for a final unlock? */
1766 if (!Node->Virtual.contiguous && Node->Virtual.unlockPendings[Kernel->core])
1768 /* Make sure we have a page table. */
1769 gcmkASSERT(Node->Virtual.pageTables[Kernel->core] != gcvNULL);
1771 /* Remove pending unlock. */
1772 Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
1775 /* First lock - create a page table. */
1776 gcmkASSERT(Node->Virtual.pageTables[Kernel->core] == gcvNULL);
1778 /* Make sure we mark our node as not flushed. */
1779 Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
/* Decide whether a GPU MMU mapping is actually required for this node. */
1784 gcmkONERROR(_NeedVirtualMapping(Kernel, Kernel->core, Node, &needMapping));
1786 if (needMapping == gcvFALSE)
1788 /* Get hardware specific address. */
1790 if (Kernel->vg != gcvNULL)
1792 gcmkONERROR(gckVGHARDWARE_ConvertLogical(Kernel->vg->hardware,
1793 Node->Virtual.logical,
1794 &Node->Virtual.addresses[Kernel->core]));
1799 gcmkONERROR(gckHARDWARE_ConvertLogical(Kernel->hardware,
1800 Node->Virtual.logical,
1801 &Node->Virtual.addresses[Kernel->core]));
/* Mapping required: allocate MMU pages for this core. */
1807 if (Kernel->vg != gcvNULL)
1809 /* Allocate pages inside the MMU. */
1811 gckVGMMU_AllocatePages(Kernel->vg->mmu,
1812 Node->Virtual.pageCount,
1813 &Node->Virtual.pageTables[Kernel->core],
1814 &Node->Virtual.addresses[Kernel->core]));
1819 /* Allocate pages inside the MMU. */
1821 gckMMU_AllocatePagesEx(Kernel->mmu,
1822 Node->Virtual.pageCount,
1824 &Node->Virtual.pageTables[Kernel->core],
1825 &Node->Virtual.addresses[Kernel->core]));
/* Remember which kernel owns this core's page table entries. */
1828 Node->Virtual.lockKernels[Kernel->core] = Kernel;
1830 /* Map the pages. */
1833 gckOS_MapPagesEx(os,
1835 Node->Virtual.physical,
1836 Node->Virtual.logical,
1837 Node->Virtual.pageCount,
1838 Node->Virtual.pageTables[Kernel->core]));
1841 gckOS_MapPagesEx(os,
1843 Node->Virtual.physical,
1844 Node->Virtual.pageCount,
1845 Node->Virtual.pageTables[Kernel->core]));
/* Flush the MMU so the new entries take effect. */
1849 if (Kernel->core == gcvCORE_VG)
1851 gcmkONERROR(gckVGMMU_Flush(Kernel->vg->mmu));
1856 gcmkONERROR(gckMMU_Flush(Kernel->mmu));
1859 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
1860 "Mapped virtual node 0x%x to 0x%08X",
1862 Node->Virtual.addresses[Kernel->core]);
1865 /* Return hardware address. */
1866 *Address = Node->Virtual.addresses[Kernel->core];
1868 /* Release the mutex. */
1869 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
1873 gcmkFOOTER_ARG("*Address=%08x", *Address);
1874 return gcvSTATUS_OK;
/* Error rollback: undo MMU allocation, page lock, lock count and mutex,
** in reverse order of acquisition. */
1879 if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
1882 if (Kernel->vg != gcvNULL)
1884 /* Free the pages from the MMU. */
1886 gckVGMMU_FreePages(Kernel->vg->mmu,
1887 Node->Virtual.pageTables[Kernel->core],
1888 Node->Virtual.pageCount));
1893 /* Free the pages from the MMU. */
1895 gckMMU_FreePages(Kernel->mmu,
1896 Node->Virtual.pageTables[Kernel->core],
1897 Node->Virtual.pageCount));
1899 Node->Virtual.pageTables[Kernel->core] = gcvNULL;
1900 Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
1903 /* Unlock the pages. */
1905 gckOS_UnlockPages(os,
1906 Node->Virtual.physical,
1907 Node->Virtual.bytes,
1908 Node->Virtual.logical
/* Undo the lock count increment performed above. */
1911 Node->Virtual.lockeds[Kernel->core]--;
1916 /* Release the mutex. */
1917 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
1920 /* Return the status. */
1925 /*******************************************************************************
1929 ** Unlock a video memory node.
1934 ** Pointer to an gckKERNEL object.
1936 ** gcuVIDMEM_NODE_PTR Node
1937 ** Pointer to a locked gcuVIDMEM_NODE union.
1939 ** gceSURF_TYPE Type
1940 ** Type of surface to unlock.
1942 ** gctBOOL * Asynchroneous
1943 ** Pointer to a variable specifying whether the surface should be
1944 ** unlocked asynchronously or not.
1948 ** gctBOOL * Asynchroneous
1949 ** Pointer to a variable receiving gcvTRUE when the unlock has to be
1950 ** completed asynchronously via a scheduled event, or gcvFALSE otherwise.
1955 IN gckKERNEL Kernel,
1956 IN gcuVIDMEM_NODE_PTR Node,
1957 IN gceSURF_TYPE Type,
1958 IN OUT gctBOOL * Asynchroneous
1962 gckHARDWARE hardware;
1964 gctSIZE_T requested, bufferSize;
1965 gckCOMMAND command = gcvNULL;
1966 gceKERNEL_FLUSH flush;
1968 gctBOOL acquired = gcvFALSE;
1969 gctBOOL commitEntered = gcvFALSE;
1970 gctINT32 i, totalLocked;
1972 gcmkHEADER_ARG("Node=0x%x Type=%d *Asynchroneous=%d",
1973 Node, Type, gcmOPT_VALUE(Asynchroneous));
1975 /* Verify the arguments. */
1976 if ((Node == gcvNULL)
1977 || (Node->VidMem.memory == gcvNULL)
1980 /* Invalid object. */
1981 gcmkONERROR(gcvSTATUS_INVALID_OBJECT);
1984 /**************************** Video Memory ********************************/
1986 if (Node->VidMem.memory->object.type == gcvOBJ_VIDMEM)
1988 if (Node->VidMem.locked <= 0)
1990 /* The surface was not locked. */
1991 status = gcvSTATUS_MEMORY_UNLOCKED;
1995 /* Decrement the lock count. */
1996 Node->VidMem.locked --;
1998 if (Asynchroneous != gcvNULL)
2000 /* No need for any events. */
2001 *Asynchroneous = gcvFALSE;
2004 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
2005 "Unlocked node 0x%x (%d)",
2007 Node->VidMem.locked);
2010 /* Unmap the video memory */
2011 if ((Node->VidMem.locked == 0) && (Node->VidMem.logical != gcvNULL))
2013 if (Kernel->core == gcvCORE_VG)
2015 gckKERNEL_UnmapVideoMemory(Kernel,
2016 Node->VidMem.logical,
2017 Node->VidMem.processID,
2018 Node->VidMem.bytes);
2019 Node->VidMem.logical = gcvNULL;
2022 #endif /* __QNXNTO__ */
2024 if (Node->VidMem.freePending && (Node->VidMem.locked == 0))
2026 /* Client has unlocked node previously attempted to be freed by compositor. Free now. */
2027 Node->VidMem.freePending = gcvFALSE;
2028 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
2029 "Deferred-freeing Node 0x%x.",
2031 gcmkONERROR(gckVIDMEM_Free(Node));
2035 /*************************** Virtual Memory *******************************/
2039 /* Verify the gckHARDWARE object pointer. */
2040 hardware = Kernel->hardware;
2041 gcmkVERIFY_OBJECT(hardware, gcvOBJ_HARDWARE);
2043 /* Verify the gckCOMMAND object pointer. */
2044 command = Kernel->command;
2045 gcmkVERIFY_OBJECT(command, gcvOBJ_COMMAND);
2047 /* Get the gckOS object pointer. */
2049 gcmkVERIFY_OBJECT(os, gcvOBJ_OS);
2051 /* Grab the mutex. */
2053 gckOS_AcquireMutex(os, Node->Virtual.mutex, gcvINFINITE));
2057 if (Asynchroneous == gcvNULL)
2059 if (Node->Virtual.lockeds[Kernel->core] == 0)
2061 status = gcvSTATUS_MEMORY_UNLOCKED;
2065 /* Decrement lock count. */
2066 -- Node->Virtual.lockeds[Kernel->core];
2068 /* See if we can unlock the resources. */
2069 if (Node->Virtual.lockeds[Kernel->core] == 0)
2071 /* Free the page table. */
2072 if (Node->Virtual.pageTables[Kernel->core] != gcvNULL)
2075 if (Kernel->vg != gcvNULL)
2078 gckVGMMU_FreePages(Kernel->vg->mmu,
2079 Node->Virtual.pageTables[Kernel->core],
2080 Node->Virtual.pageCount));
2086 gckMMU_FreePages(Kernel->mmu,
2087 Node->Virtual.pageTables[Kernel->core],
2088 Node->Virtual.pageCount));
2090 /* Mark page table as freed. */
2091 Node->Virtual.pageTables[Kernel->core] = gcvNULL;
2092 Node->Virtual.lockKernels[Kernel->core] = gcvNULL;
2096 /* Mark node as unlocked. */
2097 Node->Virtual.unlockPendings[Kernel->core] = gcvFALSE;
2101 for (i = 0, totalLocked = 0; i < gcdMAX_GPU_COUNT; i++)
2103 totalLocked += Node->Virtual.lockeds[i];
2106 if (totalLocked == 0)
2108 /* Owner have already freed this node
2109 ** and we are the last one to unlock, do
2111 if (Node->Virtual.freed)
2113 /* Free the virtual memory. */
2114 gcmkVERIFY_OK(gckOS_FreePagedMemory(Kernel->os,
2115 Node->Virtual.physical,
2116 Node->Virtual.bytes));
2118 /* Release mutex before node is destroyed */
2119 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
2121 acquired = gcvFALSE;
2123 /* Destroy the gcuVIDMEM_NODE union. */
2124 gcmkVERIFY_OK(gckVIDMEM_DestroyVirtual(Node));
2126 /* Node has been destroyed, so we should not touch it any more */
2128 return gcvSTATUS_OK;
2132 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
2133 "Unmapped virtual node 0x%x from 0x%08X",
2134 Node, Node->Virtual.addresses[Kernel->core]);
2140 /* If we need to unlock a node from virtual memory we have to be
2141 ** very carefull. If the node is still inside the caches we
2142 ** might get a bus error later if the cache line needs to be
2143 ** replaced. So - we have to flush the caches before we do
2146 /* gckCommand_EnterCommit() can't be called in interrupt handler because
2147 ** of a dead lock situation:
2148 ** process call Command_Commit(), and acquire Command->mutexQueue in
2149 ** gckCOMMAND_EnterCommit(). Then it will wait for a signal which depends
2150 ** on interrupt handler to generate, if interrupt handler enter
2151 ** gckCommand_EnterCommit(), process will never get the signal. */
2153 /* So, flush cache when we still in process context, and then ask caller to
2154 ** schedule a event. */
2157 gckOS_UnlockPages(os,
2158 Node->Virtual.physical,
2159 Node->Virtual.bytes,
2160 Node->Virtual.logical));
2162 if (!Node->Virtual.contiguous
2163 && (Node->Virtual.lockeds[Kernel->core] == 1)
2165 && (Kernel->vg == gcvNULL)
2169 if (Type == gcvSURF_BITMAP)
2171 /* Flush 2D cache. */
2172 flush = gcvFLUSH_2D;
2174 else if (Type == gcvSURF_RENDER_TARGET)
2176 /* Flush color cache. */
2177 flush = gcvFLUSH_COLOR;
2179 else if (Type == gcvSURF_DEPTH)
2181 /* Flush depth cache. */
2182 flush = gcvFLUSH_DEPTH;
2186 /* No flush required. */
2187 flush = (gceKERNEL_FLUSH) 0;
2192 gckHARDWARE_Flush(hardware, flush, gcvNULL, &requested));
2196 /* Acquire the command queue. */
2197 gcmkONERROR(gckCOMMAND_EnterCommit(command, gcvFALSE));
2198 commitEntered = gcvTRUE;
2200 gcmkONERROR(gckCOMMAND_Reserve(
2201 command, requested, &buffer, &bufferSize
2204 gcmkONERROR(gckHARDWARE_Flush(
2205 hardware, flush, buffer, &bufferSize
2208 /* Mark node as pending. */
2210 Node->Virtual.unlockPendings[Kernel->core] = gcvTRUE;
2213 gcmkONERROR(gckCOMMAND_Execute(command, requested));
2215 /* Release the command queue. */
2216 gcmkONERROR(gckCOMMAND_ExitCommit(command, gcvFALSE));
2217 commitEntered = gcvFALSE;
2222 gckOS_Print("Hardware already is freed.\n");
2226 gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_VIDMEM,
2227 "Scheduled unlock for virtual node 0x%x",
2230 /* Schedule the surface to be unlocked. */
2231 *Asynchroneous = gcvTRUE;
2234 /* Release the mutex. */
2235 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
2237 acquired = gcvFALSE;
2241 gcmkFOOTER_ARG("*Asynchroneous=%d", gcmOPT_VALUE(Asynchroneous));
2242 return gcvSTATUS_OK;
2247 /* Release the command queue mutex. */
2248 gcmkVERIFY_OK(gckCOMMAND_ExitCommit(command, gcvFALSE));
2253 /* Release the mutex. */
2254 gcmkVERIFY_OK(gckOS_ReleaseMutex(os, Node->Virtual.mutex));
2257 /* Return the status. */