/*
 * cmm.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * The Communication (Shared) Memory Management (CMM) module provides
 * shared memory management services for DSP/BIOS Bridge data streaming
 * and messaging.
 *
 * Multiple shared memory segments can be registered with CMM.
 * Each registered SM segment is represented by an SM "allocator" that
 * describes a block of physically contiguous shared memory used for
 * future allocations by CMM.
 *
 * Memory is coalesced back to the appropriate heap when a buffer is
 * freed.
 *
 * Notes:
 *   Va: Virtual address.
 *   Pa: Physical or kernel system address.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/types.h>
#include <linux/list.h>

/*  ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/*  ----------------------------------- OS Adaptation Layer */
#include <dspbridge/sync.h>

/*  ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/proc.h>

/*  ----------------------------------- This */
#include <dspbridge/cmm.h>

/*  ----------------------------------- Defines, Data Structures, Typedefs */
#define NEXT_PA(pnode)		(pnode->pa + pnode->size)

/* Other bus/platform translations */
#define DSPPA2GPPPA(base, x, y)	((x)+(y))
#define GPPPA2DSPPA(base, x, y)	((x)-(y))

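/*
 * Illustrative sketch (not driver code): both translation macros ignore
 * their 'base' argument and apply a signed offset. In this file 'y' is the
 * allocator's dsp_phys_addr_offset scaled by its conversion factor:
 *
 *	u32 off = allocator->dsp_phys_addr_offset * allocator->c_factor;
 *	u32 gpp_pa = DSPPA2GPPPA(0, dsp_pa, off);  // GPP PA = DSP PA + off
 *	u32 dsp_pa = GPPPA2DSPPA(0, gpp_pa, off);  // DSP PA = GPP PA - off
 */
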
/*
 *  Allocators define a block of contiguous memory used for future
 *  allocations.
 *
 *      sma - shared memory allocator.
 *      vma - virtual memory allocator. (not used)
 */
struct cmm_allocator {		/* sma */
	unsigned int shm_base;	/* Start of physical SM block */
	u32 sm_size;		/* Size of SM block in bytes */
	unsigned int vm_base;	/* Start of VM block. (Dev driver
				 * context for 'sma') */
	u32 dsp_phys_addr_offset;	/* DSP PA to GPP PA offset for this
					 * SM space */
	s8 c_factor;		/* DSPPa to GPPPa conversion factor */
	unsigned int dsp_base;	/* DSP virt base byte address */
	u32 dsp_size;		/* DSP seg size in bytes */
	struct cmm_object *cmm_mgr;	/* back ref to parent mgr */
	/* node list of available memory */
	struct list_head free_list;
	/* node list of memory in use */
	struct list_head in_use_list;
};

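/*
 * Assumed segment layout, pieced together from cmm_get_info() and
 * cmm_xlator_translate() below: a registered segment begins dsp_size
 * bytes *before* shm_base, so the whole span is the DSP-reserved region
 * followed by the GPP SM block:
 *
 *	seg_base_pa = shm_base - dsp_size        <-- start of segment
 *	[ DSP region: dsp_size bytes ][ GPP SM: sm_size bytes ]
 *	total_seg_size = dsp_size + sm_size
 */
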
struct cmm_xlator {		/* Pa<->Va translator object */
	/* CMM object this translator is associated with */
	struct cmm_object *cmm_mgr;
	/*
	 *  Client process virtual base address that corresponds to phys SM
	 *  base address for translator's seg_id.
	 *  Only 1 segment ID currently supported.
	 */
	unsigned int virt_base;	/* virtual base address */
	u32 virt_size;		/* size of virt space in bytes */
	u32 seg_id;		/* Segment Id */
};

/* CMM Mgr */
struct cmm_object {
	/*
	 * cmm_lock is used to serialize access to the memory manager
	 * across threads.
	 */
	struct mutex cmm_lock;	/* Lock to access cmm mgr */
	struct list_head node_free_list;	/* Free list of memory nodes */
	u32 min_block_size;	/* Min SM block; default 16 bytes */
	u32 page_size;		/* Memory page size (1k/4k) */
	/* GPP SM segment ptrs */
	struct cmm_allocator *pa_gppsm_seg_tab[CMM_MAXGPPSEGS];
};

/* Default CMM Mgr attributes */
static struct cmm_mgrattrs cmm_dfltmgrattrs = {
	/* min_block_size, min block size(bytes) allocated by cmm mgr */
	16
};

/* Default allocation attributes */
static struct cmm_attrs cmm_dfltalctattrs = {
	1		/* seg_id, default segment Id for allocator */
};

/* Address translator default attrs */
static struct cmm_xlatorattrs cmm_dfltxlatorattrs = {
	/* seg_id, does not have to match cmm_dfltalctattrs ul_seg_id */
	1,
	0,			/* dsp_bufs */
	0,			/* dsp_buf_size */
	NULL,			/* vm_base */
	0,			/* vm_size */
};

/* SM node representing a block of memory. */
struct cmm_mnode {
	struct list_head link;	/* must be 1st element */
	u32 pa;			/* Phys addr */
	u32 va;			/* Virtual address in device process context */
	u32 size;		/* SM block size in bytes */
	u32 client_proc;	/* Process that allocated this mem block */
};

/*  ----------------------------------- Function Prototypes */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *pnode);
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id);
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize);
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size);
/* get available slot for new allocator */
static s32 get_slot(struct cmm_object *cmm_mgr_obj);
static void un_register_gppsm_seg(struct cmm_allocator *psma);

/*
 *  ======== cmm_calloc_buf ========
 *  Purpose:
 *      Allocate a SM buffer, zero contents, and return the physical address
 *      and optional driver context virtual address (pp_buf_va).
 *
 *      The freelist is sorted in increasing size order. Get the first
 *      block that satisfies the request and put the remainder back on
 *      the freelist, if large enough. The kept block is placed on the
 *      in-use list.
 */
void *cmm_calloc_buf(struct cmm_object *hcmm_mgr, u32 usize,
		     struct cmm_attrs *pattrs, void **pp_buf_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	void *buf_pa = NULL;
	struct cmm_mnode *pnode = NULL;
	struct cmm_mnode *new_node = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 delta_size;
	u8 *pbyte;
	s32 cnt;

	if (pattrs == NULL)
		pattrs = &cmm_dfltalctattrs;

	if (pp_buf_va != NULL)
		*pp_buf_va = NULL;

	if (cmm_mgr_obj && (usize != 0)) {
		if (pattrs->seg_id > 0) {
			/* SegId > 0 is SM */
			/* get the allocator object for this segment id */
			allocator =
			    get_allocator(cmm_mgr_obj, pattrs->seg_id);
			/* keep block size a multiple of min_block_size */
			usize =
			    ((usize - 1) & ~(cmm_mgr_obj->min_block_size - 1))
			    + cmm_mgr_obj->min_block_size;
			mutex_lock(&cmm_mgr_obj->cmm_lock);
			pnode = get_free_block(allocator, usize);
			if (pnode) {
				delta_size = (pnode->size - usize);
				if (delta_size >=
				    cmm_mgr_obj->min_block_size) {
					/* create a new block with the
					 * leftovers and add to freelist */
					new_node =
					    get_node(cmm_mgr_obj,
						     pnode->pa + usize,
						     pnode->va + usize,
						     (u32) delta_size);
					/* leftovers go free */
					add_to_free_list(allocator, new_node);
					/* adjust our node's size */
					pnode->size = usize;
				}
				/* Tag node with client process requesting
				 * allocation. We'll need to free up a
				 * process's alloc'd SM if the client
				 * process goes away.
				 */
				/* Return TGID instead of process handle */
				pnode->client_proc = current->tgid;

				/* put our node on InUse list */
				list_add_tail(&pnode->link,
					      &allocator->in_use_list);
				buf_pa = (void *)pnode->pa;	/* physical address */
				/* clear mem */
				pbyte = (u8 *)pnode->va;
				for (cnt = 0; cnt < (s32)usize; cnt++, pbyte++)
					*pbyte = 0;

				if (pp_buf_va != NULL) {
					/* Virtual address */
					*pp_buf_va = (void *)pnode->va;
				}
			}
			mutex_unlock(&cmm_mgr_obj->cmm_lock);
		}
	}
	return buf_pa;
}

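/*
 * Usage sketch (hypothetical caller, for illustration only): allocate a
 * 256-byte zeroed SM buffer from the default segment, then release it.
 * Note cmm_free_buf() takes the *physical* address returned here.
 *
 *	void *va;
 *	void *pa = cmm_calloc_buf(cmm_mgr, 256, NULL, &va);
 *
 *	if (pa)
 *		cmm_free_buf(cmm_mgr, pa, 0);	// 0 selects default seg_id
 */
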
/*
 *  ======== cmm_create ========
 *  Purpose:
 *      Create a communication memory manager object.
 */
int cmm_create(struct cmm_object **ph_cmm_mgr,
	       struct dev_object *hdev_obj,
	       const struct cmm_mgrattrs *mgr_attrts)
{
	struct cmm_object *cmm_obj = NULL;

	*ph_cmm_mgr = NULL;
	/* create, zero, and tag a cmm mgr object */
	cmm_obj = kzalloc(sizeof(struct cmm_object), GFP_KERNEL);
	if (!cmm_obj)
		return -ENOMEM;

	if (mgr_attrts == NULL)
		mgr_attrts = &cmm_dfltmgrattrs;	/* set defaults */

	/* save away smallest block allocation for this cmm mgr */
	cmm_obj->min_block_size = mgr_attrts->min_block_size;
	cmm_obj->page_size = PAGE_SIZE;

	/* create node free list */
	INIT_LIST_HEAD(&cmm_obj->node_free_list);
	mutex_init(&cmm_obj->cmm_lock);
	*ph_cmm_mgr = cmm_obj;

	return 0;
}

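/*
 * Usage sketch (illustrative only): create a manager with a custom minimum
 * block size instead of the 16-byte default.
 *
 *	struct cmm_mgrattrs attrs = { .min_block_size = 32 };
 *	struct cmm_object *cmm_mgr;
 *
 *	if (!cmm_create(&cmm_mgr, hdev_obj, &attrs)) {
 *		// ... use cmm_mgr, then cmm_destroy(cmm_mgr, false) ...
 *	}
 */
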
/*
 *  ======== cmm_destroy ========
 *  Purpose:
 *      Release the communication memory manager resources.
 */
int cmm_destroy(struct cmm_object *hcmm_mgr, bool force)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_info temp_info;
	int status = 0;
	s32 slot_seg;
	struct cmm_mnode *node, *tmp;

	if (!hcmm_mgr)
		return -EFAULT;

	/* If not force then fail if outstanding allocations exist */
	if (!force) {
		/* Check for outstanding memory allocations. Note that
		 * cmm_get_info() takes cmm_lock itself, so it must be
		 * called before the lock is acquired below. */
		status = cmm_get_info(hcmm_mgr, &temp_info);
		if (!status && temp_info.total_in_use_cnt > 0) {
			/* outstanding allocations */
			return -EPERM;
		}
	}
	mutex_lock(&cmm_mgr_obj->cmm_lock);
	/* UnRegister SM allocators */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] != NULL) {
			un_register_gppsm_seg
			    (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg]);
			/* Set slot to NULL for future reuse */
			cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = NULL;
		}
	}
	/* drain the node descriptor freelist */
	list_for_each_entry_safe(node, tmp, &cmm_mgr_obj->node_free_list,
				 link) {
		list_del(&node->link);
		kfree(node);
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	/* delete lock & cmm mgr object */
	mutex_destroy(&cmm_mgr_obj->cmm_lock);
	kfree(cmm_mgr_obj);

	return status;
}

/*
 *  ======== cmm_free_buf ========
 *  Purpose:
 *      Free the given buffer.
 */
int cmm_free_buf(struct cmm_object *hcmm_mgr, void *buf_pa, u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = -EFAULT;
	struct cmm_mnode *curr, *tmp;
	struct cmm_allocator *allocator;
	struct cmm_attrs *pattrs;

	if (ul_seg_id == 0) {
		pattrs = &cmm_dfltalctattrs;
		ul_seg_id = pattrs->seg_id;
	}
	if (!hcmm_mgr || !(ul_seg_id > 0))
		return status;

	allocator = get_allocator(cmm_mgr_obj, ul_seg_id);
	if (!allocator)
		return status;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	list_for_each_entry_safe(curr, tmp, &allocator->in_use_list, link) {
		if (curr->pa == (u32) buf_pa) {
			list_del(&curr->link);
			add_to_free_list(allocator, curr);
			status = 0;
			break;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}

/*
 *  ======== cmm_get_handle ========
 *  Purpose:
 *      Return the communication memory manager object for this device.
 *      This is typically called from the client process.
 */
int cmm_get_handle(void *hprocessor, struct cmm_object **ph_cmm_mgr)
{
	int status = 0;
	struct dev_object *hdev_obj;

	if (hprocessor != NULL)
		status = proc_get_dev_object(hprocessor, &hdev_obj);
	else
		hdev_obj = dev_get_first();	/* default */

	if (!status)
		status = dev_get_cmm_mgr(hdev_obj, ph_cmm_mgr);

	return status;
}

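/*
 * Usage sketch (illustrative only): look up the CMM manager for a given
 * processor handle, falling back to the first device when none is given.
 *
 *	struct cmm_object *cmm_mgr;
 *
 *	if (!cmm_get_handle(hprocessor, &cmm_mgr)) {
 *		// ... allocate via cmm_calloc_buf(cmm_mgr, ...) ...
 *	}
 */
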
/*
 *  ======== cmm_get_info ========
 *  Purpose:
 *      Return the current memory utilization information.
 */
int cmm_get_info(struct cmm_object *hcmm_mgr,
		 struct cmm_info *cmm_info_obj)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	u32 ul_seg;
	int status = 0;
	struct cmm_allocator *altr;
	struct cmm_mnode *curr;

	if (!hcmm_mgr)
		return -EFAULT;

	mutex_lock(&cmm_mgr_obj->cmm_lock);
	cmm_info_obj->num_gppsm_segs = 0;	/* # of SM segments */
	/* Total # of outstanding alloc */
	cmm_info_obj->total_in_use_cnt = 0;
	/* min block size */
	cmm_info_obj->min_block_size = cmm_mgr_obj->min_block_size;
	/* check SM memory segments */
	for (ul_seg = 1; ul_seg <= CMM_MAXGPPSEGS; ul_seg++) {
		/* get the allocator object for this segment id */
		altr = get_allocator(cmm_mgr_obj, ul_seg);
		if (!altr)
			continue;

		cmm_info_obj->num_gppsm_segs++;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_pa =
		    altr->shm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].total_seg_size =
		    altr->dsp_size + altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_base_pa =
		    altr->shm_base;
		cmm_info_obj->seg_info[ul_seg - 1].gpp_size =
		    altr->sm_size;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_base_va =
		    altr->dsp_base;
		cmm_info_obj->seg_info[ul_seg - 1].dsp_size =
		    altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].seg_base_va =
		    altr->vm_base - altr->dsp_size;
		cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt = 0;

		list_for_each_entry(curr, &altr->in_use_list, link) {
			cmm_info_obj->total_in_use_cnt++;
			cmm_info_obj->seg_info[ul_seg - 1].in_use_cnt++;
		}
	}
	mutex_unlock(&cmm_mgr_obj->cmm_lock);
	return status;
}

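/*
 * Usage sketch (illustrative only): query utilization and warn while
 * buffers are still outstanding, mirroring what cmm_destroy() checks
 * when 'force' is false.
 *
 *	struct cmm_info info;
 *
 *	if (!cmm_get_info(cmm_mgr, &info) && info.total_in_use_cnt > 0)
 *		pr_info("cmm: %u buffer(s) still in use\n",
 *			info.total_in_use_cnt);
 */
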
/*
 *  ======== cmm_register_gppsm_seg ========
 *  Purpose:
 *      Register a block of SM with the CMM to be used for later GPP SM
 *      allocations.
 */
int cmm_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			   u32 dw_gpp_base_pa, u32 ul_size,
			   u32 dsp_addr_offset, s8 c_factor,
			   u32 dw_dsp_base, u32 ul_dsp_size,
			   u32 *sgmt_id, u32 gpp_base_va)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	struct cmm_allocator *psma = NULL;
	int status = 0;
	struct cmm_mnode *new_node;
	s32 slot_seg;

	dev_dbg(bridge, "%s: dw_gpp_base_pa %x ul_size %x dsp_addr_offset %x dw_dsp_base %x ul_dsp_size %x gpp_base_va %x\n",
		__func__, dw_gpp_base_pa, ul_size, dsp_addr_offset,
		dw_dsp_base, ul_dsp_size, gpp_base_va);

	if (!hcmm_mgr)
		return -EFAULT;

	/* make sure we have room for another allocator */
	mutex_lock(&cmm_mgr_obj->cmm_lock);

	slot_seg = get_slot(cmm_mgr_obj);
	if (slot_seg < 0) {
		status = -EPERM;
		goto func_end;
	}

	/* Check if input ul_size is big enough to alloc at least one block */
	if (ul_size < cmm_mgr_obj->min_block_size) {
		status = -EINVAL;
		goto func_end;
	}

	/* create, zero, and tag an SM allocator object */
	psma = kzalloc(sizeof(struct cmm_allocator), GFP_KERNEL);
	if (!psma) {
		status = -ENOMEM;
		goto func_end;
	}

	psma->cmm_mgr = hcmm_mgr;	/* ref to parent */
	psma->shm_base = dw_gpp_base_pa;	/* SM Base phys */
	psma->sm_size = ul_size;	/* SM segment size in bytes */
	psma->vm_base = gpp_base_va;
	psma->dsp_phys_addr_offset = dsp_addr_offset;
	psma->c_factor = c_factor;
	psma->dsp_base = dw_dsp_base;
	psma->dsp_size = ul_dsp_size;
	if (psma->vm_base == 0) {
		status = -EPERM;
		goto func_end;
	}
	/* return the actual segment identifier */
	*sgmt_id = (u32) slot_seg + 1;

	INIT_LIST_HEAD(&psma->free_list);
	INIT_LIST_HEAD(&psma->in_use_list);

	/* Get a mem node for this hunk-o-memory */
	new_node = get_node(cmm_mgr_obj, dw_gpp_base_pa,
			    psma->vm_base, ul_size);
	/* Place node on the SM allocator's free list */
	if (new_node) {
		list_add_tail(&new_node->link, &psma->free_list);
	} else {
		status = -ENOMEM;
		goto func_end;
	}
	/* make entry in the SM segment table */
	cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] = psma;

func_end:
	/* Cleanup allocator */
	if (status && psma)
		un_register_gppsm_seg(psma);
	mutex_unlock(&cmm_mgr_obj->cmm_lock);

	return status;
}

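/*
 * Usage sketch (illustrative only, all values hypothetical): register a
 * 1 MiB block of physically contiguous SM with no DSP-reserved region and
 * a zero DSP address offset. Segment ids are returned 1-based.
 *
 *	u32 seg_id;
 *	int ret = cmm_register_gppsm_seg(cmm_mgr, gpp_pa, 0x100000,
 *					 0, 1, dsp_base, 0,
 *					 &seg_id, gpp_va);
 */
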
/*
 *  ======== cmm_un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister GPP SM segments with the CMM.
 */
int cmm_un_register_gppsm_seg(struct cmm_object *hcmm_mgr,
			      u32 ul_seg_id)
{
	struct cmm_object *cmm_mgr_obj = (struct cmm_object *)hcmm_mgr;
	int status = 0;
	struct cmm_allocator *psma;
	u32 ul_id = ul_seg_id;

	if (!hcmm_mgr)
		return -EFAULT;

	if (ul_seg_id == CMM_ALLSEGMENTS)
		ul_id = 1;

	if ((ul_id <= 0) || (ul_id > CMM_MAXGPPSEGS))
		return -EINVAL;

	/*
	 * FIXME: CMM_MAXGPPSEGS == 1, so why use a while loop? It seems
	 * ul_seg_id is not needed here; it must always be 1.
	 */
	while (ul_id <= CMM_MAXGPPSEGS) {
		mutex_lock(&cmm_mgr_obj->cmm_lock);
		/* slot = seg_id - 1 */
		psma = cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1];
		if (psma != NULL) {
			un_register_gppsm_seg(psma);
			/* Set alctr ptr to NULL for future reuse */
			cmm_mgr_obj->pa_gppsm_seg_tab[ul_id - 1] = NULL;
		} else if (ul_seg_id != CMM_ALLSEGMENTS) {
			status = -EPERM;
		}
		mutex_unlock(&cmm_mgr_obj->cmm_lock);
		if (ul_seg_id != CMM_ALLSEGMENTS)
			break;

		ul_id++;
	}
	return status;
}

/*
 *  ======== un_register_gppsm_seg ========
 *  Purpose:
 *      UnRegister the SM allocator by freeing all its resources and
 *      nulling cmm mgr table entry.
 *  Note:
 *      This routine is always called within the cmm lock critical section.
 */
static void un_register_gppsm_seg(struct cmm_allocator *psma)
{
	struct cmm_mnode *curr, *tmp;

	/* free nodes on free list */
	list_for_each_entry_safe(curr, tmp, &psma->free_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	/* free nodes on InUse list */
	list_for_each_entry_safe(curr, tmp, &psma->in_use_list, link) {
		list_del(&curr->link);
		kfree(curr);
	}

	if ((void *)psma->vm_base != NULL)
		MEM_UNMAP_LINEAR_ADDRESS((void *)psma->vm_base);

	/* Free allocator itself */
	kfree(psma);
}

/*
 *  ======== get_slot ========
 *  Purpose:
 *      An available slot # is returned. Returns negative on failure.
 */
static s32 get_slot(struct cmm_object *cmm_mgr_obj)
{
	s32 slot_seg;

	/* get first available slot in cmm mgr SMSegTab[] */
	for (slot_seg = 0; slot_seg < CMM_MAXGPPSEGS; slot_seg++) {
		if (cmm_mgr_obj->pa_gppsm_seg_tab[slot_seg] == NULL)
			break;
	}
	if (slot_seg == CMM_MAXGPPSEGS)
		slot_seg = -1;	/* failed */

	return slot_seg;
}

/*
 *  ======== get_node ========
 *  Purpose:
 *      Get a memory node from the freelist or create a new one.
 */
static struct cmm_mnode *get_node(struct cmm_object *cmm_mgr_obj, u32 dw_pa,
				  u32 dw_va, u32 ul_size)
{
	struct cmm_mnode *pnode;

	/* Check cmm mgr's node freelist */
	if (list_empty(&cmm_mgr_obj->node_free_list)) {
		pnode = kzalloc(sizeof(struct cmm_mnode), GFP_KERNEL);
		if (!pnode)
			return NULL;
	} else {
		/* surely a valid element */
		pnode = list_first_entry(&cmm_mgr_obj->node_free_list,
					 struct cmm_mnode, link);
		list_del_init(&pnode->link);
	}

	pnode->pa = dw_pa;
	pnode->va = dw_va;
	pnode->size = ul_size;

	return pnode;
}

/*
 *  ======== delete_node ========
 *  Purpose:
 *      Put a memory node on the cmm nodelist for later use.
 *      Doesn't actually delete the node; this is heap-thrashing friendly.
 */
static void delete_node(struct cmm_object *cmm_mgr_obj, struct cmm_mnode *pnode)
{
	list_add_tail(&pnode->link, &cmm_mgr_obj->node_free_list);
}

/*
 *  ======== get_free_block ========
 *  Purpose:
 *      Scan the free block list and return the first block that satisfies
 *      the requested size.
 */
static struct cmm_mnode *get_free_block(struct cmm_allocator *allocator,
					u32 usize)
{
	struct cmm_mnode *node, *tmp;

	if (!allocator)
		return NULL;

	list_for_each_entry_safe(node, tmp, &allocator->free_list, link) {
		if (usize <= node->size) {
			list_del(&node->link);
			return node;
		}
	}

	return NULL;
}

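/*
 * Because add_to_free_list() keeps the freelist in ascending size order,
 * this first-fit scan behaves like a best-fit: e.g. with free blocks of
 * sizes {0x10, 0x40, 0x100}, a request for 0x20 takes the 0x40 block,
 * and cmm_calloc_buf() returns the 0x20 remainder to the freelist.
 */
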
/*
 *  ======== add_to_free_list ========
 *  Purpose:
 *      Coalesce the node with any physically adjacent free block, then
 *      insert it into the freelist in ascending size order.
 */
static void add_to_free_list(struct cmm_allocator *allocator,
			     struct cmm_mnode *node)
{
	struct cmm_mnode *curr;

	if (!node) {
		pr_err("%s: failed - node is NULL\n", __func__);
		return;
	}

	/* first, try to merge with a physically adjacent free block */
	list_for_each_entry(curr, &allocator->free_list, link) {
		if (NEXT_PA(curr) == node->pa) {
			/* 'curr' ends where 'node' begins: grow 'curr' */
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
		if (curr->pa == NEXT_PA(node)) {
			/* 'node' ends where 'curr' begins: absorb 'node' */
			curr->pa = node->pa;
			curr->va = node->va;
			curr->size += node->size;
			delete_node(allocator->cmm_mgr, node);
			return;
		}
	}
	/* no merge; insert before the first block of equal or larger size */
	list_for_each_entry(curr, &allocator->free_list, link) {
		if (curr->size >= node->size) {
			list_add_tail(&node->link, &curr->link);
			return;
		}
	}
	list_add_tail(&node->link, &allocator->free_list);
}

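/*
 * Worked example (illustrative): with a free block {pa 0x1000, size 0x20},
 * freeing an in-use block {pa 0x1020, size 0x10} satisfies
 * NEXT_PA(curr) == node->pa (0x1000 + 0x20 == 0x1020), so the two merge
 * into {pa 0x1000, size 0x30} and the spare node descriptor is recycled
 * onto the manager's node_free_list via delete_node().
 */
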
/*
 *  ======== get_allocator ========
 *  Purpose:
 *      Return the allocator for the given SM Segid.
 *      SegIds: 1,2,3..max.
 */
static struct cmm_allocator *get_allocator(struct cmm_object *cmm_mgr_obj,
					   u32 ul_seg_id)
{
	return cmm_mgr_obj->pa_gppsm_seg_tab[ul_seg_id - 1];
}

/*
 *  The CMM_Xlator[xxx] routines below are used by Node and Stream
 *  to perform SM address translation to the client process address space.
 *  A "translator" object is created by a node/stream for each SM seg used.
 */

/*
 *  ======== cmm_xlator_create ========
 *  Purpose:
 *      Create an address translator object.
 */
int cmm_xlator_create(struct cmm_xlatorobject **xlator,
		      struct cmm_object *hcmm_mgr,
		      struct cmm_xlatorattrs *xlator_attrs)
{
	struct cmm_xlator *xlator_object = NULL;
	int status = 0;

	if (xlator_attrs == NULL)
		xlator_attrs = &cmm_dfltxlatorattrs;	/* set defaults */

	xlator_object = kzalloc(sizeof(struct cmm_xlator), GFP_KERNEL);
	if (xlator_object != NULL) {
		xlator_object->cmm_mgr = hcmm_mgr;	/* ref back to CMM */
		/* SM seg_id */
		xlator_object->seg_id = xlator_attrs->seg_id;
	} else {
		status = -ENOMEM;
	}
	if (!status)
		*xlator = (struct cmm_xlatorobject *)xlator_object;

	return status;
}

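/*
 * Usage sketch (illustrative only, variables hypothetical): create a
 * translator bound to SM segment 1, publish the client's virtual address
 * range, then translate a client virtual address to a GPP physical one.
 *
 *	struct cmm_xlatorobject *xlator;
 *
 *	if (!cmm_xlator_create(&xlator, cmm_mgr, NULL)) {
 *		cmm_xlator_info(xlator, &client_va_base, map_size, 1, true);
 *		pa = cmm_xlator_translate(xlator, va, CMM_VA2PA);
 *	}
 */
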
/*
 *  ======== cmm_xlator_alloc_buf ========
 *  Purpose:
 *      Allocate an SM buffer and return both addresses: the physical
 *      address as the return value and the translated client virtual
 *      address written into *va_buf.
 */
void *cmm_xlator_alloc_buf(struct cmm_xlatorobject *xlator, void *va_buf,
			   u32 pa_size)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	void *pbuf = NULL;
	void *tmp_va_buff;
	struct cmm_attrs attrs;

	if (xlator_obj) {
		attrs.seg_id = xlator_obj->seg_id;
		__raw_writel(0, va_buf);
		/* Alloc a SM buffer */
		pbuf = cmm_calloc_buf(xlator_obj->cmm_mgr, pa_size, &attrs,
				      NULL);
		if (pbuf) {
			/* convert to translator (node/strm) process virtual
			 * address */
			tmp_va_buff = cmm_xlator_translate(xlator,
							   pbuf, CMM_PA2VA);
			__raw_writel((u32)tmp_va_buff, va_buf);
		}
	}
	return pbuf;
}

/*
 *  ======== cmm_xlator_free_buf ========
 *  Purpose:
 *      Free the given SM buffer and descriptor.
 *      Does not free the virtual memory.
 */
int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator, void *buf_va)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = -EPERM;
	void *buf_pa;

	if (xlator_obj) {
		/* convert Va to Pa so we can free it. */
		buf_pa = cmm_xlator_translate(xlator, buf_va, CMM_VA2PA);
		if (buf_pa) {
			status = cmm_free_buf(xlator_obj->cmm_mgr, buf_pa,
					      xlator_obj->seg_id);
			if (status) {
				/* Uh oh, this shouldn't happen. Descriptor
				 * gone! */
				pr_err("%s, line %d: Assertion failed\n",
				       __FILE__, __LINE__);
			}
		}
	}
	return status;
}

/*
 *  ======== cmm_xlator_info ========
 *  Purpose:
 *      Set/Get translator info.
 */
int cmm_xlator_info(struct cmm_xlatorobject *xlator, u8 **paddr,
		    u32 ul_size, u32 segm_id, bool set_info)
{
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	int status = 0;

	if (xlator_obj) {
		if (set_info) {
			/* set translator's virtual address range */
			xlator_obj->virt_base = (u32) *paddr;
			xlator_obj->virt_size = ul_size;
		} else {	/* return virt base address */
			*paddr = (u8 *) xlator_obj->virt_base;
		}
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 *  ======== cmm_xlator_translate ========
 *  Purpose:
 *      Translate the given address between the client process virtual,
 *      GPP physical and DSP physical address spaces.
 */
void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
			   enum cmm_xlatetype xtype)
{
	u32 dw_addr_xlate = 0;
	struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
	struct cmm_object *cmm_mgr_obj = NULL;
	struct cmm_allocator *allocator = NULL;
	u32 dw_offset = 0;

	if (!xlator_obj)
		goto loop_cont;

	cmm_mgr_obj = (struct cmm_object *)xlator_obj->cmm_mgr;
	/* get this translator's default SM allocator */
	allocator = cmm_mgr_obj->pa_gppsm_seg_tab[xlator_obj->seg_id - 1];
	if (!allocator)
		goto loop_cont;

	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
	    (xtype == CMM_PA2VA)) {
		if (xtype == CMM_PA2VA) {
			/* Gpp Va = Va Base + offset */
			dw_offset = (u8 *) paddr -
			    (u8 *) (allocator->shm_base -
				    allocator->dsp_size);
			dw_addr_xlate = xlator_obj->virt_base + dw_offset;
			/* Check if translated Va base is in range */
			if ((dw_addr_xlate < xlator_obj->virt_base) ||
			    (dw_addr_xlate >=
			     (xlator_obj->virt_base +
			      xlator_obj->virt_size))) {
				dw_addr_xlate = 0;	/* bad address */
			}
		} else {
			/* Gpp Pa = Gpp Base + offset */
			dw_offset =
			    (u8 *) paddr - (u8 *) xlator_obj->virt_base;
			dw_addr_xlate =
			    allocator->shm_base - allocator->dsp_size +
			    dw_offset;
		}
	} else {
		dw_addr_xlate = (u32) paddr;
	}
	/* Now convert the address to the proper target physical address */
	if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
		/* Got Gpp Pa now, convert to DSP Pa */
		dw_addr_xlate =
		    GPPPA2DSPPA((allocator->shm_base - allocator->dsp_size),
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	} else if (xtype == CMM_DSPPA2PA) {
		/* Got DSP Pa, convert to GPP Pa */
		dw_addr_xlate =
		    DSPPA2GPPPA(allocator->shm_base - allocator->dsp_size,
				dw_addr_xlate,
				allocator->dsp_phys_addr_offset *
				allocator->c_factor);
	}
loop_cont:
	return (void *)dw_addr_xlate;
}

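/*
 * Worked example (illustrative, hypothetical values): with shm_base
 * 0x87000000, dsp_size 0x1000 (so the segment base PA is 0x86fff000),
 * virt_base 0x40000000, dsp_phys_addr_offset 0x10000000 and c_factor 1:
 *
 *	CMM_PA2VA:    pa 0x87001000 -> offset 0x2000 -> va 0x40002000
 *	CMM_VA2PA:    va 0x40002000 -> offset 0x2000 -> pa 0x87001000
 *	CMM_VA2DSPPA: va 0x40002000 -> pa 0x87001000 -> dsp pa 0x77001000
 */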