4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
6 * DSP/BIOS Bridge Node Manager.
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
19 #include <linux/types.h>
20 /* ----------------------------------- Host OS */
21 #include <dspbridge/host_os.h>
23 /* ----------------------------------- DSP/BIOS Bridge */
24 #include <dspbridge/dbdefs.h>
26 /* ----------------------------------- Trace & Debug */
27 #include <dspbridge/dbc.h>
29 /* ----------------------------------- OS Adaptation Layer */
30 #include <dspbridge/list.h>
31 #include <dspbridge/memdefs.h>
32 #include <dspbridge/proc.h>
33 #include <dspbridge/strm.h>
34 #include <dspbridge/sync.h>
35 #include <dspbridge/ntfy.h>
37 /* ----------------------------------- Platform Manager */
38 #include <dspbridge/cmm.h>
39 #include <dspbridge/cod.h>
40 #include <dspbridge/dev.h>
41 #include <dspbridge/msg.h>
43 /* ----------------------------------- Resource Manager */
44 #include <dspbridge/dbdcd.h>
45 #include <dspbridge/disp.h>
46 #include <dspbridge/rms_sh.h>
48 /* ----------------------------------- Link Driver */
49 #include <dspbridge/dspdefs.h>
50 #include <dspbridge/dspioctl.h>
52 /* ----------------------------------- Others */
53 #include <dspbridge/gb.h>
54 #include <dspbridge/uuidutil.h>
56 /* ----------------------------------- This */
57 #include <dspbridge/nodepriv.h>
58 #include <dspbridge/node.h>
60 /* Static/Dynamic Loader includes */
61 #include <dspbridge/dbll.h>
62 #include <dspbridge/nldr.h>
64 #include <dspbridge/drv.h>
65 #include <dspbridge/drvdefs.h>
66 #include <dspbridge/resourcecleanup.h>
69 #include <dspbridge/dspdeh.h>
/* Name prefixes used to build DSP-side device names for host and pipe streams */
71 #define HOSTPREFIX "/host"
72 #define PIPEPREFIX "/dbpipe"
/* Max stream counts, read from the node's DCD (node database) properties */
74 #define MAX_INPUTS(h) \
75 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
76 #define MAX_OUTPUTS(h) \
77 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
/* Accessors for a node's current priority and state */
79 #define NODE_GET_PRIORITY(h) ((h)->prio)
80 #define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
81 #define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
83 #define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */
84 #define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
/* Buffer sizes for generated "/dbpipe<n>" and "/host<n>" device names */
86 #define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
87 #define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
89 #define MAXDEVNAMELEN 32 /* dsp_ndbprops.ac_name size */
/* Node phase identifier for the execute phase (companion create/delete
 * phase macros are not visible in this listing — confirm in full source) */
91 #define EXECUTEPHASE 2
94 /* Define default STRM parameters */
96 /* TBD: Put in header file, make global DSP_STRMATTRS with defaults,
97 * or make defaults configurable. */
99 #define DEFAULTBUFSIZE 32
100 #define DEFAULTNBUFS 2
101 #define DEFAULTSEGID 0
102 #define DEFAULTALIGNMENT 0
103 #define DEFAULTTIMEOUT 10000
/* Indices into node_mgr.ul_fxn_addrs[] for RMS server functions on the DSP */
105 #define RMSQUERYSERVER 0
106 #define RMSCONFIGURESERVER 1
107 #define RMSCREATENODE 2
108 #define RMSEXECUTENODE 3
109 #define RMSDELETENODE 4
110 #define RMSCHANGENODEPRIORITY 5
111 #define RMSREADMEMORY 6
112 #define RMSWRITEMEMORY 7
114 #define MAXTIMEOUT 2000
118 #define PWR_TIMEOUT 500 /* default PWR timeout in msec */
120 #define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Addr */
123 * ======== node_mgr ========
126 struct dev_object *hdev_obj; /* Device object */
127 /* Function interface to Bridge driver */
128 struct bridge_drv_interface *intf_fxns;
129 struct dcd_manager *hdcd_mgr; /* Proc/Node data manager */
130 struct disp_object *disp_obj; /* Node dispatcher */
131 struct lst_list *node_list; /* List of all allocated nodes */
132 u32 num_nodes; /* Number of nodes in node_list */
133 u32 num_created; /* Number of nodes *created* on DSP */
134 struct gb_t_map *pipe_map; /* Pipe connection bit map */
135 struct gb_t_map *pipe_done_map; /* Pipes that are half free */
136 struct gb_t_map *chnl_map; /* Channel allocation bit map */
137 struct gb_t_map *dma_chnl_map; /* DMA Channel allocation bit map */
138 struct gb_t_map *zc_chnl_map; /* Zero-Copy Channel alloc bit map */
139 struct ntfy_object *ntfy_obj; /* Manages registered notifications */
140 struct mutex node_mgr_lock; /* For critical sections */
141 u32 ul_fxn_addrs[NUMRMSFXNS]; /* RMS function addresses */
142 struct msg_mgr *msg_mgr_obj;
144 /* Processor properties needed by Node Dispatcher */
145 u32 ul_num_chnls; /* Total number of channels */
146 u32 ul_chnl_offset; /* Offset of chnl ids rsvd for RMS */
147 u32 ul_chnl_buf_size; /* Buffer size for data to RMS */
148 int proc_family; /* eg, 5000 */
149 int proc_type; /* eg, 5510 */
150 u32 udsp_word_size; /* Size of DSP word on host bytes */
151 u32 udsp_data_mau_size; /* Size of DSP data MAU */
152 u32 udsp_mau_size; /* Size of MAU */
153 s32 min_pri; /* Minimum runtime priority for node */
154 s32 max_pri; /* Maximum runtime priority for node */
156 struct strm_mgr *strm_mgr_obj; /* STRM manager */
158 /* Loader properties */
159 struct nldr_object *nldr_obj; /* Handle to loader */
160 struct node_ldr_fxns nldr_fxns; /* Handle to loader functions */
161 bool loader_init; /* Loader Init function succeeded? */
165 * ======== connecttype ========
175 * ======== stream_chnl ========
178 enum connecttype type; /* Type of stream connection */
179 u32 dev_id; /* pipe or channel id */
183 * ======== node_object ========
186 struct list_head list_elem;
187 struct node_mgr *hnode_mgr; /* The manager of this node */
188 struct proc_object *hprocessor; /* Back pointer to processor */
189 struct dsp_uuid node_uuid; /* Node's ID */
190 s32 prio; /* Node's current priority */
191 u32 utimeout; /* Timeout for blocking NODE calls */
192 u32 heap_size; /* Heap Size */
193 u32 udsp_heap_virt_addr; /* Heap virtual address (DSP side) */
194 u32 ugpp_heap_virt_addr; /* Heap virtual address (GPP side) */
195 enum node_type ntype; /* Type of node: message, task, etc */
196 enum node_state node_state; /* NODE_ALLOCATED, NODE_CREATED, ... */
197 u32 num_inputs; /* Current number of inputs */
198 u32 num_outputs; /* Current number of outputs */
199 u32 max_input_index; /* Current max input stream index */
200 u32 max_output_index; /* Current max output stream index */
201 struct stream_chnl *inputs; /* Node's input streams */
202 struct stream_chnl *outputs; /* Node's output streams */
203 struct node_createargs create_args; /* Args for node create func */
204 nodeenv node_env; /* Environment returned by RMS */
205 struct dcd_genericobj dcd_props; /* Node properties from DCD */
206 struct dsp_cbdata *pargs; /* Optional args to pass to node */
207 struct ntfy_object *ntfy_obj; /* Manages registered notifications */
208 char *pstr_dev_name; /* device name, if device node */
209 struct sync_object *sync_done; /* Synchronize node_terminate */
210 s32 exit_status; /* execute function return status */
212 /* Information needed for node_get_attr() */
213 void *device_owner; /* If dev node, task that owns it */
214 u32 num_gpp_inputs; /* Current # of from GPP streams */
215 u32 num_gpp_outputs; /* Current # of to GPP streams */
216 /* Current stream connections */
217 struct dsp_streamconnect *stream_connect;
220 struct msg_queue *msg_queue_obj;
222 /* These fields used for SM messaging */
223 struct cmm_xlatorobject *xlator; /* Node's SM addr translator */
225 /* Handle to pass to dynamic loader */
226 struct nldr_nodeobject *nldr_node_obj;
227 bool loaded; /* Code is (dynamically) loaded */
228 bool phase_split; /* Phases split in many libs or ovly */
232 /* Default buffer attributes */
233 static struct dsp_bufferattr node_dfltbufattrs = {
236 0, /* buf_alignment */
239 static void delete_node(struct node_object *hnode,
240 struct process_context *pr_ctxt);
241 static void delete_node_mgr(struct node_mgr *hnode_mgr);
242 static void fill_stream_connect(struct node_object *node1,
243 struct node_object *node2, u32 stream1,
245 static void fill_stream_def(struct node_object *hnode,
246 struct node_strmdef *pstrm_def,
247 struct dsp_strmattr *pattrs);
248 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
249 static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
251 static int get_node_props(struct dcd_manager *hdcd_mgr,
252 struct node_object *hnode,
253 const struct dsp_uuid *node_uuid,
254 struct dcd_genericobj *dcd_prop);
255 static int get_proc_props(struct node_mgr *hnode_mgr,
256 struct dev_object *hdev_obj);
257 static int get_rms_fxns(struct node_mgr *hnode_mgr);
258 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
259 u32 ul_num_bytes, u32 mem_space);
260 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
261 u32 ul_num_bytes, u32 mem_space);
263 static u32 refs; /* module reference count */
265 /* Dynamic loader functions. */
266 static struct node_ldr_fxns nldr_fxns = {
277 enum node_state node_get_state(void *hnode)
279 struct node_object *pnode = (struct node_object *)hnode;
283 return pnode->node_state;
287 * ======== node_allocate ========
289 * Allocate GPP resources to manage a node on the DSP.
291 int node_allocate(struct proc_object *hprocessor,
292 const struct dsp_uuid *node_uuid,
293 const struct dsp_cbdata *pargs,
294 const struct dsp_nodeattrin *attr_in,
295 struct node_res_object **noderes,
296 struct process_context *pr_ctxt)
298 struct node_mgr *hnode_mgr;
299 struct dev_object *hdev_obj;
300 struct node_object *pnode = NULL;
301 enum node_type node_type = NODE_TASK;
302 struct node_msgargs *pmsg_args;
303 struct node_taskargs *ptask_args;
305 struct bridge_drv_interface *intf_fxns;
307 struct cmm_object *hcmm_mgr = NULL; /* Shared memory manager hndl */
312 u32 ul_stack_seg_addr, ul_stack_seg_val;
314 struct cfg_hostres *host_res;
315 struct bridge_dev_context *pbridge_context;
318 struct dsp_processorstate proc_state;
322 DBC_REQUIRE(refs > 0);
323 DBC_REQUIRE(hprocessor != NULL);
324 DBC_REQUIRE(noderes != NULL);
325 DBC_REQUIRE(node_uuid != NULL);
329 status = proc_get_processor_id(hprocessor, &proc_id);
331 if (proc_id != DSP_UNIT)
334 status = proc_get_dev_object(hprocessor, &hdev_obj);
336 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
337 if (hnode_mgr == NULL)
345 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
346 if (!pbridge_context) {
351 status = proc_get_state(hprocessor, &proc_state,
352 sizeof(struct dsp_processorstate));
355 /* If processor is in error state then don't attempt
356 to send the message */
357 if (proc_state.proc_state == PROC_ERROR) {
362 /* Assuming that 0 is not a valid function address */
363 if (hnode_mgr->ul_fxn_addrs[0] == 0) {
364 /* No RMS on target - we currently can't handle this */
365 pr_err("%s: Failed, no RMS in base image\n", __func__);
368 /* Validate attr_in fields, if non-NULL */
370 /* Check if attr_in->prio is within range */
371 if (attr_in->prio < hnode_mgr->min_pri ||
372 attr_in->prio > hnode_mgr->max_pri)
376 /* Allocate node object and fill in */
380 pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
385 pnode->hnode_mgr = hnode_mgr;
386 /* This critical section protects get_node_props */
387 mutex_lock(&hnode_mgr->node_mgr_lock);
389 /* Get dsp_ndbprops from node database */
390 status = get_node_props(hnode_mgr->hdcd_mgr, pnode, node_uuid,
391 &(pnode->dcd_props));
395 pnode->node_uuid = *node_uuid;
396 pnode->hprocessor = hprocessor;
397 pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
398 pnode->utimeout = pnode->dcd_props.obj_data.node_obj.ndb_props.utimeout;
399 pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;
401 /* Currently only C64 DSP builds support Node Dynamic heaps */
402 /* Allocate memory for node heap */
403 pnode->create_args.asa.task_arg_obj.heap_size = 0;
404 pnode->create_args.asa.task_arg_obj.udsp_heap_addr = 0;
405 pnode->create_args.asa.task_arg_obj.udsp_heap_res_addr = 0;
406 pnode->create_args.asa.task_arg_obj.ugpp_heap_addr = 0;
410 /* Check if we have a user allocated node heap */
411 if (!(attr_in->pgpp_virt_addr))
414 /* check for page aligned Heap size */
415 if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
416 pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
417 __func__, attr_in->heap_size);
420 pnode->create_args.asa.task_arg_obj.heap_size =
422 pnode->create_args.asa.task_arg_obj.ugpp_heap_addr =
423 (u32) attr_in->pgpp_virt_addr;
428 map_attrs |= DSP_MAPLITTLEENDIAN;
429 map_attrs |= DSP_MAPELEMSIZE32;
430 map_attrs |= DSP_MAPVIRTUALADDR;
431 status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
432 pnode->create_args.asa.task_arg_obj.heap_size,
433 NULL, (void **)&mapped_addr, map_attrs,
436 pr_err("%s: Failed to map memory for Heap: 0x%x\n",
439 pnode->create_args.asa.task_arg_obj.udsp_heap_addr =
443 mutex_unlock(&hnode_mgr->node_mgr_lock);
444 if (attr_in != NULL) {
445 /* Overrides of NBD properties */
446 pnode->utimeout = attr_in->utimeout;
447 pnode->prio = attr_in->prio;
449 /* Create object to manage notifications */
451 pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
454 ntfy_init(pnode->ntfy_obj);
460 node_type = node_get_type(pnode);
461 /* Allocate dsp_streamconnect array for device, task, and
462 * dais socket nodes. */
463 if (node_type != NODE_MESSAGE) {
464 num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
465 pnode->stream_connect = kzalloc(num_streams *
466 sizeof(struct dsp_streamconnect),
468 if (num_streams > 0 && pnode->stream_connect == NULL)
472 if (!status && (node_type == NODE_TASK ||
473 node_type == NODE_DAISSOCKET)) {
474 /* Allocate arrays for maintaining stream connections */
475 pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
476 sizeof(struct stream_chnl), GFP_KERNEL);
477 pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
478 sizeof(struct stream_chnl), GFP_KERNEL);
479 ptask_args = &(pnode->create_args.asa.task_arg_obj);
480 ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
481 sizeof(struct node_strmdef),
483 ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
484 sizeof(struct node_strmdef),
486 if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
487 ptask_args->strm_in_def
489 || (MAX_OUTPUTS(pnode) > 0
490 && (pnode->outputs == NULL
491 || ptask_args->strm_out_def == NULL)))
495 if (!status && (node_type != NODE_DEVICE)) {
496 /* Create an event that will be posted when RMS_EXIT is
498 pnode->sync_done = kzalloc(sizeof(struct sync_object),
500 if (pnode->sync_done)
501 sync_init_event(pnode->sync_done);
506 /*Get the shared mem mgr for this nodes dev object */
507 status = cmm_get_handle(hprocessor, &hcmm_mgr);
509 /* Allocate a SM addr translator for this node
511 status = cmm_xlator_create(&pnode->xlator,
516 /* Fill in message args */
517 if ((pargs != NULL) && (pargs->cb_data > 0)) {
519 &(pnode->create_args.asa.node_msg_args);
520 pmsg_args->pdata = kzalloc(pargs->cb_data,
522 if (pmsg_args->pdata == NULL) {
525 pmsg_args->arg_length = pargs->cb_data;
526 memcpy(pmsg_args->pdata,
534 if (!status && node_type != NODE_DEVICE) {
535 /* Create a message queue for this node */
536 intf_fxns = hnode_mgr->intf_fxns;
538 (*intf_fxns->pfn_msg_create_queue) (hnode_mgr->msg_mgr_obj,
539 &pnode->msg_queue_obj,
541 pnode->create_args.asa.
542 node_msg_args.max_msgs,
547 /* Create object for dynamic loading */
549 status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
555 &pnode->phase_split);
558 /* Compare value read from Node Properties and check if it is same as
559 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
560 * GPP Address, Read the value in that address and override the
561 * stack_seg value in task args */
563 (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
564 stack_seg_name != NULL) {
566 pnode->dcd_props.obj_data.node_obj.ndb_props.
567 stack_seg_name, STACKSEGLABEL) == 0) {
569 hnode_mgr->nldr_fxns.
570 pfn_get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
573 pr_err("%s: Failed to get addr for DYNEXT_BEG"
574 " status = 0x%x\n", __func__, status);
577 hnode_mgr->nldr_fxns.
578 pfn_get_fxn_addr(pnode->nldr_node_obj,
579 "L1DSRAM_HEAP", &pul_value);
582 pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
583 " status = 0x%x\n", __func__, status);
585 host_res = pbridge_context->resources;
590 pr_err("%s: Failed to get host resource, status"
591 " = 0x%x\n", __func__, status);
595 ul_gpp_mem_base = (u32) host_res->dw_mem_base[1];
596 off_set = pul_value - dynext_base;
597 ul_stack_seg_addr = ul_gpp_mem_base + off_set;
598 ul_stack_seg_val = readl(ul_stack_seg_addr);
600 dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
601 " 0x%x\n", __func__, ul_stack_seg_val,
604 pnode->create_args.asa.task_arg_obj.stack_seg =
611 /* Add the node to the node manager's list of allocated
613 lst_init_elem((struct list_head *)pnode);
614 NODE_SET_STATE(pnode, NODE_ALLOCATED);
616 mutex_lock(&hnode_mgr->node_mgr_lock);
618 lst_put_tail(hnode_mgr->node_list, (struct list_head *) pnode);
619 ++(hnode_mgr->num_nodes);
621 /* Exit critical section */
622 mutex_unlock(&hnode_mgr->node_mgr_lock);
624 /* Preset this to assume phases are split
625 * (for overlay and dll) */
626 pnode->phase_split = true;
628 /* Notify all clients registered for DSP_NODESTATECHANGE. */
629 proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
633 delete_node(pnode, pr_ctxt);
638 status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
640 delete_node(pnode, pr_ctxt);
644 *noderes = (struct node_res_object *)node_res;
645 drv_proc_node_update_heap_status(node_res, true);
646 drv_proc_node_update_status(node_res, true);
648 DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
650 dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
651 "node_res: %p status: 0x%x\n", __func__, hprocessor,
652 node_uuid, pargs, attr_in, noderes, status);
657 * ======== node_alloc_msg_buf ========
659 * Allocates buffer for zero copy messaging.
661 DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
662 struct dsp_bufferattr *pattr,
665 struct node_object *pnode = (struct node_object *)hnode;
667 bool va_flag = false;
671 DBC_REQUIRE(refs > 0);
672 DBC_REQUIRE(pbuffer != NULL);
674 DBC_REQUIRE(usize > 0);
678 else if (node_get_type(pnode) == NODE_DEVICE)
685 pattr = &node_dfltbufattrs; /* set defaults */
687 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
688 if (proc_id != DSP_UNIT) {
692 /* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
693 * virt address, so set this info in this node's translator
694 * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
695 * virtual address from node's translator. */
696 if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
697 (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
699 set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
701 /* Clear mask bits */
702 pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
703 /* Set/get this node's translators virtual address base/size */
704 status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
705 pattr->segment_id, set_info);
707 if (!status && (!va_flag)) {
708 if (pattr->segment_id != 1) {
709 /* Node supports single SM segment only. */
712 /* Arbitrary SM buffer alignment not supported for host side
713 * allocs, but guaranteed for the following alignment
715 switch (pattr->buf_alignment) {
722 /* alignment value not supported */
727 /* allocate physical buffer from seg_id in node's
729 (void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
731 if (*pbuffer == NULL) {
732 pr_err("%s: error - Out of shared memory\n",
743 * ======== node_change_priority ========
745 * Change the priority of a node in the allocated state, or that is
746 * currently running or paused on the target.
748 int node_change_priority(struct node_object *hnode, s32 prio)
750 struct node_object *pnode = (struct node_object *)hnode;
751 struct node_mgr *hnode_mgr = NULL;
752 enum node_type node_type;
753 enum node_state state;
757 DBC_REQUIRE(refs > 0);
759 if (!hnode || !hnode->hnode_mgr) {
762 hnode_mgr = hnode->hnode_mgr;
763 node_type = node_get_type(hnode);
764 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
766 else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
772 /* Enter critical section */
773 mutex_lock(&hnode_mgr->node_mgr_lock);
775 state = node_get_state(hnode);
776 if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
777 NODE_SET_PRIORITY(hnode, prio);
779 if (state != NODE_RUNNING) {
783 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
784 if (proc_id == DSP_UNIT) {
786 disp_node_change_priority(hnode_mgr->disp_obj,
788 hnode_mgr->ul_fxn_addrs
789 [RMSCHANGENODEPRIORITY],
790 hnode->node_env, prio);
793 NODE_SET_PRIORITY(hnode, prio);
797 /* Leave critical section */
798 mutex_unlock(&hnode_mgr->node_mgr_lock);
804 * ======== node_connect ========
806 * Connect two nodes on the DSP, or a node on the DSP to the GPP.
808 int node_connect(struct node_object *node1, u32 stream1,
809 struct node_object *node2,
810 u32 stream2, struct dsp_strmattr *pattrs,
811 struct dsp_cbdata *conn_param)
813 struct node_mgr *hnode_mgr;
814 char *pstr_dev_name = NULL;
815 enum node_type node1_type = NODE_TASK;
816 enum node_type node2_type = NODE_TASK;
817 struct node_strmdef *pstrm_def;
818 struct node_strmdef *input = NULL;
819 struct node_strmdef *output = NULL;
820 struct node_object *dev_node_obj;
821 struct node_object *hnode;
822 struct stream_chnl *pstream;
823 u32 pipe_id = GB_NOBITS;
824 u32 chnl_id = GB_NOBITS;
828 DBC_REQUIRE(refs > 0);
830 if ((node1 != (struct node_object *)DSP_HGPPNODE && !node1) ||
831 (node2 != (struct node_object *)DSP_HGPPNODE && !node2))
835 /* The two nodes must be on the same processor */
836 if (node1 != (struct node_object *)DSP_HGPPNODE &&
837 node2 != (struct node_object *)DSP_HGPPNODE &&
838 node1->hnode_mgr != node2->hnode_mgr)
840 /* Cannot connect a node to itself */
846 /* node_get_type() will return NODE_GPP if hnode =
848 node1_type = node_get_type(node1);
849 node2_type = node_get_type(node2);
850 /* Check stream indices ranges */
851 if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
852 stream1 >= MAX_OUTPUTS(node1)) || (node2_type != NODE_GPP
861 * Only the following types of connections are allowed:
862 * task/dais socket < == > task/dais socket
863 * task/dais socket < == > device
864 * task/dais socket < == > GPP
866 * ie, no message nodes, and at least one task or dais
869 if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
870 (node1_type != NODE_TASK && node1_type != NODE_DAISSOCKET &&
871 node2_type != NODE_TASK && node2_type != NODE_DAISSOCKET))
875 * Check stream mode. Default is STRMMODE_PROCCOPY.
877 if (!status && pattrs) {
878 if (pattrs->strm_mode != STRMMODE_PROCCOPY)
879 status = -EPERM; /* illegal stream mode */
885 if (node1_type != NODE_GPP) {
886 hnode_mgr = node1->hnode_mgr;
888 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
889 hnode_mgr = node2->hnode_mgr;
891 /* Enter critical section */
892 mutex_lock(&hnode_mgr->node_mgr_lock);
894 /* Nodes must be in the allocated state */
895 if (node1_type != NODE_GPP && node_get_state(node1) != NODE_ALLOCATED)
898 if (node2_type != NODE_GPP && node_get_state(node2) != NODE_ALLOCATED)
902 /* Check that stream indices for task and dais socket nodes
903 * are not already in use. (Device nodes checked later) */
904 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
906 &(node1->create_args.asa.
907 task_arg_obj.strm_out_def[stream1]);
908 if (output->sz_device != NULL)
912 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
914 &(node2->create_args.asa.
915 task_arg_obj.strm_in_def[stream2]);
916 if (input->sz_device != NULL)
921 /* Connecting two task nodes? */
922 if (!status && ((node1_type == NODE_TASK ||
923 node1_type == NODE_DAISSOCKET)
924 && (node2_type == NODE_TASK
925 || node2_type == NODE_DAISSOCKET))) {
926 /* Find available pipe */
927 pipe_id = gb_findandset(hnode_mgr->pipe_map);
928 if (pipe_id == GB_NOBITS) {
929 status = -ECONNREFUSED;
931 node1->outputs[stream1].type = NODECONNECT;
932 node2->inputs[stream2].type = NODECONNECT;
933 node1->outputs[stream1].dev_id = pipe_id;
934 node2->inputs[stream2].dev_id = pipe_id;
935 output->sz_device = kzalloc(PIPENAMELEN + 1,
937 input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
938 if (output->sz_device == NULL ||
939 input->sz_device == NULL) {
940 /* Undo the connection */
941 kfree(output->sz_device);
943 kfree(input->sz_device);
945 output->sz_device = NULL;
946 input->sz_device = NULL;
947 gb_clear(hnode_mgr->pipe_map, pipe_id);
950 /* Copy "/dbpipe<pipId>" name to device names */
951 sprintf(output->sz_device, "%s%d",
952 PIPEPREFIX, pipe_id);
953 strcpy(input->sz_device, output->sz_device);
957 /* Connecting task node to host? */
958 if (!status && (node1_type == NODE_GPP ||
959 node2_type == NODE_GPP)) {
960 if (node1_type == NODE_GPP) {
961 chnl_mode = CHNL_MODETODSP;
963 DBC_ASSERT(node2_type == NODE_GPP);
964 chnl_mode = CHNL_MODEFROMDSP;
966 /* Reserve a channel id. We need to put the name "/host<id>"
967 * in the node's create_args, but the host
968 * side channel will not be opened until DSPStream_Open is
969 * called for this node. */
971 if (pattrs->strm_mode == STRMMODE_RDMA) {
973 gb_findandset(hnode_mgr->dma_chnl_map);
974 /* dma chans are 2nd transport chnl set
976 (chnl_id != GB_NOBITS) ?
979 hnode_mgr->ul_num_chnls) : chnl_id;
980 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
981 chnl_id = gb_findandset(hnode_mgr->zc_chnl_map);
982 /* zero-copy chans are 3rd transport set
984 (chnl_id != GB_NOBITS) ? (chnl_id = chnl_id +
989 } else { /* must be PROCCOPY */
990 DBC_ASSERT(pattrs->strm_mode ==
992 chnl_id = gb_findandset(hnode_mgr->chnl_map);
996 /* default to PROCCOPY */
997 chnl_id = gb_findandset(hnode_mgr->chnl_map);
999 if (chnl_id == GB_NOBITS) {
1000 status = -ECONNREFUSED;
1003 pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
1004 if (pstr_dev_name != NULL)
1008 if (pattrs->strm_mode == STRMMODE_RDMA) {
1009 gb_clear(hnode_mgr->dma_chnl_map, chnl_id -
1010 hnode_mgr->ul_num_chnls);
1011 } else if (pattrs->strm_mode == STRMMODE_ZEROCOPY) {
1012 gb_clear(hnode_mgr->zc_chnl_map, chnl_id -
1013 (2 * hnode_mgr->ul_num_chnls));
1015 DBC_ASSERT(pattrs->strm_mode ==
1017 gb_clear(hnode_mgr->chnl_map, chnl_id);
1020 gb_clear(hnode_mgr->chnl_map, chnl_id);
1025 if (node1 == (struct node_object *)DSP_HGPPNODE) {
1026 node2->inputs[stream2].type = HOSTCONNECT;
1027 node2->inputs[stream2].dev_id = chnl_id;
1028 input->sz_device = pstr_dev_name;
1030 node1->outputs[stream1].type = HOSTCONNECT;
1031 node1->outputs[stream1].dev_id = chnl_id;
1032 output->sz_device = pstr_dev_name;
1034 sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1037 /* Connecting task node to device node? */
1038 if (!status && ((node1_type == NODE_DEVICE) ||
1039 (node2_type == NODE_DEVICE))) {
1040 if (node2_type == NODE_DEVICE) {
1041 /* node1 == > device */
1042 dev_node_obj = node2;
1044 pstream = &(node1->outputs[stream1]);
1047 /* device == > node2 */
1048 dev_node_obj = node1;
1050 pstream = &(node2->inputs[stream2]);
1053 /* Set up create args */
1054 pstream->type = DEVICECONNECT;
1055 dw_length = strlen(dev_node_obj->pstr_dev_name);
1056 if (conn_param != NULL) {
1057 pstrm_def->sz_device = kzalloc(dw_length + 1 +
1058 conn_param->cb_data,
1061 pstrm_def->sz_device = kzalloc(dw_length + 1,
1064 if (pstrm_def->sz_device == NULL) {
1067 /* Copy device name */
1068 strncpy(pstrm_def->sz_device,
1069 dev_node_obj->pstr_dev_name, dw_length);
1070 if (conn_param != NULL) {
1071 strncat(pstrm_def->sz_device,
1072 (char *)conn_param->node_data,
1073 (u32) conn_param->cb_data);
1075 dev_node_obj->device_owner = hnode;
1079 /* Fill in create args */
1080 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1081 node1->create_args.asa.task_arg_obj.num_outputs++;
1082 fill_stream_def(node1, output, pattrs);
1084 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1085 node2->create_args.asa.task_arg_obj.num_inputs++;
1086 fill_stream_def(node2, input, pattrs);
1088 /* Update node1 and node2 stream_connect */
1089 if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1090 node1->num_outputs++;
1091 if (stream1 > node1->max_output_index)
1092 node1->max_output_index = stream1;
1095 if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1096 node2->num_inputs++;
1097 if (stream2 > node2->max_input_index)
1098 node2->max_input_index = stream2;
1101 fill_stream_connect(node1, node2, stream1, stream2);
1103 /* end of sync_enter_cs */
1104 /* Exit critical section */
1105 mutex_unlock(&hnode_mgr->node_mgr_lock);
1107 dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
1108 "pattrs: %p status: 0x%x\n", __func__, node1,
1109 stream1, node2, stream2, pattrs, status);
1114 * ======== node_create ========
1116 * Create a node on the DSP by remotely calling the node's create function.
1118 int node_create(struct node_object *hnode)
1120 struct node_object *pnode = (struct node_object *)hnode;
1121 struct node_mgr *hnode_mgr;
1122 struct bridge_drv_interface *intf_fxns;
1124 enum node_type node_type;
1127 struct dsp_cbdata cb_data;
1129 struct dsp_processorstate proc_state;
1130 struct proc_object *hprocessor;
1131 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1132 struct dspbridge_platform_data *pdata =
1133 omap_dspbridge_dev->dev.platform_data;
1136 DBC_REQUIRE(refs > 0);
1141 hprocessor = hnode->hprocessor;
1142 status = proc_get_state(hprocessor, &proc_state,
1143 sizeof(struct dsp_processorstate));
1146 /* If processor is in error state then don't attempt to create
1148 if (proc_state.proc_state == PROC_ERROR) {
1152 /* create struct dsp_cbdata struct for PWR calls */
1153 cb_data.cb_data = PWR_TIMEOUT;
1154 node_type = node_get_type(hnode);
1155 hnode_mgr = hnode->hnode_mgr;
1156 intf_fxns = hnode_mgr->intf_fxns;
1157 /* Get access to node dispatcher */
1158 mutex_lock(&hnode_mgr->node_mgr_lock);
1160 /* Check node state */
1161 if (node_get_state(hnode) != NODE_ALLOCATED)
1165 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1170 if (proc_id != DSP_UNIT)
1173 /* Make sure streams are properly connected */
1174 if ((hnode->num_inputs && hnode->max_input_index >
1175 hnode->num_inputs - 1) ||
1176 (hnode->num_outputs && hnode->max_output_index >
1177 hnode->num_outputs - 1))
1181 /* If node's create function is not loaded, load it */
1182 /* Boost the OPP level to max level that DSP can be requested */
1183 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1184 if (pdata->cpu_set_freq)
1185 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
1187 status = hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
1189 /* Get address of node's create function */
1191 hnode->loaded = true;
1192 if (node_type != NODE_DEVICE) {
1193 status = get_fxn_address(hnode, &ul_create_fxn,
1197 pr_err("%s: failed to load create code: 0x%x\n",
1200 /* Request the lowest OPP level */
1201 #if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
1202 if (pdata->cpu_set_freq)
1203 (*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
1205 /* Get address of iAlg functions, if socket node */
1207 if (node_type == NODE_DAISSOCKET) {
1208 status = hnode_mgr->nldr_fxns.pfn_get_fxn_addr
1209 (hnode->nldr_node_obj,
1210 hnode->dcd_props.obj_data.node_obj.
1212 &hnode->create_args.asa.
1213 task_arg_obj.ul_dais_arg);
1218 if (node_type != NODE_DEVICE) {
1219 status = disp_node_create(hnode_mgr->disp_obj, hnode,
1220 hnode_mgr->ul_fxn_addrs
1223 &(hnode->create_args),
1224 &(hnode->node_env));
1226 /* Set the message queue id to the node env
1228 intf_fxns = hnode_mgr->intf_fxns;
1229 (*intf_fxns->pfn_msg_set_queue_id) (hnode->
1235 /* Phase II/Overlays: Create, execute, delete phases possibly in
1236 * different files/sections. */
1237 if (hnode->loaded && hnode->phase_split) {
1238 /* If create code was dynamically loaded, we can now unload
1240 status1 = hnode_mgr->nldr_fxns.pfn_unload(hnode->nldr_node_obj,
1242 hnode->loaded = false;
1245 pr_err("%s: Failed to unload create code: 0x%x\n",
1248 /* Update node state and node manager state */
1250 NODE_SET_STATE(hnode, NODE_CREATED);
1251 hnode_mgr->num_created++;
1254 if (status != -EBADR) {
1255 /* Put back in NODE_ALLOCATED state if error occurred */
1256 NODE_SET_STATE(hnode, NODE_ALLOCATED);
1259 /* Free access to node dispatcher */
1260 mutex_unlock(&hnode_mgr->node_mgr_lock);
1263 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
1264 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1267 dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
1273  * ======== node_create_mgr ========
1275  *      Create a NODE Manager object.
/*
 * Allocates a node_mgr and builds its sub-objects in order: node list,
 * pipe bitmaps, notification object, DCD manager, dispatcher, STRM
 * manager, channel bitmaps, RMS function table and the dynamic loader.
 * On success *node_man receives the new manager; on failure the partial
 * object is torn down via delete_node_mgr().
 * NOTE(review): several error-handling lines appear elided in this view;
 * the cleanup path is assumed to run on any allocation failure — confirm.
 */
1277 int node_create_mgr(struct node_mgr **node_man,
1278 struct dev_object *hdev_obj)
1281 struct node_mgr *node_mgr_obj = NULL;
1282 struct disp_attr disp_attr_obj;
1283 char *sz_zl_file = "";
1284 struct nldr_attrs nldr_attrs_obj;
1287 DBC_REQUIRE(refs > 0);
1288 DBC_REQUIRE(node_man != NULL);
1289 DBC_REQUIRE(hdev_obj != NULL);
1292 /* Allocate Node manager object */
1293 node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
1295 node_mgr_obj->hdev_obj = hdev_obj;
1296 node_mgr_obj->node_list = kzalloc(sizeof(struct lst_list),
1298 node_mgr_obj->pipe_map = gb_create(MAXPIPES);
1299 node_mgr_obj->pipe_done_map = gb_create(MAXPIPES);
/* All three allocations above must succeed before the list is usable */
1300 if (node_mgr_obj->node_list == NULL
1301 || node_mgr_obj->pipe_map == NULL
1302 || node_mgr_obj->pipe_done_map == NULL) {
1305 INIT_LIST_HEAD(&node_mgr_obj->node_list->head);
1306 node_mgr_obj->ntfy_obj = kmalloc(
1307 sizeof(struct ntfy_object), GFP_KERNEL);
1308 if (node_mgr_obj->ntfy_obj)
1309 ntfy_init(node_mgr_obj->ntfy_obj);
1313 node_mgr_obj->num_created = 0;
1317 /* get devNodeType */
1319 status = dev_get_dev_type(hdev_obj, &dev_type);
1321 /* Create the DCD Manager */
1324 dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr);
1326 status = get_proc_props(node_mgr_obj, hdev_obj);
1329 /* Create NODE Dispatcher */
1331 disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset;
1332 disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size;
1333 disp_attr_obj.proc_family = node_mgr_obj->proc_family;
1334 disp_attr_obj.proc_type = node_mgr_obj->proc_type;
1336 disp_create(&node_mgr_obj->disp_obj, hdev_obj,
1339 /* Create a STRM Manager */
1341 status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
1344 dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
1345 /* Get msg_ctrl queue manager */
1346 dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
1347 mutex_init(&node_mgr_obj->node_mgr_lock);
1348 node_mgr_obj->chnl_map = gb_create(node_mgr_obj->ul_num_chnls);
1349 /* dma chnl map. ul_num_chnls is # per transport */
1350 node_mgr_obj->dma_chnl_map =
1351 gb_create(node_mgr_obj->ul_num_chnls);
1352 node_mgr_obj->zc_chnl_map =
1353 gb_create(node_mgr_obj->ul_num_chnls);
1354 if ((node_mgr_obj->chnl_map == NULL)
1355 || (node_mgr_obj->dma_chnl_map == NULL)
1356 || (node_mgr_obj->zc_chnl_map == NULL)) {
1359 /* Block out reserved channels */
1360 for (i = 0; i < node_mgr_obj->ul_chnl_offset; i++)
1361 gb_set(node_mgr_obj->chnl_map, i);
1363 /* Block out channels reserved for RMS */
1364 gb_set(node_mgr_obj->chnl_map,
1365 node_mgr_obj->ul_chnl_offset);
1366 gb_set(node_mgr_obj->chnl_map,
1367 node_mgr_obj->ul_chnl_offset + 1);
1371 /* NO RM Server on the IVA */
1372 if (dev_type != IVA_UNIT) {
1373 /* Get addresses of any RMS functions loaded */
1374 status = get_rms_fxns(node_mgr_obj);
1378 /* Get loader functions and create loader */
1380 node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */
1383 nldr_attrs_obj.pfn_ovly = ovly;
1384 nldr_attrs_obj.pfn_write = mem_write;
1385 nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
1386 nldr_attrs_obj.us_dsp_mau_size = node_mgr_obj->udsp_mau_size;
/* Remember loader-init result so delete_node_mgr() knows to call pfn_exit */
1387 node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.pfn_init();
1389 node_mgr_obj->nldr_fxns.pfn_create(&node_mgr_obj->nldr_obj,
1394 *node_man = node_mgr_obj;
1396 delete_node_mgr(node_mgr_obj);
1398 DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));
1404  * ======== node_delete ========
1406  *      Delete a node on the DSP by remotely calling the node's delete function.
1407  *      Loads the node's delete function if necessary. Free GPP side resources
1408  *      after node's delete function returns.
/*
 * The delete phase is dispatched for non-device nodes whenever the node
 * progressed past plain allocation (node_env set), even if the create
 * phase failed.  Host-side resources are then freed unconditionally and
 * the node is removed from the manager's list and from the process
 * context's idr.  Runs entirely under hnode_mgr->node_mgr_lock.
 */
1410 int node_delete(struct node_res_object *noderes,
1411 struct process_context *pr_ctxt)
1413 struct node_object *pnode = noderes->hnode;
1414 struct node_mgr *hnode_mgr;
1415 struct proc_object *hprocessor;
1416 struct disp_object *disp_obj;
1418 enum node_type node_type;
1419 enum node_state state;
1422 struct dsp_cbdata cb_data;
1424 struct bridge_drv_interface *intf_fxns;
1426 void *node_res = noderes;
1428 struct dsp_processorstate proc_state;
1429 DBC_REQUIRE(refs > 0);
1435 /* create struct dsp_cbdata struct for PWR call */
1436 cb_data.cb_data = PWR_TIMEOUT;
1437 hnode_mgr = pnode->hnode_mgr;
1438 hprocessor = pnode->hprocessor;
1439 disp_obj = hnode_mgr->disp_obj;
1440 node_type = node_get_type(pnode);
1441 intf_fxns = hnode_mgr->intf_fxns;
1442 /* Enter critical section */
1443 mutex_lock(&hnode_mgr->node_mgr_lock);
1445 state = node_get_state(pnode);
1446 /* Execute delete phase code for non-device node in all cases
1447  * except when the node was only allocated. Delete phase must be
1448  * executed even if create phase was executed, but failed.
1449  * If the node environment pointer is non-NULL, the delete phase
1450  * code must be executed. */
1451 if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
1452 node_type != NODE_DEVICE) {
1453 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1457 if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
1458 /* If node has terminated, execute phase code will
1459  * have already been unloaded in node_on_exit(). If the
1460  * node is PAUSED, the execute phase is loaded, and it
1461  * is now ok to unload it. If the node is running, we
1462  * will unload the execute phase only after deleting
1464 if (state == NODE_PAUSED && pnode->loaded &&
1465 pnode->phase_split) {
1466 /* Ok to unload execute code as long as node
1467  * is not * running */
1469 hnode_mgr->nldr_fxns.
1470 pfn_unload(pnode->nldr_node_obj,
1472 pnode->loaded = false;
1473 NODE_SET_STATE(pnode, NODE_DONE);
1475 /* Load delete phase code if not loaded or if haven't
1476  * * unloaded EXECUTE phase */
1477 if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
1478 pnode->phase_split) {
1480 hnode_mgr->nldr_fxns.
1481 pfn_load(pnode->nldr_node_obj, NLDR_DELETE);
1483 pnode->loaded = true;
1485 pr_err("%s: fail - load delete code:"
1486 " 0x%x\n", __func__, status);
1491 /* Unblock a thread trying to terminate the node */
1492 (void)sync_set_event(pnode->sync_done);
1493 if (proc_id == DSP_UNIT) {
1494 /* ul_delete_fxn = address of node's delete
1496 status = get_fxn_address(pnode, &ul_delete_fxn,
1498 } else if (proc_id == IVA_UNIT)
1499 ul_delete_fxn = (u32) pnode->node_env;
1501 status = proc_get_state(hprocessor,
1504 dsp_processorstate));
/* Skip the remote delete call if the DSP is already in an error state */
1505 if (proc_state.proc_state != PROC_ERROR) {
1507 disp_node_delete(disp_obj, pnode,
1514 NODE_SET_STATE(pnode, NODE_DONE);
1516 /* Unload execute, if not unloaded, and delete
1518 if (state == NODE_RUNNING &&
1519 pnode->phase_split) {
1521 hnode_mgr->nldr_fxns.
1522 pfn_unload(pnode->nldr_node_obj,
1526 pr_err("%s: fail - unload execute code:"
1527 " 0x%x\n", __func__, status1);
1530 hnode_mgr->nldr_fxns.pfn_unload(pnode->
1533 pnode->loaded = false;
1535 pr_err("%s: fail - unload delete code: "
1536 "0x%x\n", __func__, status1);
1540 /* Free host side resources even if a failure occurred */
1541 /* Remove node from hnode_mgr->node_list */
1542 lst_remove_elem(hnode_mgr->node_list, (struct list_head *)pnode);
1543 hnode_mgr->num_nodes--;
1544 /* Decrement count of nodes created on DSP */
1545 if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
1546 (pnode->node_env != (u32) NULL)))
1547 hnode_mgr->num_created--;
1548 /* Free host-side resources allocated by node_create()
1549  * delete_node() fails if SM buffers not freed by client! */
1550 drv_proc_node_update_status(node_res, false);
1551 delete_node(pnode, pr_ctxt);
1554  * Release all Node resources and its context
1556 idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
1559 /* Exit critical section */
1560 mutex_unlock(&hnode_mgr->node_mgr_lock);
1561 proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
1563 dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
1568  * ======== node_delete_mgr ========
1570  *      Delete the NODE Manager.
/*
 * Thin public wrapper around delete_node_mgr(); frees the manager and
 * everything it owns.  hnode_mgr validity checks appear elided in this
 * view — presumably validated before the call; confirm against full file.
 */
1572 int node_delete_mgr(struct node_mgr *hnode_mgr)
1576 DBC_REQUIRE(refs > 0);
1579 delete_node_mgr(hnode_mgr);
1587  * ======== node_enum_nodes ========
1589  *      Enumerate currently allocated nodes.
/*
 * Copies up to node_tab_size node handles into node_tab under the node
 * manager lock.  If the caller's table is too small, only *pu_allocated
 * is set (to the actual node count) so the caller can retry with a
 * bigger table; otherwise both counts are set to num_nodes.
 */
1591 int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1592 u32 node_tab_size, u32 *pu_num_nodes,
1595 struct node_object *hnode;
1598 DBC_REQUIRE(refs > 0);
1599 DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
1600 DBC_REQUIRE(pu_num_nodes != NULL);
1601 DBC_REQUIRE(pu_allocated != NULL);
1607 /* Enter critical section */
1608 mutex_lock(&hnode_mgr->node_mgr_lock);
1610 if (hnode_mgr->num_nodes > node_tab_size) {
1611 *pu_allocated = hnode_mgr->num_nodes;
1615 hnode = (struct node_object *)lst_first(hnode_mgr->
/* Walk the list exactly num_nodes times; list and count must agree */
1617 for (i = 0; i < hnode_mgr->num_nodes; i++) {
1619 node_tab[i] = hnode;
1620 hnode = (struct node_object *)lst_next
1621 (hnode_mgr->node_list,
1622 (struct list_head *)hnode);
1624 *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
1626 /* end of sync_enter_cs */
1627 /* Exit critical section */
1628 mutex_unlock(&hnode_mgr->node_mgr_lock);
1634  * ======== node_exit ========
1636  *      Discontinue usage of NODE module.
/*
 * Module-exit hook; pairs with node_init().  The reference-count
 * decrement itself is elided in this view — presumably "refs--" sits
 * between the two DBC checks; confirm against the full file.
 */
1638 void node_exit(void)
1640 DBC_REQUIRE(refs > 0);
1644 DBC_ENSURE(refs >= 0);
1648  * ======== node_free_msg_buf ========
1650  *      Frees the message buffer.
/*
 * Frees a shared-memory message buffer previously handed to the client.
 * Only DSP_UNIT nodes are supported; pbuffer is the client's virtual
 * address and is released through the node's CMM address translator.
 * Only SM segment 1 is valid (single-segment limitation).
 */
1652 int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
1653 struct dsp_bufferattr *pattr)
1655 struct node_object *pnode = (struct node_object *)hnode;
1658 DBC_REQUIRE(refs > 0);
1659 DBC_REQUIRE(pbuffer != NULL);
1660 DBC_REQUIRE(pnode != NULL);
1661 DBC_REQUIRE(pnode->xlator != NULL);
1667 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
1668 if (proc_id == DSP_UNIT) {
/* NULL pattr means "use the module's default buffer attributes" */
1670 if (pattr == NULL) {
1672 pattr = &node_dfltbufattrs;
1674 /* Node supports single SM segment only */
1675 if (pattr->segment_id != 1)
1678 /* pbuffer is clients Va. */
1679 status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
/* Reaching here with a non-DSP processor is a programming error */
1682 DBC_ASSERT(NULL); /* BUG */
1689  * ======== node_get_attr ========
1691  *      Copy the current attributes of the specified node into a dsp_nodeattr
/*
 * Snapshot of the node's creation-time and runtime attributes into the
 * caller's dsp_nodeattr.  Taken under node_mgr_lock because priority and
 * stream-connection data can change concurrently (node_change_priority,
 * node_connect).
 */
1694 int node_get_attr(struct node_object *hnode,
1695 struct dsp_nodeattr *pattr, u32 attr_size)
1697 struct node_mgr *hnode_mgr;
1699 DBC_REQUIRE(refs > 0);
1700 DBC_REQUIRE(pattr != NULL);
1701 DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));
1706 hnode_mgr = hnode->hnode_mgr;
1707 /* Enter hnode_mgr critical section (since we're accessing
1708  * data that could be changed by node_change_priority() and
1709  * node_connect(). */
1710 mutex_lock(&hnode_mgr->node_mgr_lock);
1711 pattr->cb_struct = sizeof(struct dsp_nodeattr);
1712 /* dsp_nodeattrin */
1713 pattr->in_node_attr_in.cb_struct =
1714 sizeof(struct dsp_nodeattrin);
1715 pattr->in_node_attr_in.prio = hnode->prio;
1716 pattr->in_node_attr_in.utimeout = hnode->utimeout;
1717 pattr->in_node_attr_in.heap_size =
1718 hnode->create_args.asa.task_arg_obj.heap_size;
1719 pattr->in_node_attr_in.pgpp_virt_addr = (void *)
1720 hnode->create_args.asa.task_arg_obj.ugpp_heap_addr;
1721 pattr->node_attr_inputs = hnode->num_gpp_inputs;
1722 pattr->node_attr_outputs = hnode->num_gpp_outputs;
1724 get_node_info(hnode, &(pattr->node_info));
1725 /* end of sync_enter_cs */
1726 /* Exit critical section */
1727 mutex_unlock(&hnode_mgr->node_mgr_lock);
1733  * ======== node_get_channel_id ========
1735  *      Get the channel index reserved for a stream connection between the
/*
 * Looks up the device channel id backing a host<->node stream.  Only
 * TASK and DAIS-socket nodes have channels; the stream at 'index' must
 * be a HOSTCONNECT endpoint.  Returns -EINVAL (the initial status) when
 * no matching channel exists.
 */
1738 int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
1741 enum node_type node_type;
1742 int status = -EINVAL;
1743 DBC_REQUIRE(refs > 0);
1744 DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
1745 DBC_REQUIRE(chan_id != NULL);
1751 node_type = node_get_type(hnode);
1752 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
1756 if (dir == DSP_TONODE) {
1757 if (index < MAX_INPUTS(hnode)) {
1758 if (hnode->inputs[index].type == HOSTCONNECT) {
1759 *chan_id = hnode->inputs[index].dev_id;
/* dir was pre-validated, so here it must be DSP_FROMNODE */
1764 DBC_ASSERT(dir == DSP_FROMNODE);
1765 if (index < MAX_OUTPUTS(hnode)) {
1766 if (hnode->outputs[index].type == HOSTCONNECT) {
1767 *chan_id = hnode->outputs[index].dev_id;
1776  * ======== node_get_message ========
1778  *      Retrieve a message from a node on the DSP.
/*
 * Blocks (up to utimeout) until the node's message queue yields a
 * message.  Only MESSAGE, TASK and DAIS-socket nodes can receive.  If
 * the message carries a shared-memory descriptor (DSP_RMSBUFDESC), its
 * DSP-word address is translated DSP-PA -> GPP-PA -> GPP-VA and dw_arg2
 * is scaled from DSP words to bytes before returning to the caller.
 */
1780 int node_get_message(struct node_object *hnode,
1781 struct dsp_msg *message, u32 utimeout)
1783 struct node_mgr *hnode_mgr;
1784 enum node_type node_type;
1785 struct bridge_drv_interface *intf_fxns;
1788 struct dsp_processorstate proc_state;
1789 struct proc_object *hprocessor;
1791 DBC_REQUIRE(refs > 0);
1792 DBC_REQUIRE(message != NULL);
1798 hprocessor = hnode->hprocessor;
1799 status = proc_get_state(hprocessor, &proc_state,
1800 sizeof(struct dsp_processorstate));
1803 /* If processor is in error state then don't attempt to get the
1805 if (proc_state.proc_state == PROC_ERROR) {
1809 hnode_mgr = hnode->hnode_mgr;
1810 node_type = node_get_type(hnode);
1811 if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
1812 node_type != NODE_DAISSOCKET) {
1816 /* This function will block unless a message is available. Since
1817  * DSPNode_RegisterNotify() allows notification when a message
1818  * is available, the system can be designed so that
1819  * DSPNode_GetMessage() is only called when a message is
1821 intf_fxns = hnode_mgr->intf_fxns;
1823 (*intf_fxns->pfn_msg_get) (hnode->msg_queue_obj, message, utimeout);
1824 /* Check if message contains SM descriptor */
1825 if (status || !(message->dw_cmd & DSP_RMSBUFDESC))
1828 /* Translate DSP byte addr to GPP Va. */
1829 tmp_buf = cmm_xlator_translate(hnode->xlator,
1830 (void *)(message->dw_arg1 *
1832 udsp_word_size), CMM_DSPPA2PA);
1833 if (tmp_buf != NULL) {
1834 /* now convert this GPP Pa to Va */
1835 tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
1837 if (tmp_buf != NULL) {
1838 /* Adjust SM size in msg */
1839 message->dw_arg1 = (u32) tmp_buf;
/* dw_arg2 carried a size in DSP words; convert to bytes */
1840 message->dw_arg2 *= hnode->hnode_mgr->udsp_word_size;
1848 dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
1849 hnode, message, utimeout);
1854  * ======== node_get_nldr_obj ========
/*
 * Returns the node manager's dynamic-loader object through
 * *nldr_ovlyobj.  Simple accessor; validity checks on hnode_mgr appear
 * elided in this view.
 */
1856 int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1857 struct nldr_object **nldr_ovlyobj)
1860 struct node_mgr *node_mgr_obj = hnode_mgr;
1861 DBC_REQUIRE(nldr_ovlyobj != NULL);
1866 *nldr_ovlyobj = node_mgr_obj->nldr_obj;
1868 DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
1873  * ======== node_get_strm_mgr ========
1875  *      Returns the Stream manager.
/*
 * Accessor: hands back the STRM manager owned by this node's manager.
 */
1877 int node_get_strm_mgr(struct node_object *hnode,
1878 struct strm_mgr **strm_man)
1882 DBC_REQUIRE(refs > 0);
1887 *strm_man = hnode->hnode_mgr->strm_mgr_obj;
1893  * ======== node_get_load_type ========
/*
 * Returns the node's loader type (static/dynamic/overlay) as recorded
 * in its DCD properties; logs a debug message on a bad handle.
 */
1895 enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1897 DBC_REQUIRE(refs > 0);
1900 dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1903 return hnode->dcd_props.obj_data.node_obj.us_load_type;
1908  * ======== node_get_timeout ========
1910  *      Returns the timeout value for this node.
/*
 * Accessor for the node's configured timeout (milliseconds, per the
 * utimeout naming convention elsewhere — confirm); logs on bad handle.
 */
1912 u32 node_get_timeout(struct node_object *hnode)
1914 DBC_REQUIRE(refs > 0);
1917 dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1920 return hnode->utimeout;
1925  * ======== node_get_type ========
1927  *      Returns the node type.
/*
 * Maps the special DSP_HGPPNODE sentinel handle to NODE_GPP; all real
 * handles report their stored ntype.
 */
1929 enum node_type node_get_type(struct node_object *hnode)
1931 enum node_type node_type;
1933 if (hnode == (struct node_object *)DSP_HGPPNODE)
1934 node_type = NODE_GPP;
1939 node_type = hnode->ntype;
1945  * ======== node_init ========
1947  *      Initialize the NODE module.
/*
 * Module-init hook; pairs with node_exit().  The refcount increment and
 * return are elided in this view.
 */
1949 bool node_init(void)
1951 DBC_REQUIRE(refs >= 0);
1959  * ======== node_on_exit ========
1961  *      Gets called when RMS_EXIT is received for a node.
/*
 * DSP->GPP callback: records the node's exit status, marks it DONE,
 * unloads the execute-phase code (if phases are split and loaded),
 * wakes any node_terminate() waiter, and notifies registered clients.
 */
1963 void node_on_exit(struct node_object *hnode, s32 node_status)
1968 /* Set node state to done */
1969 NODE_SET_STATE(hnode, NODE_DONE);
1970 hnode->exit_status = node_status;
1971 if (hnode->loaded && hnode->phase_split) {
1972 (void)hnode->hnode_mgr->nldr_fxns.pfn_unload(hnode->
1975 hnode->loaded = false;
1977 /* Unblock call to node_terminate */
1978 (void)sync_set_event(hnode->sync_done);
1979 /* Notify clients */
1980 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
1981 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
1985  * ======== node_pause ========
1987  *      Suspend execution of a node currently running on the DSP.
/*
 * Pauses a RUNNING task/DAIS-socket node by remotely lowering it to
 * NODE_SUSPENDEDPRI via the dispatcher, then sets NODE_PAUSED.  IVA
 * nodes cannot be paused; a DSP in PROC_ERROR state is not contacted.
 * State check and change happen under node_mgr_lock.
 */
1989 int node_pause(struct node_object *hnode)
1991 struct node_object *pnode = (struct node_object *)hnode;
1992 enum node_type node_type;
1993 enum node_state state;
1994 struct node_mgr *hnode_mgr;
1997 struct dsp_processorstate proc_state;
1998 struct proc_object *hprocessor;
2000 DBC_REQUIRE(refs > 0);
2005 node_type = node_get_type(hnode);
2006 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2012 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2014 if (proc_id == IVA_UNIT)
2018 hnode_mgr = hnode->hnode_mgr;
2020 /* Enter critical section */
2021 mutex_lock(&hnode_mgr->node_mgr_lock);
2022 state = node_get_state(hnode);
2023 /* Check node state */
2024 if (state != NODE_RUNNING)
2029 hprocessor = hnode->hprocessor;
2030 status = proc_get_state(hprocessor, &proc_state,
2031 sizeof(struct dsp_processorstate));
2034 /* If processor is in error state then don't attempt
2035    to send the message */
2036 if (proc_state.proc_state == PROC_ERROR) {
/* Pause = remote priority change to the reserved "suspended" priority */
2041 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2042 hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY],
2043 hnode->node_env, NODE_SUSPENDEDPRI);
2047 NODE_SET_STATE(hnode, NODE_PAUSED);
2050 /* End of sync_enter_cs */
2051 /* Leave critical section */
2052 mutex_unlock(&hnode_mgr->node_mgr_lock);
2054 proc_notify_clients(hnode->hprocessor,
2055 DSP_NODESTATECHANGE);
2056 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2060 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2065  * ======== node_put_message ========
2067  *      Send a message to a message node, task node, or XDAIS socket node. This
2068  *      function will block until the message stream can accommodate the
2069  *      message, or a timeout occurs.
/*
 * Sends pmsg to the node's message queue.  Refuses if the DSP is in
 * PROC_ERROR, the node type cannot receive messages, or the node is
 * already TERMINATING/DONE (checked under node_mgr_lock — see the
 * race caveat in the inline comment below).  Messages carrying a
 * shared-memory descriptor have their GPP virtual address translated
 * to a DSP address and their size converted from bytes to DSP words
 * before being queued.
 */
2071 int node_put_message(struct node_object *hnode,
2072 const struct dsp_msg *pmsg, u32 utimeout)
2074 struct node_mgr *hnode_mgr = NULL;
2075 enum node_type node_type;
2076 struct bridge_drv_interface *intf_fxns;
2077 enum node_state state;
2080 struct dsp_msg new_msg;
2081 struct dsp_processorstate proc_state;
2082 struct proc_object *hprocessor;
2084 DBC_REQUIRE(refs > 0);
2085 DBC_REQUIRE(pmsg != NULL);
2091 hprocessor = hnode->hprocessor;
2092 status = proc_get_state(hprocessor, &proc_state,
2093 sizeof(struct dsp_processorstate));
2096 /* If processor is in bad state then don't attempt sending the
2098 if (proc_state.proc_state == PROC_ERROR) {
2102 hnode_mgr = hnode->hnode_mgr;
2103 node_type = node_get_type(hnode);
2104 if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
2105 node_type != NODE_DAISSOCKET)
2109 /* Check node state. Can't send messages to a node after
2110  * we've sent the RMS_EXIT command. There is still the
2111  * possibility that node_terminate can be called after we've
2112  * checked the state. Could add another SYNC object to
2113  * prevent this (can't use node_mgr_lock, since we don't
2114  * want to block other NODE functions). However, the node may
2115  * still exit on its own, before this message is sent. */
2116 mutex_lock(&hnode_mgr->node_mgr_lock);
2117 state = node_get_state(hnode);
2118 if (state == NODE_TERMINATING || state == NODE_DONE)
2121 /* end of sync_enter_cs */
2122 mutex_unlock(&hnode_mgr->node_mgr_lock);
2127 /* assign pmsg values to new msg */
2129 /* Now, check if message contains a SM buffer descriptor */
2130 if (pmsg->dw_cmd & DSP_RMSBUFDESC) {
2131 /* Translate GPP Va to DSP physical buf Ptr. */
2132 tmp_buf = cmm_xlator_translate(hnode->xlator,
2133 (void *)new_msg.dw_arg1,
2135 if (tmp_buf != NULL) {
2136 /* got translation, convert to MAUs in msg */
2137 if (hnode->hnode_mgr->udsp_word_size != 0) {
2140 hnode->hnode_mgr->udsp_word_size;
/* dw_arg2 goes from bytes to DSP words for the remote side */
2142 new_msg.dw_arg2 /= hnode->hnode_mgr->
2145 pr_err("%s: udsp_word_size is zero!\n",
2147 status = -EPERM; /* bad DSPWordSize */
2149 } else { /* failed to translate buffer address */
2154 intf_fxns = hnode_mgr->intf_fxns;
2155 status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj,
2156 &new_msg, utimeout);
2159 dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
2160 "status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
2165  * ======== node_register_notify ========
2167  *      Register to be notified on specific events for this node.
/*
 * Registers hnotification for node state changes and/or message-ready
 * events.  State-change notifications go through the node's ntfy
 * object; message-ready notifications are delegated to msg_ctrl.
 * Known limitation: the two event types cannot be registered together
 * in a single call.
 */
2169 int node_register_notify(struct node_object *hnode, u32 event_mask,
2171 struct dsp_notification *hnotification)
2173 struct bridge_drv_interface *intf_fxns;
2176 DBC_REQUIRE(refs > 0);
2177 DBC_REQUIRE(hnotification != NULL);
2182 /* Check if event mask is a valid node related event */
2183 if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2186 /* Check if notify type is valid */
2187 if (notify_type != DSP_SIGNALEVENT)
2190 /* Only one Notification can be registered at a
2191  * time - Limitation */
2192 if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2196 if (event_mask == DSP_NODESTATECHANGE) {
2197 status = ntfy_register(hnode->ntfy_obj, hnotification,
2198 event_mask & DSP_NODESTATECHANGE,
2201 /* Send Message part of event mask to msg_ctrl */
2202 intf_fxns = hnode->hnode_mgr->intf_fxns;
2203 status = (*intf_fxns->pfn_msg_register_notify)
2204 (hnode->msg_queue_obj,
2205 event_mask & DSP_NODEMESSAGEREADY, notify_type,
2210 dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
2211 "hnotification: %p status 0x%x\n", __func__, hnode,
2212 event_mask, notify_type, hnotification, status);
2217  * ======== node_run ========
2219  *      Start execution of a node's execute phase, or resume execution of a node
2220  *      that has been suspended (via NODE_NodePause()) on the DSP. Load the
2221  *      node's execute function if necessary.
/*
 * Two entry states are legal: NODE_CREATED (first run — load execute
 * phase if split, resolve its entry address, dispatch RMSEXECUTENODE)
 * and NODE_PAUSED (resume — restore the node's own priority via a
 * remote priority change).  On success the node becomes NODE_RUNNING;
 * on failure the previous state is restored.  All under node_mgr_lock.
 */
2223 int node_run(struct node_object *hnode)
2225 struct node_object *pnode = (struct node_object *)hnode;
2226 struct node_mgr *hnode_mgr;
2227 enum node_type node_type;
2228 enum node_state state;
2233 struct bridge_drv_interface *intf_fxns;
2234 struct dsp_processorstate proc_state;
2235 struct proc_object *hprocessor;
2237 DBC_REQUIRE(refs > 0);
2243 hprocessor = hnode->hprocessor;
2244 status = proc_get_state(hprocessor, &proc_state,
2245 sizeof(struct dsp_processorstate));
2248 /* If processor is in error state then don't attempt to run the node */
2249 if (proc_state.proc_state == PROC_ERROR) {
2253 node_type = node_get_type(hnode);
2254 if (node_type == NODE_DEVICE)
2259 hnode_mgr = hnode->hnode_mgr;
2264 intf_fxns = hnode_mgr->intf_fxns;
2265 /* Enter critical section */
2266 mutex_lock(&hnode_mgr->node_mgr_lock);
2268 state = node_get_state(hnode);
2269 if (state != NODE_CREATED && state != NODE_PAUSED)
2273 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2278 if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
2281 if (state == NODE_CREATED) {
2282 /* If node's execute function is not loaded, load it */
2283 if (!(hnode->loaded) && hnode->phase_split) {
2285 hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
2288 hnode->loaded = true;
2290 pr_err("%s: fail - load execute code: 0x%x\n",
2295 /* Get address of node's execute function */
2296 if (proc_id == IVA_UNIT)
2297 ul_execute_fxn = (u32) hnode->node_env;
2299 status = get_fxn_address(hnode, &ul_execute_fxn,
2304 ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE];
2306 disp_node_run(hnode_mgr->disp_obj, hnode,
2307 ul_fxn_addr, ul_execute_fxn,
2310 } else if (state == NODE_PAUSED) {
/* Resume = remote priority change back to the node's own priority */
2311 ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSCHANGENODEPRIORITY];
2312 status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
2313 ul_fxn_addr, hnode->node_env,
2314 NODE_GET_PRIORITY(hnode));
2316 /* We should never get here */
2320 /* Update node state. */
2322 NODE_SET_STATE(hnode, NODE_RUNNING);
2323 else /* Set state back to previous value */
2324 NODE_SET_STATE(hnode, state);
2325 /*End of sync_enter_cs */
2326 /* Exit critical section */
2327 mutex_unlock(&hnode_mgr->node_mgr_lock);
2329 proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
2330 ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
2333 dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
2338  * ======== node_terminate ========
2340  *      Signal a node running on the DSP that it should exit its execute phase
/*
 * Asks a RUNNING task/DAIS-socket node to exit: marks it TERMINATING,
 * sends RMS_EXIT, and waits on sync_done (posted by node_on_exit() or
 * node_delete()).  If the node does not exit in time, escalates to
 * RMS_KILLTASK with an extended timeout (2x utimeout, capped at
 * MAXTIMEOUT); a still-unresponsive node is reported to the DEH manager
 * as a simulated DSP exception.  *pstatus receives the node's exit
 * status, read back under node_mgr_lock in case the node was deleted
 * while this thread was blocked.
 */
2343 int node_terminate(struct node_object *hnode, int *pstatus)
2345 struct node_object *pnode = (struct node_object *)hnode;
2346 struct node_mgr *hnode_mgr = NULL;
2347 enum node_type node_type;
2348 struct bridge_drv_interface *intf_fxns;
2349 enum node_state state;
2350 struct dsp_msg msg, killmsg;
2352 u32 proc_id, kill_time_out;
2353 struct deh_mgr *hdeh_mgr;
2354 struct dsp_processorstate proc_state;
2356 DBC_REQUIRE(refs > 0);
2357 DBC_REQUIRE(pstatus != NULL);
2359 if (!hnode || !hnode->hnode_mgr) {
2363 if (pnode->hprocessor == NULL) {
2367 status = proc_get_processor_id(pnode->hprocessor, &proc_id);
2370 hnode_mgr = hnode->hnode_mgr;
2371 node_type = node_get_type(hnode);
2372 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
2376 /* Check node state */
2377 mutex_lock(&hnode_mgr->node_mgr_lock);
2378 state = node_get_state(hnode);
2379 if (state != NODE_RUNNING) {
2381 /* Set the exit status if node terminated on
2383 if (state == NODE_DONE)
2384 *pstatus = hnode->exit_status;
2387 NODE_SET_STATE(hnode, NODE_TERMINATING);
2389 /* end of sync_enter_cs */
2390 mutex_unlock(&hnode_mgr->node_mgr_lock);
2394  * Send exit message. Do not change state to NODE_DONE
2395  * here. That will be done in callback.
2397 status = proc_get_state(pnode->hprocessor, &proc_state,
2398 sizeof(struct dsp_processorstate));
2401 /* If processor is in error state then don't attempt to send
2402  * A kill task command */
2403 if (proc_state.proc_state == PROC_ERROR) {
2408 msg.dw_cmd = RMS_EXIT;
2409 msg.dw_arg1 = hnode->node_env;
2410 killmsg.dw_cmd = RMS_KILLTASK;
2411 killmsg.dw_arg1 = hnode->node_env;
2412 intf_fxns = hnode_mgr->intf_fxns;
/* Give the kill path double the node's timeout, but never past MAXTIMEOUT */
2414 if (hnode->utimeout > MAXTIMEOUT)
2415 kill_time_out = MAXTIMEOUT;
2417 kill_time_out = (hnode->utimeout) * 2;
2419 status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj, &msg,
2425  * Wait on synchronization object that will be
2426  * posted in the callback on receiving RMS_EXIT
2427  * message, or by node_delete. Check for valid hnode,
2428  * in case posted by node_delete().
2430 status = sync_wait_on_event(hnode->sync_done,
2432 if (status != ETIME)
/* Graceful exit timed out: escalate to a forced kill */
2435 status = (*intf_fxns->pfn_msg_put)(hnode->msg_queue_obj,
2436 &killmsg, hnode->utimeout);
2439 status = sync_wait_on_event(hnode->sync_done,
2443  * Here it goes the part of the simulation of
2444  * the DSP exception.
2446 dev_get_deh_mgr(hnode_mgr->hdev_obj, &hdeh_mgr);
2450 bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
2455 /* Enter CS before getting exit status, in case node was
2457 mutex_lock(&hnode_mgr->node_mgr_lock);
2458 /* Make sure node wasn't deleted while we blocked */
2462 *pstatus = hnode->exit_status;
2463 dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
2464 __func__, hnode, hnode->node_env, status);
2466 mutex_unlock(&hnode_mgr->node_mgr_lock);
2467 } /*End of sync_enter_cs */
2473  * ======== delete_node ========
2475  *      Free GPP resources allocated in node_allocate() or node_connect().
/*
 * Host-side teardown only (no DSP communication): frees the message
 * queue and its pdata, the sync object, per-stream channel bookkeeping,
 * stream-definition strings, the mapped DSP heap, stream-connect table,
 * device name, notification object, DCD-phase strings, the SM address
 * translator and the loader node.  Called from node_delete() and from
 * delete_node_mgr() during manager teardown (then with pr_ctxt == NULL).
 */
2477 static void delete_node(struct node_object *hnode,
2478 struct process_context *pr_ctxt)
2480 struct node_mgr *hnode_mgr;
2481 struct bridge_drv_interface *intf_fxns;
2483 enum node_type node_type;
2484 struct stream_chnl stream;
2485 struct node_msgargs node_msg_args;
2486 struct node_taskargs task_arg_obj;
2491 hnode_mgr = hnode->hnode_mgr;
2495 node_type = node_get_type(hnode);
2496 if (node_type != NODE_DEVICE) {
2497 node_msg_args = hnode->create_args.asa.node_msg_args;
2498 kfree(node_msg_args.pdata);
2500 /* Free msg_ctrl queue */
2501 if (hnode->msg_queue_obj) {
2502 intf_fxns = hnode_mgr->intf_fxns;
2503 (*intf_fxns->pfn_msg_delete_queue) (hnode->
2505 hnode->msg_queue_obj = NULL;
2508 kfree(hnode->sync_done);
2510 /* Free all stream info */
/* free_stream() releases the channel bitmap slots each stream held */
2511 if (hnode->inputs) {
2512 for (i = 0; i < MAX_INPUTS(hnode); i++) {
2513 stream = hnode->inputs[i];
2514 free_stream(hnode_mgr, stream);
2516 kfree(hnode->inputs);
2517 hnode->inputs = NULL;
2519 if (hnode->outputs) {
2520 for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2521 stream = hnode->outputs[i];
2522 free_stream(hnode_mgr, stream);
2524 kfree(hnode->outputs);
2525 hnode->outputs = NULL;
2527 task_arg_obj = hnode->create_args.asa.task_arg_obj;
2528 if (task_arg_obj.strm_in_def) {
2529 for (i = 0; i < MAX_INPUTS(hnode); i++) {
2530 kfree(task_arg_obj.strm_in_def[i].sz_device);
2531 task_arg_obj.strm_in_def[i].sz_device = NULL;
2533 kfree(task_arg_obj.strm_in_def);
2534 task_arg_obj.strm_in_def = NULL;
2536 if (task_arg_obj.strm_out_def) {
2537 for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
2538 kfree(task_arg_obj.strm_out_def[i].sz_device);
2539 task_arg_obj.strm_out_def[i].sz_device = NULL;
2541 kfree(task_arg_obj.strm_out_def);
2542 task_arg_obj.strm_out_def = NULL;
2544 if (task_arg_obj.udsp_heap_res_addr) {
/* Unmap the node's DSP heap region from the processor's address space */
2545 status = proc_un_map(hnode->hprocessor, (void *)
2546 task_arg_obj.udsp_heap_addr,
2550 if (node_type != NODE_MESSAGE) {
2551 kfree(hnode->stream_connect);
2552 hnode->stream_connect = NULL;
2554 kfree(hnode->pstr_dev_name);
2555 hnode->pstr_dev_name = NULL;
2557 if (hnode->ntfy_obj) {
2558 ntfy_delete(hnode->ntfy_obj);
2559 kfree(hnode->ntfy_obj);
2560 hnode->ntfy_obj = NULL;
2563 /* These were allocated in dcd_get_object_def (via node_allocate) */
2564 kfree(hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn);
2565 hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn = NULL;
2567 kfree(hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn);
2568 hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn = NULL;
2570 kfree(hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn);
2571 hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn = NULL;
2573 kfree(hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name);
2574 hnode->dcd_props.obj_data.node_obj.pstr_i_alg_name = NULL;
2576 /* Free all SM address translator resources */
2577 kfree(hnode->xlator);
2578 kfree(hnode->nldr_node_obj);
2579 hnode->nldr_node_obj = NULL;
2580 hnode->hnode_mgr = NULL;
2588  * ======== delete_node_mgr ========
2590  *      Frees the node manager.
/*
 * Reverse of node_create_mgr(): deletes any nodes still on the list,
 * then tears down the DCD manager, notification object, all gb_ bitmaps,
 * dispatcher, STRM manager and the dynamic loader (pfn_exit only if
 * pfn_init succeeded at create time).  Safe on a partially constructed
 * manager — every member is NULL-checked before release.
 */
2592 static void delete_node_mgr(struct node_mgr *hnode_mgr)
2594 struct node_object *hnode;
2597 /* Free resources */
2598 if (hnode_mgr->hdcd_mgr)
2599 dcd_destroy_manager(hnode_mgr->hdcd_mgr);
2601 /* Remove any elements remaining in lists */
2602 if (hnode_mgr->node_list) {
2603 while ((hnode = (struct node_object *)
2604 lst_get_head(hnode_mgr->node_list)))
2605 delete_node(hnode, NULL);
2607 DBC_ASSERT(LST_IS_EMPTY(hnode_mgr->node_list));
2608 kfree(hnode_mgr->node_list);
2610 mutex_destroy(&hnode_mgr->node_mgr_lock);
2611 if (hnode_mgr->ntfy_obj) {
2612 ntfy_delete(hnode_mgr->ntfy_obj);
2613 kfree(hnode_mgr->ntfy_obj);
2616 if (hnode_mgr->pipe_map)
2617 gb_delete(hnode_mgr->pipe_map);
2619 if (hnode_mgr->pipe_done_map)
2620 gb_delete(hnode_mgr->pipe_done_map);
2622 if (hnode_mgr->chnl_map)
2623 gb_delete(hnode_mgr->chnl_map);
2625 if (hnode_mgr->dma_chnl_map)
2626 gb_delete(hnode_mgr->dma_chnl_map);
2628 if (hnode_mgr->zc_chnl_map)
2629 gb_delete(hnode_mgr->zc_chnl_map);
2631 if (hnode_mgr->disp_obj)
2632 disp_delete(hnode_mgr->disp_obj);
2634 if (hnode_mgr->strm_mgr_obj)
2635 strm_delete(hnode_mgr->strm_mgr_obj);
2637 /* Delete the loader */
2638 if (hnode_mgr->nldr_obj)
2639 hnode_mgr->nldr_fxns.pfn_delete(hnode_mgr->nldr_obj);
/* Only call pfn_exit if pfn_init succeeded in node_create_mgr() */
2641 if (hnode_mgr->loader_init)
2642 hnode_mgr->nldr_fxns.pfn_exit();
2649 * ======== fill_stream_connect ========
2651 * Fills stream information.
/*
 * Records a new connection (node1:stream1 -> node2:stream2) in each
 * node's stream_connect[] array.  DSP_HGPPNODE stands for the GPP (host)
 * side; NODE_DEVICE nodes keep no stream_connect records of their own.
 * NOTE(review): assumes num_inputs/num_outputs were already incremented
 * for this connection by the caller (index is count - 1) — confirm.
 */
2653 static void fill_stream_connect(struct node_object *node1,
2654 struct node_object *node2,
2655 u32 stream1, u32 stream2)
2658 struct dsp_streamconnect *strm1 = NULL;
2659 struct dsp_streamconnect *strm2 = NULL;
2660 enum node_type node1_type = NODE_TASK;
2661 enum node_type node2_type = NODE_TASK;
2663 node1_type = node_get_type(node1);
2664 node2_type = node_get_type(node2);
2665 if (node1 != (struct node_object *)DSP_HGPPNODE) {
/* node1 is a DSP node: fill its outgoing connection record. */
2667 if (node1_type != NODE_DEVICE) {
2668 strm_index = node1->num_inputs +
2669 node1->num_outputs - 1;
2670 strm1 = &(node1->stream_connect[strm_index]);
2671 strm1->cb_struct = sizeof(struct dsp_streamconnect);
2672 strm1->this_node_stream_index = stream1;
2675 if (node2 != (struct node_object *)DSP_HGPPNODE) {
2676 /* NODE == > NODE */
2677 if (node1_type != NODE_DEVICE) {
2678 strm1->connected_node = node2;
2679 strm1->ui_connected_node_id = node2->node_uuid;
2680 strm1->connected_node_stream_index = stream2;
2681 strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
/* Mirror the record on the receiving node's side. */
2683 if (node2_type != NODE_DEVICE) {
2684 strm_index = node2->num_inputs +
2685 node2->num_outputs - 1;
2686 strm2 = &(node2->stream_connect[strm_index]);
2688 sizeof(struct dsp_streamconnect);
2689 strm2->this_node_stream_index = stream2;
2690 strm2->connected_node = node1;
2691 strm2->ui_connected_node_id = node1->node_uuid;
2692 strm2->connected_node_stream_index = stream1;
2693 strm2->connect_type = CONNECTTYPE_NODEINPUT;
2695 } else if (node1_type != NODE_DEVICE)
2696 strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
/* GPP == > NODE: only node2's side needs a record. */
2699 DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
2700 strm_index = node2->num_inputs + node2->num_outputs - 1;
2701 strm2 = &(node2->stream_connect[strm_index]);
2702 strm2->cb_struct = sizeof(struct dsp_streamconnect);
2703 strm2->this_node_stream_index = stream2;
2704 strm2->connect_type = CONNECTTYPE_GPPINPUT;
2709 * ======== fill_stream_def ========
2711 * Fills Stream attributes.
/*
 * Copies user-supplied stream attributes (pattrs) into the RMS stream
 * definition, or falls back to the DEFAULT* values when pattrs is NULL.
 * Buffer sizes are converted from bytes to DSP data MAUs (minimum
 * addressable units) by dividing by udsp_data_mau_size.
 */
2713 static void fill_stream_def(struct node_object *hnode,
2714 struct node_strmdef *pstrm_def,
2715 struct dsp_strmattr *pattrs)
2717 struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2719 if (pattrs != NULL) {
2720 pstrm_def->num_bufs = pattrs->num_bufs;
2721 pstrm_def->buf_size =
2722 pattrs->buf_size / hnode_mgr->udsp_data_mau_size;
2723 pstrm_def->seg_id = pattrs->seg_id;
2724 pstrm_def->buf_alignment = pattrs->buf_alignment;
2725 pstrm_def->utimeout = pattrs->utimeout;
/* No attributes supplied: use the driver-wide defaults. */
2727 pstrm_def->num_bufs = DEFAULTNBUFS;
2728 pstrm_def->buf_size =
2729 DEFAULTBUFSIZE / hnode_mgr->udsp_data_mau_size;
2730 pstrm_def->seg_id = DEFAULTSEGID;
2731 pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2732 pstrm_def->utimeout = DEFAULTTIMEOUT;
2737 * ======== free_stream ========
2739 * Updates the channel mask and frees the pipe id.
/*
 * A node-to-node pipe is shared by two nodes, so pipe_done_map tracks
 * whether the peer has already released it: the pipe id is only cleared
 * from pipe_map once both ends are gone.  Host-connect streams map onto
 * three consecutive device-id ranges of ul_num_chnls each:
 * [0, n) normal channels, [n, 2n) DMA channels, [2n, 3n) zero-copy.
 */
2741 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2743 /* Free up the pipe id unless other node has not yet been deleted. */
2744 if (stream.type == NODECONNECT) {
2745 if (gb_test(hnode_mgr->pipe_done_map, stream.dev_id)) {
2746 /* The other node has already been deleted */
2747 gb_clear(hnode_mgr->pipe_done_map, stream.dev_id);
2748 gb_clear(hnode_mgr->pipe_map, stream.dev_id);
2750 /* The other node has not been deleted yet */
2751 gb_set(hnode_mgr->pipe_done_map, stream.dev_id);
2753 } else if (stream.type == HOSTCONNECT) {
2754 if (stream.dev_id < hnode_mgr->ul_num_chnls) {
2755 gb_clear(hnode_mgr->chnl_map, stream.dev_id);
2756 } else if (stream.dev_id < (2 * hnode_mgr->ul_num_chnls)) {
/* DMA channel ids are offset by one block of ul_num_chnls. */
2758 gb_clear(hnode_mgr->dma_chnl_map, stream.dev_id -
2759 (1 * hnode_mgr->ul_num_chnls));
2760 } else if (stream.dev_id < (3 * hnode_mgr->ul_num_chnls)) {
/* Zero-copy channel ids are offset by two blocks. */
2762 gb_clear(hnode_mgr->zc_chnl_map, stream.dev_id -
2763 (2 * hnode_mgr->ul_num_chnls));
2769 * ======== get_fxn_address ========
2771 * Retrieves the address for create, execute or delete phase for a node.
/*
 * Picks the phase function name stored in the node's DCD properties
 * (create/execute/delete phase — selection logic is elided here) and
 * resolves it to a DSP address via the node loader's pfn_get_fxn_addr.
 * Only task, DAIS-socket and message nodes have phase functions.
 */
2773 static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
2776 char *pstr_fxn_name = NULL;
2777 struct node_mgr *hnode_mgr = hnode->hnode_mgr;
2779 DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
2780 node_get_type(hnode) == NODE_DAISSOCKET ||
2781 node_get_type(hnode) == NODE_MESSAGE);
2786 hnode->dcd_props.obj_data.node_obj.pstr_create_phase_fxn;
2790 hnode->dcd_props.obj_data.node_obj.pstr_execute_phase_fxn;
2794 hnode->dcd_props.obj_data.node_obj.pstr_delete_phase_fxn;
2797 /* Should never get here */
/* Resolve the symbol name to a DSP address through the loader. */
2803 hnode_mgr->nldr_fxns.pfn_get_fxn_addr(hnode->nldr_node_obj,
2804 pstr_fxn_name, fxn_addr);
2810 * ======== get_node_info ========
2812 * Retrieves the node information.
/*
 * Fills node_info from the node object: database properties, current
 * priority, owner, stream counts, environment and run state, plus a
 * copy of every stream connection record.
 * NOTE(review): assumes node_info->sc_stream_connection[] can hold
 * num_inputs + num_outputs entries — confirm against the caller.
 */
2814 void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
2819 DBC_REQUIRE(node_info != NULL);
2821 node_info->cb_struct = sizeof(struct dsp_nodeinfo);
2822 node_info->nb_node_database_props =
2823 hnode->dcd_props.obj_data.node_obj.ndb_props;
2824 node_info->execution_priority = hnode->prio;
2825 node_info->device_owner = hnode->device_owner;
2826 node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
2827 node_info->node_env = hnode->node_env;
2829 node_info->ns_execution_state = node_get_state(hnode);
2831 /* Copy stream connect data */
2832 for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2833 node_info->sc_stream_connection[i] = hnode->stream_connect[i];
2838 * ======== get_node_props ========
2840 * Retrieve node properties.
/*
 * Loads the node's definition from the DCD (DSP component database)
 * into dcd_prop, caches the node type on hnode, then copies the
 * NDB-derived pieces into hnode->create_args: message args for every
 * non-device node, device name for device nodes, and task args
 * (priority, stack sizes, stack segment) for task/DAIS-socket nodes.
 */
2842 static int get_node_props(struct dcd_manager *hdcd_mgr,
2843 struct node_object *hnode,
2844 const struct dsp_uuid *node_uuid,
2845 struct dcd_genericobj *dcd_prop)
2848 struct node_msgargs *pmsg_args;
2849 struct node_taskargs *task_arg_obj;
2850 enum node_type node_type = NODE_TASK;
2851 struct dsp_ndbprops *pndb_props =
2852 &(dcd_prop->obj_data.node_obj.ndb_props);
2854 char sz_uuid[MAXUUIDLEN];
2856 status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
2857 DSP_DCDNODETYPE, dcd_prop);
2860 hnode->ntype = node_type = pndb_props->ntype;
2862 /* Create UUID value to set in registry. */
2863 uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
2865 dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
2867 /* Fill in message args that come from NDB */
2868 if (node_type != NODE_DEVICE) {
2869 pmsg_args = &(hnode->create_args.asa.node_msg_args);
2871 dcd_prop->obj_data.node_obj.msg_segid;
2872 pmsg_args->notify_type =
2873 dcd_prop->obj_data.node_obj.msg_notify_type;
2874 pmsg_args->max_msgs = pndb_props->message_depth;
2875 dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
2876 pmsg_args->max_msgs);
/* Device node: duplicate the NDB name; freed in delete_node(). */
2878 /* Copy device name */
2879 DBC_REQUIRE(pndb_props->ac_name);
2880 len = strlen(pndb_props->ac_name);
2881 DBC_ASSERT(len < MAXDEVNAMELEN);
2882 hnode->pstr_dev_name = kzalloc(len + 1, GFP_KERNEL);
2883 if (hnode->pstr_dev_name == NULL) {
/* kzalloc zeroed the buffer, so copying len bytes leaves it terminated. */
2886 strncpy(hnode->pstr_dev_name,
2887 pndb_props->ac_name, len);
2892 /* Fill in create args that come from NDB */
2893 if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
2894 task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
2895 task_arg_obj->prio = pndb_props->prio;
2896 task_arg_obj->stack_size = pndb_props->stack_size;
2897 task_arg_obj->sys_stack_size =
2898 pndb_props->sys_stack_size;
2899 task_arg_obj->stack_seg = pndb_props->stack_seg;
2900 dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
2901 "0x%x words System Stack Size: 0x%x words "
2902 "Stack Segment: 0x%x profile count : 0x%x\n",
2903 task_arg_obj->prio, task_arg_obj->stack_size,
2904 task_arg_obj->sys_stack_size,
2905 task_arg_obj->stack_seg,
2906 pndb_props->count_profiles);
2914 * ======== get_proc_props ========
2916 * Retrieve the processor properties.
/*
 * Copies channel configuration out of the bridge device context's host
 * resources into the node manager, then fills in hard-coded processor
 * characteristics (C6000 family / 6410 type, word/MAU sizes, priority
 * range) pending a real PROC query API — see the TODO below.
 */
2918 static int get_proc_props(struct node_mgr *hnode_mgr,
2919 struct dev_object *hdev_obj)
2921 struct cfg_hostres *host_res;
2922 struct bridge_dev_context *pbridge_context;
2925 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2926 if (!pbridge_context)
2930 host_res = pbridge_context->resources;
2933 hnode_mgr->ul_chnl_offset = host_res->dw_chnl_offset;
2934 hnode_mgr->ul_chnl_buf_size = host_res->dw_chnl_buf_size;
2935 hnode_mgr->ul_num_chnls = host_res->dw_num_chnls;
2938 * PROC will add an API to get dsp_processorinfo.
2939 * Fill in default values for now.
2941 /* TODO -- Instead of hard coding, take from registry */
2942 hnode_mgr->proc_family = 6000;
2943 hnode_mgr->proc_type = 6410;
2944 hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2945 hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2946 hnode_mgr->udsp_word_size = DSPWORDSIZE;
2947 hnode_mgr->udsp_data_mau_size = DSPWORDSIZE;
2948 hnode_mgr->udsp_mau_size = 1;
2955 * ======== node_get_uuid_props ========
2957 * Fetch Node UUID properties from DCD/DOF file.
/*
 * Public API: looks up node_uuid in the DCD and copies its ndb_props
 * into *node_props.  Bails out early for NULL handles or a processor
 * in the error state.  The DCD query is serialized under the node
 * manager lock (dbll_open/close underneath is not reentrant), and the
 * phase-function name strings the query allocates are freed here since
 * only ndb_props is returned to the caller.
 */
2959 int node_get_uuid_props(void *hprocessor,
2960 const struct dsp_uuid *node_uuid,
2961 struct dsp_ndbprops *node_props)
2963 struct node_mgr *hnode_mgr = NULL;
2964 struct dev_object *hdev_obj;
2966 struct dcd_nodeprops dcd_node_props;
2967 struct dsp_processorstate proc_state;
2969 DBC_REQUIRE(refs > 0);
2970 DBC_REQUIRE(hprocessor != NULL);
2971 DBC_REQUIRE(node_uuid != NULL);
2973 if (hprocessor == NULL || node_uuid == NULL) {
2977 status = proc_get_state(hprocessor, &proc_state,
2978 sizeof(struct dsp_processorstate));
2981 /* If processor is in error state then don't attempt
2982 to send the message */
2983 if (proc_state.proc_state == PROC_ERROR) {
2988 status = proc_get_dev_object(hprocessor, &hdev_obj);
2990 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
2991 if (hnode_mgr == NULL) {
2998 * Enter the critical section. This is needed because
2999 * dcd_get_object_def will ultimately end up calling dbll_open/close,
3000 * which needs to be protected in order to not corrupt the zlib manager
3003 mutex_lock(&hnode_mgr->node_mgr_lock);
/* NULL the string fields so the kfree()s below are safe on failure. */
3005 dcd_node_props.pstr_create_phase_fxn = NULL;
3006 dcd_node_props.pstr_execute_phase_fxn = NULL;
3007 dcd_node_props.pstr_delete_phase_fxn = NULL;
3008 dcd_node_props.pstr_i_alg_name = NULL;
3010 status = dcd_get_object_def(hnode_mgr->hdcd_mgr,
3011 (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
3012 (struct dcd_genericobj *)&dcd_node_props);
3015 *node_props = dcd_node_props.ndb_props;
3016 kfree(dcd_node_props.pstr_create_phase_fxn);
3018 kfree(dcd_node_props.pstr_execute_phase_fxn);
3020 kfree(dcd_node_props.pstr_delete_phase_fxn);
3022 kfree(dcd_node_props.pstr_i_alg_name);
3024 /* Leave the critical section, we're done. */
3025 mutex_unlock(&hnode_mgr->node_mgr_lock);
3031 * ======== get_rms_fxns ========
3033 * Retrieve the RMS functions.
/*
 * Resolves the address of each RMS (Resource Management Server) entry
 * point on the DSP image into hnode_mgr->ul_fxn_addrs[], indexed by the
 * RMS* enum noted next to each name.  -ESPIPE from dev_get_symbol marks
 * a symbol that may be dynamically loaded later; both failure cases are
 * only logged here at debug level.
 */
3035 static int get_rms_fxns(struct node_mgr *hnode_mgr)
3038 struct dev_object *dev_obj = hnode_mgr->hdev_obj;
3041 static char *psz_fxns[NUMRMSFXNS] = {
3042 "RMS_queryServer", /* RMSQUERYSERVER */
3043 "RMS_configureServer", /* RMSCONFIGURESERVER */
3044 "RMS_createNode", /* RMSCREATENODE */
3045 "RMS_executeNode", /* RMSEXECUTENODE */
3046 "RMS_deleteNode", /* RMSDELETENODE */
3047 "RMS_changeNodePriority", /* RMSCHANGENODEPRIORITY */
3048 "RMS_readMemory", /* RMSREADMEMORY */
3049 "RMS_writeMemory", /* RMSWRITEMEMORY */
3050 "RMS_copy", /* RMSCOPY */
3053 for (i = 0; i < NUMRMSFXNS; i++) {
3054 status = dev_get_symbol(dev_obj, psz_fxns[i],
3055 &(hnode_mgr->ul_fxn_addrs[i]));
3057 if (status == -ESPIPE) {
3059 * May be loaded dynamically (in the future),
3060 * but return an error for now.
3062 dev_dbg(bridge, "%s: RMS function: %s currently"
3063 " not loaded\n", __func__, psz_fxns[i]);
3065 dev_dbg(bridge, "%s: Symbol not found: %s "
3066 "status = 0x%x\n", __func__,
3067 psz_fxns[i], status);
3077 * ======== ovly ========
3079 * Called during overlay.Sends command to RMS to copy a block of data.
/*
 * Loader callback: copies ul_num_bytes of overlay data from
 * dsp_load_addr to dsp_run_addr on the DSP via the bridge driver's
 * pfn_brd_mem_copy.  priv_ref is the node object registered with the
 * loader.  Returns the byte count copied (ul_bytes stays 0 on failure
 * — both error paths only log at debug level).
 */
3081 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
3082 u32 ul_num_bytes, u32 mem_space)
3084 struct node_object *hnode = (struct node_object *)priv_ref;
3085 struct node_mgr *hnode_mgr;
3090 struct bridge_dev_context *hbridge_context;
3091 /* Function interface to Bridge driver*/
3092 struct bridge_drv_interface *intf_fxns;
3096 hnode_mgr = hnode->hnode_mgr;
3098 ul_size = ul_num_bytes / hnode_mgr->udsp_word_size;
3099 ul_timeout = hnode->utimeout;
3101 /* Call new MemCopy function */
3102 intf_fxns = hnode_mgr->intf_fxns;
3103 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3106 (*intf_fxns->pfn_brd_mem_copy) (hbridge_context,
3107 dsp_run_addr, dsp_load_addr,
3108 ul_num_bytes, (u32) mem_space);
3110 ul_bytes = ul_num_bytes;
3112 pr_debug("%s: failed to copy brd memory, status 0x%x\n",
3115 pr_debug("%s: failed to get Bridge context, status 0x%x\n",
3123 * ======== mem_write ========
/*
 * Loader callback: writes ul_num_bytes from pbuf to DSP address
 * dsp_add through the bridge driver's pfn_brd_mem_write.  mem_space
 * must carry DBLL_CODE or DBLL_DATA and selects the RMS section type.
 * Always returns ul_num_bytes; the write status itself is not checked
 * here — NOTE(review): a failed pfn_brd_mem_write is silently ignored.
 */
3125 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
3126 u32 ul_num_bytes, u32 mem_space)
3128 struct node_object *hnode = (struct node_object *)priv_ref;
3129 struct node_mgr *hnode_mgr;
3133 struct bridge_dev_context *hbridge_context;
3134 /* Function interface to Bridge driver */
3135 struct bridge_drv_interface *intf_fxns;
3138 DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
3140 hnode_mgr = hnode->hnode_mgr;
3142 ul_timeout = hnode->utimeout;
3143 mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
3145 /* Call new MemWrite function */
3146 intf_fxns = hnode_mgr->intf_fxns;
3147 status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
3148 status = (*intf_fxns->pfn_brd_mem_write) (hbridge_context, pbuf,
3149 dsp_add, ul_num_bytes, mem_sect_type);
3151 return ul_num_bytes;
3154 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
3156 * ======== node_find_addr ========
3158 int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
3159 u32 offset_range, void *sym_addr_output, char *sym_name)
3161 struct node_object *node_obj;
3162 int status = -ENOENT;
3165 pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
3166 (unsigned int) node_mgr,
3167 sym_addr, offset_range,
3168 (unsigned int) sym_addr_output, sym_name);
3170 node_obj = (struct node_object *)(node_mgr->node_list->head.next);
3172 for (n = 0; n < node_mgr->num_nodes; n++) {
3173 status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
3174 offset_range, sym_addr_output, sym_name);
3179 node_obj = (struct node_object *) (node_obj->list_elem.next);