cod_delete(cod_mgr);
}
- DBC_ENSURE((DSP_SUCCEEDED(status)) ||
+ DBC_ENSURE((!status) ||
((dcd_mgr_obj == NULL) && (status == -ENOMEM)));
func_end:
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
len = strlen(sz_reg_key);
spin_lock(&dbdcd_lock);
list_for_each_entry(dcd_key, &reg_key_list, link) {
status = -ENODATA;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Create UUID value using string retrieved from
* registry. */
uuid_uuid_from_string(sz_value, &dsp_uuid_obj);
/* Retrieve paths from the registry based on struct dsp_uuid */
dw_buf_size = DCD_MAXPATHLENGTH;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
spin_lock(&dbdcd_lock);
list_for_each_entry(dcd_key, &reg_key_list, link) {
if (!strncmp(dcd_key->name, sz_reg_key,
#else
status = cod_read_section(lib, sz_sect_name, psz_coff_buf, ul_len);
#endif
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Compress DSP buffer to conform to PC format. */
if (strstr(dcd_key->path, "iva") == NULL) {
compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE);
status =
cod_read_section(lib, DCD_REGISTER_SECTION, psz_coff_buf, ul_len);
#endif
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Compress DSP buffer to conform to PC format. */
if (strstr(sz_coff_path, "iva") == NULL) {
compress_buf(psz_coff_buf, ul_len, DSPWORDSIZE);
status = -EINVAL;
DBC_ASSERT(false);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if ((strlen(sz_reg_key) + strlen(sz_obj_type)) <
DCD_MAXPATHLENGTH) {
strncat(sz_reg_key, sz_obj_type,
else
status = -EPERM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
spin_lock(&dbdcd_lock);
list_for_each_entry(dcd_key, &reg_key_list, link) {
/* See if the name matches. */
0 : -ENOKEY;
}
- if (DSP_SUCCEEDED(status))
+ if (!status)
memcpy(str_lib_name, dcd_key->path, strlen(dcd_key->path) + 1);
return status;
}
status = -EPERM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/*
* Because the node database has been updated through a
* successful object registration/de-registration operation,
}
/* Open the library */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = cod_open(dcd_mgr_obj->cod_mgr, psz_file_name,
COD_NOLOAD, &lib);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get dependent library section information. */
status = cod_get_section(lib, DEPLIBSECT, &ul_addr, &ul_len);
disp_obj->hdev_obj = hdev_obj;
/* Get Channel manager and Bridge function interface */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = dev_get_chnl_mgr(hdev_obj, &(disp_obj->hchnl_mgr));
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
(void)dev_get_intf_fxns(hdev_obj, &intf_fxns);
disp_obj->intf_fxns = intf_fxns;
}
CHNL_MODETODSP, ul_chnl_id,
&chnl_attr_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
ul_chnl_id = disp_attrs->ul_chnl_offset + CHNLFROMRMSOFFSET;
status =
(*intf_fxns->pfn_chnl_open) (&(disp_obj->chnl_from_dsp),
CHNL_MODEFROMDSP, ul_chnl_id,
&chnl_attr_obj);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Allocate buffer for commands, replies */
disp_obj->ul_bufsize = disp_attrs->ul_chnl_buf_size;
disp_obj->ul_bufsize_rms = RMS_COMMANDBUFSIZE;
status = -ENOMEM;
}
func_cont:
- if (DSP_SUCCEEDED(status))
+ if (!status)
*dispatch_obj = disp_obj;
else
delete_disp(disp_obj);
DBC_ENSURE(((DSP_FAILED(status)) && ((*dispatch_obj == NULL))) ||
- ((DSP_SUCCEEDED(status)) && *dispatch_obj));
+ (!status && *dispatch_obj));
return status;
}
* Socket Args (if DAIS socket node):
*
*/
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
total = 0; /* Total number of words in buffer so far */
pdw_buf = (rms_word *) disp_obj->pbuf;
rms_cmd = (struct rms_command *)pdw_buf;
offset = total;
}
for (i = 0; (i < task_arg_obj.num_outputs) &&
- (DSP_SUCCEEDED(status)); i++) {
+ (!status); i++) {
pdw_buf[sio_out_def_offset + i] =
(offset - args_offset)
* (sizeof(rms_word) / DSPWORDSIZE);
status = -EPERM;
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
ul_bytes = total * sizeof(rms_word);
DBC_ASSERT(ul_bytes < (RMS_COMMANDBUFSIZE * sizeof(rms_word)));
status = send_message(disp_obj, node_get_timeout(hnode),
ul_bytes, node_env);
- if (DSP_SUCCEEDED(status)) {
+ if (status >= 0) {
/*
* Message successfully received from RMS.
* Return the status of the Node's create function
status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (dev_type == DSP_UNIT) {
status = send_message(disp_obj, node_get_timeout(hnode),
sizeof(struct rms_command),
&dw_arg);
- if (DSP_SUCCEEDED(status)) {
+ if (status >= 0) {
/*
* Message successfully received from RMS.
* Return the status of the Node's delete
status = dev_get_dev_type(disp_obj->hdev_obj, &dev_type);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (dev_type == DSP_UNIT) {
status = send_message(disp_obj, node_get_timeout(hnode),
sizeof(struct rms_command),
&dw_arg);
- if (DSP_SUCCEEDED(status)) {
+ if (status >= 0) {
/*
* Message successfully received from RMS.
* Return the status of the Node's execute
strm_def_obj->timeout = strm_def.utimeout;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/*
* Since we haven't added the device name yet, subtract
* 1 from total.
status =
(*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (!CHNL_IS_IO_COMPLETE(chnl_ioc_obj)) {
if (CHNL_IS_TIMED_OUT(chnl_ioc_obj))
status = -ETIME;
status =
(*intf_fxns->pfn_chnl_get_ioc) (chnl_obj, timeout, &chnl_ioc_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (CHNL_IS_TIMED_OUT(chnl_ioc_obj)) {
status = -ETIME;
} else if (chnl_ioc_obj.byte_size < ul_bytes) {
if (*node_res_obj == NULL)
status = -EFAULT;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (mutex_lock_interruptible(&ctxt->node_mutex)) {
kfree(*node_res_obj);
return -EPERM;
if (*pstrm_res == NULL)
status = -EFAULT;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (mutex_lock_interruptible(&ctxt->strm_mutex)) {
kfree(*pstrm_res);
return -EPERM;
status = -ENOMEM;
}
/* Store the DRV Object in the Registry */
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = cfg_set_object((u32) pdrv_object, REG_DRV_OBJECT);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
*drv_obj = pdrv_object;
} else {
kfree(pdrv_object->dev_list);
u32 dw_dev_object = 0;
struct drv_object *pdrv_obj;
- if (DSP_SUCCEEDED(cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT))) {
+ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
if ((pdrv_obj->dev_list != NULL) &&
!LST_IS_EMPTY(pdrv_obj->dev_list))
dw_dev_object = (u32) lst_first(pdrv_obj->dev_list);
u32 dw_dev_extension = 0;
struct drv_object *pdrv_obj;
- if (DSP_SUCCEEDED(cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT))) {
+ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
if ((pdrv_obj->dev_node_string != NULL) &&
!LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
DBC_REQUIRE(hdev_obj != 0);
- if (DSP_SUCCEEDED(cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT))) {
+ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
if ((pdrv_obj->dev_list != NULL) &&
!LST_IS_EMPTY(pdrv_obj->dev_list)) {
DBC_REQUIRE(dev_extension != 0);
- if (DSP_SUCCEEDED(cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT))) {
+ if (!cfg_get_object((u32 *) &pdrv_obj, REG_DRV_OBJECT)) {
if ((pdrv_obj->dev_node_string != NULL) &&
!LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
dw_dev_extension =
int drv_insert_dev_object(struct drv_object *driver_obj,
struct dev_object *hdev_obj)
{
- int status = 0;
struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
DBC_REQUIRE(refs > 0);
lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj);
- DBC_ENSURE(DSP_SUCCEEDED(status)
- && !LST_IS_EMPTY(pdrv_object->dev_list));
+ DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list));
- return status;
+ return 0;
}
/*
*/
status = cfg_get_object((u32 *) &pdrv_object, REG_DRV_OBJECT);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
if (pszdev_node) {
lst_init_elem(&pszdev_node->link);
*dev_node_strg = 0;
}
- DBC_ENSURE((DSP_SUCCEEDED(status) && dev_node_strg != NULL &&
+ DBC_ENSURE((!status && dev_node_strg != NULL &&
!LST_IS_EMPTY(pdrv_object->dev_node_string)) ||
(DSP_FAILED(status) && *dev_node_strg == 0));
*/
static int request_bridge_resources(struct cfg_hostres *res)
{
- int status = 0;
struct cfg_hostres *host_res = res;
/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
host_res->dw_num_chnls = CHNL_MAXCHANNELS;
host_res->dw_chnl_buf_size = 0x400;
- return status;
+ return 0;
}
/*
dma_addr, shm_size);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* These are hard-coded values */
host_res->birq_registers = 0;
host_res->birq_attrib = 0;
status = api_call_dev_ioctl(code, &buf_in, &retval,
filp->private_data);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = retval;
} else {
dev_dbg(bridge, "%s: IOCTL Failed, code: 0x%x "
/* End drv_create */
/* Request Resources */
status = drv_request_resources((u32) &dev_node, &device_node_string);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Attempt to Start the Device */
status = dev_start_device((struct cfg_devnode *)
device_node_string);
} /* Unwinding the loaded drivers */
func_cont:
/* Attempt to Start the Board */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* BRD_AutoStart could fail if the dsp executable is not the
* correct one. We should not propagate that error
* into the device loader. */
} else {
dev_dbg(bridge, "%s: Failed\n", __func__);
} /* End api_init_complete2 */
- DBC_ENSURE((DSP_SUCCEEDED(status) && drv_obj != NULL) ||
+ DBC_ENSURE((!status && drv_obj != NULL) ||
(DSP_FAILED(status) && drv_obj == NULL));
*init_status = status;
/* Return the Driver Object */
/* Get the Manager Object from Registry
* MGR Destroy will unload the DCD dll */
- if (DSP_SUCCEEDED(cfg_get_object((u32 *) &mgr_obj, REG_MGR_OBJECT)))
+ if (!cfg_get_object((u32 *) &mgr_obj, REG_MGR_OBJECT))
(void)mgr_destroy(mgr_obj);
api_exit();
pmgr_obj = kzalloc(sizeof(struct mgr_object), GFP_KERNEL);
if (pmgr_obj) {
status = dcd_create_manager(ZLDLLNAME, &pmgr_obj->hdcd_mgr);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* If succeeded store the handle in the MGR Object */
status = cfg_set_object((u32) pmgr_obj, REG_MGR_OBJECT);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
*mgr_obj = pmgr_obj;
} else {
dcd_destroy_manager(pmgr_obj->hdcd_mgr);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (node_id > (node_index - 1)) {
status = -EINVAL;
} else {
(struct dsp_uuid *)
&node_uuid, DSP_DCDNODETYPE,
&gen_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get the Obj def */
*pndb_props =
gen_obj.obj_data.node_obj.ndb_props;
}
func_cont:
- DBC_ENSURE((DSP_SUCCEEDED(status) && *pu_num_nodes > 0) ||
+ DBC_ENSURE((!status && *pu_num_nodes > 0) ||
(DSP_FAILED(status) && *pu_num_nodes == 0));
return status;
*pu_num_procs = 0;
status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = dev_get_dev_type(hdev_obj, (u8 *) &dev_type);
status = dev_get_dev_node(hdev_obj, &dev_node);
if (dev_type != DSP_UNIT)
status = -EPERM;
- if (DSP_SUCCEEDED(status))
+ if (!status)
processor_info->processor_type = DSPTYPE64;
}
}
status2 = dcd_get_object_def(pmgr_obj->hdcd_mgr,
(struct dsp_uuid *)&temp_uuid,
DSP_DCDPROCESSORTYPE, &gen_obj);
- if (DSP_SUCCEEDED(status2)) {
+ if (!status2) {
/* Get the Obj def */
if (processor_info_size <
sizeof(struct mgr_processorextinfo)) {
*dcd_handle = (u32) pmgr_obj->hdcd_mgr;
status = 0;
}
- DBC_ENSURE((DSP_SUCCEEDED(status) && *dcd_handle != (u32) NULL) ||
+ DBC_ENSURE((!status && *dcd_handle != (u32) NULL) ||
(DSP_FAILED(status) && *dcd_handle == (u32) NULL));
return status;
if (DSP_FAILED(status) && nldr_node_obj)
kfree(nldr_node_obj);
- DBC_ENSURE((DSP_SUCCEEDED(status) && *nldr_nodeobj)
+ DBC_ENSURE((!status && *nldr_nodeobj)
|| (DSP_FAILED(status) && *nldr_nodeobj == NULL));
return status;
}
dev_get_cod_mgr(hdev_obj, &cod_mgr);
if (cod_mgr) {
status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
- DBC_ASSERT(DSP_SUCCEEDED(status));
+ DBC_ASSERT(!status);
status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
- DBC_ASSERT(DSP_SUCCEEDED(status));
+ DBC_ASSERT(!status);
status =
cod_get_base_name(cod_mgr, sz_zl_file,
COD_MAXPATHLENGTH);
- DBC_ASSERT(DSP_SUCCEEDED(status));
+ DBC_ASSERT(!status);
}
status = 0;
/* end lazy status checking */
status = -ENOMEM;
}
/* Create the DCD Manager */
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr);
/* Get dynamic loading memory sections from base lib */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status =
nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
DYNMEMSECT, &ul_addr,
&ul_len);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
psz_coff_buf =
kzalloc(ul_len * nldr_obj->us_dsp_mau_size,
GFP_KERNEL);
"segments: 0x%x\n", __func__, status);
}
}
- if (DSP_SUCCEEDED(status) && ul_len > 0) {
+ if (!status && ul_len > 0) {
/* Read section containing dynamic load mem segments */
status =
nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
DYNMEMSECT, psz_coff_buf,
ul_len);
}
- if (DSP_SUCCEEDED(status) && ul_len > 0) {
+ if (!status && ul_len > 0) {
/* Parse memory segment data */
dload_segs = (u16) (*((u32 *) psz_coff_buf));
if (dload_segs > MAXMEMSEGS)
status = -EBADF;
}
/* Parse dynamic load memory segments */
- if (DSP_SUCCEEDED(status) && dload_segs > 0) {
+ if (!status && dload_segs > 0) {
rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
GFP_KERNEL);
nldr_obj->seg_table =
}
}
/* Create Remote memory manager */
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* set the alloc, free, write functions for loader */
nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
new_attrs = save_attrs;
kfree(psz_coff_buf);
/* Get overlay nodes */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status =
cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
/* lazy check */
- DBC_ASSERT(DSP_SUCCEEDED(status));
+ DBC_ASSERT(!status);
/* First count number of overlay nodes */
status =
dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
add_ovly_node, (void *)nldr_obj);
/* Now build table of overlay nodes */
- if (DSP_SUCCEEDED(status) && nldr_obj->ovly_nodes > 0) {
+ if (!status && nldr_obj->ovly_nodes > 0) {
/* Allocate table for overlay nodes */
nldr_obj->ovly_table =
kzalloc(sizeof(struct ovly_node) *
}
}
/* Do a fake reload of the base image to get overlay section info */
- if (DSP_SUCCEEDED(status) && nldr_obj->ovly_nodes > 0) {
+ if (!status && nldr_obj->ovly_nodes > 0) {
save_attrs.write = fake_ovly_write;
save_attrs.log_write = add_ovly_info;
save_attrs.log_write_handle = nldr_obj;
status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
&save_attrs, &ul_entry);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
*nldr = (struct nldr_object *)nldr_obj;
} else {
if (nldr_obj)
*nldr = NULL;
}
/* FIXME:Temp. Fix. Must be removed */
- DBC_ENSURE((DSP_SUCCEEDED(status) && *nldr)
+ DBC_ENSURE((!status && *nldr)
|| (DSP_FAILED(status) && (*nldr == NULL)));
return status;
}
status = -EFAULT;
}
- DBC_ENSURE(DSP_SUCCEEDED(status) || ((rmm_mgr != NULL) &&
- (*rmm_mgr == NULL)));
+ DBC_ENSURE(!status || (rmm_mgr != NULL && *rmm_mgr == NULL));
return status;
}
load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
false, nldr_node_obj->lib_path, phase, 0);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (*nldr_node_obj->pf_phase_split) {
switch (phase) {
case NLDR_CREATE:
&nldr_obj->
ovly_table[i].create_sects_list,
sect_info, &sect_exists, addr, bytes);
- if (DSP_SUCCEEDED(status) && !sect_exists)
+ if (!status && !sect_exists)
nldr_obj->ovly_table[i].create_sects++;
} else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
&nldr_obj->
ovly_table[i].delete_sects_list,
sect_info, &sect_exists, addr, bytes);
- if (DSP_SUCCEEDED(status) && !sect_exists)
+ if (!status && !sect_exists)
nldr_obj->ovly_table[i].delete_sects++;
} else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
&nldr_obj->
ovly_table[i].execute_sects_list,
sect_info, &sect_exists, addr, bytes);
- if (DSP_SUCCEEDED(status) && !sect_exists)
+ if (!status && !sect_exists)
nldr_obj->ovly_table[i].execute_sects++;
} else {
&nldr_obj->
ovly_table[i].other_sects_list,
sect_info, &sect_exists, addr, bytes);
- if (DSP_SUCCEEDED(status) && !sect_exists)
+ if (!status && !sect_exists)
nldr_obj->ovly_table[i].other_sects++;
}
}
/* Add to the list */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (*lst == NULL) {
/* First in the list */
*lst = new_sect;
if (psz_file_name == NULL)
status = -ENOMEM;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get the name of the library */
if (depth == 0) {
status =
NULL);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Open the library, don't load symbols */
status =
nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
kfree(psz_file_name);
/* Check to see if library not already loaded */
- if (DSP_SUCCEEDED(status) && root_prstnt) {
+ if (!status && root_prstnt) {
lib_status =
find_in_persistent_lib_array(nldr_node_obj, root->lib);
/* Close library */
return 0;
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Check for circular dependencies. */
for (i = 0; i < depth; i++) {
if (root->lib == lib_path[i]) {
}
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Add library to current path in dependency tree */
lib_path[depth] = root->lib;
depth++;
&uuid, &nd_libs, &np_libs, phase);
}
DBC_ASSERT(nd_libs >= np_libs);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (!(*nldr_node_obj->pf_phase_split))
np_libs = 0;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get the dependent library UUIDs */
status =
dcd_get_dep_libs(nldr_node_obj->
/*
* Recursively load dependent libraries.
*/
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
for (i = 0; i < nd_libs; i++) {
/* If root library is NOT persistent, and dep library
* is, then record it. If root library IS persistent,
persistent_dep_libs[i], lib_path,
phase, depth);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if ((status != 0) &&
!root_prstnt && persistent_dep_libs[i] &&
*nldr_node_obj->pf_phase_split) {
}
/* Now we can load the root library */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
new_attrs = nldr_obj->ldr_attrs;
new_attrs.sym_arg = root;
new_attrs.rmm_handle = nldr_node_obj;
/* reserve *//* align */
status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
&(ovly_section->sect_run_addr), true);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
ovly_section = ovly_section->next_sect;
alloc_num++;
} else {
if (other_ref && *other_ref == 0) {
/* 'Allocate' memory for other overlay sections
* (create phase) */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
ovly_section = other_sects_list;
while (ovly_section) {
/* page not supported *//* align */
ovly_section->size, 0,
&(ovly_section->sect_run_addr),
true);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
ovly_section = ovly_section->next_sect;
other_alloc++;
} else {
}
}
if (*ref_count == 0) {
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Load sections for this phase */
ovly_section = phase_sects;
- while (ovly_section && DSP_SUCCEEDED(status)) {
+ while (ovly_section && !status) {
bytes =
(*nldr_obj->ovly_fxn) (nldr_node_obj->
priv_ref,
}
}
if (other_ref && *other_ref == 0) {
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Load other sections (create phase) */
ovly_section = other_sects_list;
- while (ovly_section && DSP_SUCCEEDED(status)) {
+ while (ovly_section && !status) {
bytes =
(*nldr_obj->ovly_fxn) (nldr_node_obj->
priv_ref,
free_sects(nldr_obj, other_sects_list, other_alloc);
}
func_end:
- if (DSP_SUCCEEDED(status) && (ref_count != NULL)) {
+ if (!status && (ref_count != NULL)) {
*ref_count += 1;
if (other_ref)
*other_ref += 1;
continue;
status = rmm_alloc(rmm, i, word_size, align,
- dsp_address, false);
- if (DSP_SUCCEEDED(status)) {
+ dsp_address, false);
+ if (!status) {
/* Save segid for freeing later */
rmm_addr_obj->segid = i;
break;
status = rmm_alloc(rmm, i, word_size, align,
dsp_address, false);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Save segid */
rmm_addr_obj->segid = i;
break;
goto func_end;
status = proc_get_dev_object(hprocessor, &hdev_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = dev_get_node_manager(hdev_obj, &hnode_mgr);
if (hnode_mgr == NULL)
status = -EPERM;
pnode->prio = attr_in->prio;
}
/* Create object to manage notifications */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
GFP_KERNEL);
if (pnode->ntfy_obj)
status = -ENOMEM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
node_type = node_get_type(pnode);
/* Allocate dsp_streamconnect array for device, task, and
* dais socket nodes. */
status = -ENOMEM;
}
- if (DSP_SUCCEEDED(status) && (node_type == NODE_TASK ||
+ if (!status && (node_type == NODE_TASK ||
node_type == NODE_DAISSOCKET)) {
/* Allocate arrays for maintaining stream connections */
pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
status = -ENOMEM;
}
}
- if (DSP_SUCCEEDED(status) && (node_type != NODE_DEVICE)) {
+ if (!status && (node_type != NODE_DEVICE)) {
/* Create an event that will be posted when RMS_EXIT is
* received. */
pnode->sync_done = kzalloc(sizeof(struct sync_object),
else
status = -ENOMEM;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get the shared mem mgr for this node's dev object */
status = cmm_get_handle(hprocessor, &hcmm_mgr);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Allocate a SM addr translator for this node
* w/ deflt attr */
status = cmm_xlator_create(&pnode->xlator,
hcmm_mgr, NULL);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Fill in message args */
if ((pargs != NULL) && (pargs->cb_data > 0)) {
pmsg_args =
}
}
- if (DSP_SUCCEEDED(status) && node_type != NODE_DEVICE) {
+ if (!status && node_type != NODE_DEVICE) {
/* Create a message queue for this node */
intf_fxns = hnode_mgr->intf_fxns;
status =
pnode);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Create object for dynamic loading */
status = hnode_mgr->nldr_fxns.pfn_allocate(hnode_mgr->nldr_obj,
* STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
* GPP Address, Read the value in that address and override the
* stack_seg value in task args */
- if (DSP_SUCCEEDED(status) &&
+ if (!status &&
(char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
stack_seg_name != NULL) {
if (strcmp((char *)
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Add the node to the node manager's list of allocated
* nodes. */
lst_init_elem((struct list_head *)pnode);
* (for overlay and dll) */
pnode->phase_split = true;
- if (DSP_SUCCEEDED(status))
+ if (!status)
*ph_node = pnode;
/* Notify all clients registered for DSP_NODESTATECHANGE. */
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
drv_insert_node_res_element(*ph_node, &node_res, pr_ctxt);
drv_proc_node_update_heap_status(node_res, true);
drv_proc_node_update_status(node_res, true);
}
DBC_ENSURE((DSP_FAILED(status) && (*ph_node == NULL)) ||
- (DSP_SUCCEEDED(status) && *ph_node));
+ (!status && *ph_node));
func_end:
dev_dbg(bridge, "%s: hprocessor: %p node_uuid: %p pargs: %p attr_in:"
" %p ph_node: %p status: 0x%x\n", __func__, hprocessor,
status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
pattr->segment_id, set_info);
}
- if (DSP_SUCCEEDED(status) && (!va_flag)) {
+ if (!status && (!va_flag)) {
if (pattr->segment_id != 1) {
/* Node supports single SM segment only. */
status = -EBADR;
status = -EPERM;
break;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* allocate physical buffer from seg_id in node's
* translator */
(void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
[RMSCHANGENODEPRIORITY],
hnode->node_env, prio);
}
- if (DSP_SUCCEEDED(status))
+ if (status >= 0)
NODE_SET_PRIORITY(hnode, prio);
}
(node2 != (struct node_object *)DSP_HGPPNODE && !node2))
status = -EFAULT;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* The two nodes must be on the same processor */
if (node1 != (struct node_object *)DSP_HGPPNODE &&
node2 != (struct node_object *)DSP_HGPPNODE &&
status = -EPERM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* node_get_type() will return NODE_GPP if hnode =
* DSP_HGPPNODE. */
node1_type = node_get_type(node1);
MAX_INPUTS(node2)))
status = -EINVAL;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/*
* Only the following types of connections are allowed:
* task/dais socket < == > task/dais socket
/*
* Check stream mode. Default is STRMMODE_PROCCOPY.
*/
- if (DSP_SUCCEEDED(status) && pattrs) {
+ if (!status && pattrs) {
if (pattrs->strm_mode != STRMMODE_PROCCOPY)
status = -EPERM; /* illegal stream mode */
if (node2_type != NODE_GPP && node_get_state(node2) != NODE_ALLOCATED)
status = -EBADR;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Check that stream indices for task and dais socket nodes
* are not already in use. (Device nodes checked later) */
if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
}
}
/* Connecting two task nodes? */
- if (DSP_SUCCEEDED(status) && ((node1_type == NODE_TASK ||
+ if (!status && ((node1_type == NODE_TASK ||
node1_type == NODE_DAISSOCKET)
&& (node2_type == NODE_TASK
|| node2_type == NODE_DAISSOCKET))) {
}
}
/* Connecting task node to host? */
- if (DSP_SUCCEEDED(status) && (node1_type == NODE_GPP ||
+ if (!status && (node1_type == NODE_GPP ||
node2_type == NODE_GPP)) {
if (node1_type == NODE_GPP) {
chnl_mode = CHNL_MODETODSP;
}
status = -ENOMEM;
func_cont2:
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (node1 == (struct node_object *)DSP_HGPPNODE) {
node2->inputs[stream2].type = HOSTCONNECT;
node2->inputs[stream2].dev_id = chnl_id;
}
}
/* Connecting task node to device node? */
- if (DSP_SUCCEEDED(status) && ((node1_type == NODE_DEVICE) ||
+ if (!status && ((node1_type == NODE_DEVICE) ||
(node2_type == NODE_DEVICE))) {
if (node2_type == NODE_DEVICE) {
/* node1 == > device */
dev_node_obj->device_owner = hnode;
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Fill in create args */
if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
node1->create_args.asa.task_arg_obj.num_outputs++;
if (node_get_state(hnode) != NODE_ALLOCATED)
status = -EBADR;
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = proc_get_processor_id(pnode->hprocessor, &proc_id);
if (DSP_FAILED(status))
hnode->num_outputs - 1))
status = -ENOTCONN;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* If node's create function is not loaded, load it */
/* Boost the OPP level to max level that DSP can be requested */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
status = hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
NLDR_CREATE);
/* Get address of node's create function */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
hnode->loaded = true;
if (node_type != NODE_DEVICE) {
status = get_fxn_address(hnode, &ul_create_fxn,
(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
/* Get address of iAlg functions, if socket node */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (node_type == NODE_DAISSOCKET) {
status = hnode_mgr->nldr_fxns.pfn_get_fxn_addr
(hnode->nldr_node_obj,
}
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (node_type != NODE_DEVICE) {
status = disp_node_create(hnode_mgr->disp_obj, hnode,
hnode_mgr->ul_fxn_addrs
ul_create_fxn,
&(hnode->create_args),
&(hnode->node_env));
- if (DSP_SUCCEEDED(status)) {
+ if (status >= 0) {
/* Set the message queue id to the node env
* pointer */
intf_fxns = hnode_mgr->intf_fxns;
__func__, status1);
func_cont2:
/* Update node state and node manager state */
- if (DSP_SUCCEEDED(status)) {
+ if (status >= 0) {
NODE_SET_STATE(hnode, NODE_CREATED);
hnode_mgr->num_created++;
goto func_cont;
/* Free access to node dispatcher */
mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
- if (DSP_SUCCEEDED(status)) {
+ if (status >= 0) {
proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
status = -ENOMEM;
}
/* get devNodeType */
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = dev_get_dev_type(hdev_obj, &dev_type);
/* Create the DCD Manager */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status =
dcd_create_manager(sz_zl_file, &node_mgr_obj->hdcd_mgr);
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = get_proc_props(node_mgr_obj, hdev_obj);
}
/* Create NODE Dispatcher */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
disp_attr_obj.ul_chnl_offset = node_mgr_obj->ul_chnl_offset;
disp_attr_obj.ul_chnl_buf_size = node_mgr_obj->ul_chnl_buf_size;
disp_attr_obj.proc_family = node_mgr_obj->proc_family;
&disp_attr_obj);
}
/* Create a STRM Manager */
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
/* Get msg_ctrl queue manager */
dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
node_mgr_obj->ul_chnl_offset + 1);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* NO RM Server on the IVA */
if (dev_type != IVA_UNIT) {
/* Get addresses of any RMS functions loaded */
}
/* Get loader functions and create loader */
- if (DSP_SUCCEEDED(status))
+ if (!status)
node_mgr_obj->nldr_fxns = nldr_fxns; /* Dyn loader funcs */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
nldr_attrs_obj.pfn_ovly = ovly;
nldr_attrs_obj.pfn_write = mem_write;
nldr_attrs_obj.us_dsp_word_size = node_mgr_obj->udsp_word_size;
hdev_obj,
&nldr_attrs_obj);
}
- if (DSP_SUCCEEDED(status))
+ if (!status)
*node_man = node_mgr_obj;
else
delete_node_mgr(node_mgr_obj);
DBC_ENSURE((DSP_FAILED(status) && (*node_man == NULL)) ||
- (DSP_SUCCEEDED(status) && *node_man));
+ (!status && *node_man));
return status;
}
status =
hnode_mgr->nldr_fxns.
pfn_load(hnode->nldr_node_obj, NLDR_DELETE);
- if (DSP_SUCCEEDED(status))
+ if (!status)
hnode->loaded = true;
else
pr_err("%s: fail - load delete code:"
}
}
func_cont1:
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Unblock a thread trying to terminate the node */
(void)sync_set_event(hnode->sync_done);
if (proc_id == DSP_UNIT) {
DELETEPHASE);
} else if (proc_id == IVA_UNIT)
ul_delete_fxn = (u32) hnode->node_env;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = proc_get_state(hprocessor,
&proc_state,
sizeof(struct
}
status = proc_get_processor_id(pnode->hprocessor, &proc_id);
if (proc_id == DSP_UNIT) {
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (pattr == NULL) {
/* set defaults */
pattr = &node_dfltbufattrs;
else
*nldr_ovlyobj = node_mgr_obj->nldr_obj;
- DBC_ENSURE(DSP_SUCCEEDED(status) || ((nldr_ovlyobj != NULL) &&
- (*nldr_ovlyobj == NULL)));
+ DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
return status;
}
if (proc_id == IVA_UNIT)
status = -ENOSYS;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
hnode_mgr = hnode->hnode_mgr;
/* Enter critical section */
hnode->node_env, NODE_SUSPENDEDPRI);
/* Update state */
- if (DSP_SUCCEEDED(status))
+ if (status >= 0)
NODE_SET_STATE(hnode, NODE_PAUSED);
func_cont:
/* End of sync_enter_cs */
/* Leave critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
- if (DSP_SUCCEEDED(status)) {
+ if (status >= 0) {
proc_notify_clients(hnode->hprocessor,
DSP_NODESTATECHANGE);
ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
node_type != NODE_DAISSOCKET)
status = -EPERM;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Check node state. Can't send messages to a node after
* we've sent the RMS_EXIT command. There is still the
* possibility that node_terminate can be called after we've
status = -ESRCH;
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
intf_fxns = hnode_mgr->intf_fxns;
status = (*intf_fxns->pfn_msg_put) (hnode->msg_queue_obj,
&new_msg, utimeout);
if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
status = -EINVAL;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (event_mask == DSP_NODESTATECHANGE) {
status = ntfy_register(hnode->ntfy_obj, hnotification,
event_mask & DSP_NODESTATECHANGE,
if (state != NODE_CREATED && state != NODE_PAUSED)
status = -EBADR;
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = proc_get_processor_id(pnode->hprocessor, &proc_id);
if (DSP_FAILED(status))
status =
hnode_mgr->nldr_fxns.pfn_load(hnode->nldr_node_obj,
NLDR_EXECUTE);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
hnode->loaded = true;
} else {
pr_err("%s: fail - load execute code: 0x%x\n",
__func__, status);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get address of node's execute function */
if (proc_id == IVA_UNIT)
ul_execute_fxn = (u32) hnode->node_env;
EXECUTEPHASE);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
ul_fxn_addr = hnode_mgr->ul_fxn_addrs[RMSEXECUTENODE];
status =
disp_node_run(hnode_mgr->disp_obj, hnode,
}
func_cont1:
/* Update node state. */
- if (DSP_SUCCEEDED(status))
+ if (status >= 0)
NODE_SET_STATE(hnode, NODE_RUNNING);
else /* Set state back to previous value */
NODE_SET_STATE(hnode, state);
/*End of sync_enter_cs */
/* Exit critical section */
mutex_unlock(&hnode_mgr->node_mgr_lock);
- if (DSP_SUCCEEDED(status)) {
+ if (status >= 0) {
proc_notify_clients(hnode->hprocessor, DSP_NODESTATECHANGE);
ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
}
status = proc_get_processor_id(pnode->hprocessor, &proc_id);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
hnode_mgr = hnode->hnode_mgr;
node_type = node_get_type(hnode);
if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
status = -EPERM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Check node state */
mutex_lock(&hnode_mgr->node_mgr_lock);
state = node_get_state(hnode);
/* end of sync_enter_cs */
mutex_unlock(&hnode_mgr->node_mgr_lock);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/*
* Send exit message. Do not change state to NODE_DONE
* here. That will be done in callback.
}
}
func_cont:
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Enter CS before getting exit status, in case node was
* deleted. */
mutex_lock(&hnode_mgr->node_mgr_lock);
status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
DSP_DCDNODETYPE, dcd_prop);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
hnode->ntype = node_type = pndb_props->ntype;
/* Create UUID value to set in registry. */
}
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Fill in create args that come from NDB */
if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
if (!pbridge_context)
status = -EFAULT;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
host_res = pbridge_context->resources;
if (!host_res)
return -EPERM;
(struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
(struct dcd_genericobj *)&dcd_node_props);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
*node_props = dcd_node_props.ndb_props;
kfree(dcd_node_props.pstr_create_phase_fxn);
/* Call new MemCopy function */
intf_fxns = hnode_mgr->intf_fxns;
status = dev_get_bridge_context(hnode_mgr->hdev_obj, &hbridge_context);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status =
(*intf_fxns->pfn_brd_mem_copy) (hbridge_context,
dsp_run_addr, dsp_load_addr,
ul_num_bytes, (u32) mem_space);
- if (DSP_SUCCEEDED(status))
+ if (!status)
ul_bytes = ul_num_bytes;
else
pr_debug("%s: failed to copy brd memory, status 0x%x\n",
status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
offset_range, sym_addr_output, sym_name);
- if (DSP_SUCCEEDED(status))
+ if (!status)
break;
node_obj = (struct node_object *) (node_obj->list_elem.next);
/* Get the Driver and Manager Object Handles */
status = cfg_get_object((u32 *) &hdrv_obj, REG_DRV_OBJECT);
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = cfg_get_object((u32 *) &hmgr_obj, REG_MGR_OBJECT);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get the Device Object */
status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj);
}
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = dev_get_dev_type(hdev_obj, &dev_type);
if (DSP_FAILED(status))
p_proc_object->utimeout = PROC_DFLT_TIMEOUT;
status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = dev_get_bridge_context(hdev_obj,
&p_proc_object->hbridge_context);
if (DSP_FAILED(status))
else
status = -ENOMEM;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Insert the Processor Object into the DEV List.
* Return handle to this Processor Object:
* Find out if the Device is already attached to a
(u32) p_proc_object,
&p_proc_object->
is_already_attached);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (p_proc_object->is_already_attached)
status = 0;
} else {
kfree(p_proc_object);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
*ph_processor = (void *)p_proc_object;
pr_ctxt->hprocessor = *ph_processor;
(void)proc_notify_clients(p_proc_object,
}
func_end:
DBC_ENSURE((status == -EPERM && *ph_processor == NULL) ||
- (DSP_SUCCEEDED(status) && p_proc_object) ||
+ (!status && p_proc_object) ||
(status == 0 && p_proc_object));
return status;
p_proc_object->hdev_obj = hdev_obj;
p_proc_object->hmgr_obj = hmgr_obj;
status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns);
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = dev_get_bridge_context(hdev_obj,
&p_proc_object->hbridge_context);
if (DSP_FAILED(status))
p_proc_object->processor_id = dev_type;
status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file),
sz_exec_file);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
argv[0] = sz_exec_file;
argv[1] = NULL;
/* ...and try to load it: */
status = proc_load(p_proc_object, 1, (const char **)argv, NULL);
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = proc_start(p_proc_object);
}
kfree(p_proc_object->psz_last_coff);
/* timeout = arg->cb_data; */
status = pwr_wake_dsp(timeout);
} else
- if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_dev_cntrl)
+ if (!((*p_proc_object->intf_fxns->pfn_dev_cntrl)
(p_proc_object->hbridge_context, dw_cmd,
arg))) {
status = 0;
DBC_REQUIRE(pu_allocated != NULL);
if (p_proc_object) {
- if (DSP_SUCCEEDED(dev_get_node_manager(p_proc_object->hdev_obj,
+ if (!(dev_get_node_manager(p_proc_object->hdev_obj,
&hnode_mgr))) {
if (hnode_mgr) {
status = node_enum_nodes(hnode_mgr, node_tab,
}
status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = nldr_get_rmm_manager(nldr_obj, &rmm);
if (rmm) {
if (!rmm_stat(rmm,
status = -EFAULT;
}
- DBC_ENSURE((DSP_SUCCEEDED(status) && *device_obj != NULL) ||
+ DBC_ENSURE((!status && *device_obj != NULL) ||
(DSP_FAILED(status) && *device_obj == NULL));
return status;
/* First, retrieve BRD state information */
status = (*p_proc_object->intf_fxns->pfn_brd_status)
(p_proc_object->hbridge_context, &brd_status);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
switch (brd_status) {
case BRD_STOPPED:
proc_state_obj->proc_state = PROC_STOPPED;
/* Get the DCD Handle */
status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
(u32 *) &hdcd_handle);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Before proceeding with new load,
* check if a previously registered COFF
* exists.
} else {
status = -ENOMEM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Auto-register data base */
/* Get the DCD Handle */
status = mgr_get_dcd_handle(p_proc_object->hmgr_obj,
(u32 *) &hdcd_handle);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Auto register nodes in specified COFF
* file. If registration did not fail,
* (status = 0 or -EACCES)
}
}
/* Update shared memory address and size */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Create the message manager. This must be done
* before calling the IOOnLoaded function. */
dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
if (!hmsg_mgr) {
status = msg_create(&hmsg_mgr, p_proc_object->hdev_obj,
(msg_onexit) node_on_exit);
- DBC_ASSERT(DSP_SUCCEEDED(status));
+ DBC_ASSERT(!status);
dev_set_msg_mgr(p_proc_object->hdev_obj, hmsg_mgr);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Set the Device object's message manager */
status = dev_get_io_mgr(p_proc_object->hdev_obj, &hio_mgr);
if (hio_mgr)
else
status = -EFAULT;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Now, attempt to load an exec: */
/* Boost the OPP level to Maximum level supported by baseport */
#endif
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Update the Processor status to loaded */
status = (*p_proc_object->intf_fxns->pfn_brd_set_state)
(p_proc_object->hbridge_context, BRD_LOADED);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
p_proc_object->proc_state = PROC_LOADED;
if (p_proc_object->ntfy_obj)
proc_notify_clients(p_proc_object,
DSP_PROCESSORSTATECHANGE);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = proc_get_processor_id(hprocessor, &proc_id);
if (proc_id == DSP_UNIT) {
/* Use all available DSP address space after EXTMEM
* for DMM */
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = cod_get_sym_value(cod_mgr, EXTEND,
&dw_ext_end);
/* Reset DMM structs and add an initial free chunk */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status =
dev_get_dmm_mgr(p_proc_object->hdev_obj,
&dmm_mgr);
/* Restore the original argv[0] */
kfree(new_envp);
user_args[0] = pargv0;
- if (DSP_SUCCEEDED(status)) {
- if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_brd_status)
+ if (!status) {
+ if (!((*p_proc_object->intf_fxns->pfn_brd_status)
(p_proc_object->hbridge_context, &brd_state))) {
pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
kfree(drv_datap->base_img);
if (DSP_FAILED(status))
pr_err("%s: Processor failed to load\n", __func__);
- DBC_ENSURE((DSP_SUCCEEDED(status)
+ DBC_ENSURE((!status
&& p_proc_object->proc_state == PROC_LOADED)
|| DSP_FAILED(status));
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
status = -EFAULT;
/* Add mapping to the page tables. */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Mapped address = MSB of VA | LSB of PA */
tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
(p_proc_object->hbridge_context, pa_align, va_align,
size_align, ul_map_attr, map_obj->pages);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Mapped address = MSB of VA | LSB of PA */
*pp_map_addr = (void *) tmp_addr;
} else {
if (notify_type != DSP_SIGNALEVENT)
status = -EINVAL;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* If event mask is not DSP_SYSERROR, DSP_MMUFAULT,
* or DSP_PWRERROR then register event immediately. */
if (event_mask &
/* Call dev_create2 */
status = dev_create2(p_proc_object->hdev_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
p_proc_object->proc_state = PROC_RUNNING;
/* Deep sleep switches off the peripheral clocks.
* we just put the DSP CPU in idle in the idle loop.
p_proc_object->proc_state = PROC_STOPPED;
}
func_cont:
- if (DSP_SUCCEEDED(status)) {
- if (DSP_SUCCEEDED((*p_proc_object->intf_fxns->pfn_brd_status)
+ if (!status) {
+ if (!((*p_proc_object->intf_fxns->pfn_brd_status)
(p_proc_object->hbridge_context, &brd_state))) {
pr_info("%s: dsp in running state\n", __func__);
DBC_ASSERT(brd_state != BRD_HIBERNATION);
}
func_end:
- DBC_ENSURE((DSP_SUCCEEDED(status) && p_proc_object->proc_state ==
+ DBC_ENSURE((!status && p_proc_object->proc_state ==
PROC_RUNNING) || DSP_FAILED(status));
return status;
}
}
/* check if there are any running nodes */
status = dev_get_node_manager(p_proc_object->hdev_obj, &hnode_mgr);
- if (DSP_SUCCEEDED(status) && hnode_mgr) {
+ if (!status && hnode_mgr) {
status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size,
&num_nodes, &nodes_allocated);
if ((status == -EINVAL) || (nodes_allocated > 0)) {
status =
(*p_proc_object->intf_fxns->
pfn_brd_stop) (p_proc_object->hbridge_context);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
dev_dbg(bridge, "%s: processor in standby mode\n", __func__);
p_proc_object->proc_state = PROC_STOPPED;
/* Destroy the Node Manager, msg_ctrl Manager */
- if (DSP_SUCCEEDED(dev_destroy2(p_proc_object->hdev_obj))) {
+ if (!(dev_destroy2(p_proc_object->hdev_obj))) {
/* Destroy the msg_ctrl by calling msg_delete */
dev_get_msg_mgr(p_proc_object->hdev_obj, &hmsg_mgr);
if (hmsg_mgr) {
msg_delete(hmsg_mgr);
dev_set_msg_mgr(p_proc_object->hdev_obj, NULL);
}
- if (DSP_SUCCEEDED
- ((*p_proc_object->
+ if (!((*p_proc_object->
intf_fxns->pfn_brd_status) (p_proc_object->
hbridge_context,
&brd_state)))
*/
status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
/* Remove mapping from the page tables. */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
(p_proc_object->hbridge_context, va_align, size_align);
}
/* This is needed only when Device is loaded when it is
* already 'ACTIVE' */
/* Destroy the Node Manager, msg_ctrl Manager */
- if (DSP_SUCCEEDED(dev_destroy2(proc_obj->hdev_obj))) {
+ if (!dev_destroy2(proc_obj->hdev_obj)) {
/* Destroy the msg_ctrl by calling msg_delete */
dev_get_msg_mgr(proc_obj->hdev_obj, &hmsg_mgr);
if (hmsg_mgr) {
}
}
/* Place the Board in the Monitor State */
- if (DSP_SUCCEEDED((*proc_obj->intf_fxns->pfn_brd_monitor)
+ if (!((*proc_obj->intf_fxns->pfn_brd_monitor)
(proc_obj->hbridge_context))) {
status = 0;
- if (DSP_SUCCEEDED((*proc_obj->intf_fxns->pfn_brd_status)
+ if (!((*proc_obj->intf_fxns->pfn_brd_status)
(proc_obj->hbridge_context, &brd_state)))
DBC_ASSERT(brd_state == BRD_IDLE);
}
- DBC_ENSURE((DSP_SUCCEEDED(status) && brd_state == BRD_IDLE) ||
+ DBC_ENSURE((!status && brd_state == BRD_IDLE) ||
DSP_FAILED(status));
return status;
}
hdev_obj != NULL;
hdev_obj = (struct dev_object *)drv_get_next_dev_object
((u32) hdev_obj)) {
- if (DSP_SUCCEEDED(dev_get_bridge_context(hdev_obj,
+ if (!(dev_get_bridge_context(hdev_obj,
(struct bridge_dev_context
**)&dw_context))) {
- if (DSP_SUCCEEDED
- (dev_get_intf_fxns
- (hdev_obj,
+ if (!(dev_get_intf_fxns(hdev_obj,
(struct bridge_drv_interface **)&intf_fxns))) {
status =
(*intf_fxns->pfn_dev_cntrl) (dw_context,
hdev_obj != NULL;
hdev_obj = (struct dev_object *)drv_get_next_dev_object
((u32) hdev_obj)) {
- if (DSP_SUCCEEDED(dev_get_bridge_context(hdev_obj,
+ if (!(dev_get_bridge_context(hdev_obj,
(struct bridge_dev_context
**)&dw_context))) {
- if (DSP_SUCCEEDED
- (dev_get_intf_fxns
- (hdev_obj,
+ if (!(dev_get_intf_fxns(hdev_obj,
(struct bridge_drv_interface **)&intf_fxns))) {
status =
(*intf_fxns->pfn_dev_cntrl) (dw_context,
hdev_obj != NULL;
hdev_obj = (struct dev_object *)drv_get_next_dev_object
((u32) hdev_obj)) {
- if (DSP_SUCCEEDED(dev_get_bridge_context(hdev_obj,
+ if (!(dev_get_bridge_context(hdev_obj,
(struct bridge_dev_context
**)&dw_context))) {
- if (DSP_SUCCEEDED
- (dev_get_intf_fxns
- (hdev_obj,
+ if (!(dev_get_intf_fxns(hdev_obj,
(struct bridge_drv_interface **)&intf_fxns))) {
status =
(*intf_fxns->pfn_dev_cntrl) (dw_context,
(struct list_head *)
sect);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* No overlap - allocate list element for new section. */
new_sect = kzalloc(sizeof(struct rmm_ovly_sect), GFP_KERNEL);
if (new_sect == NULL) {
}
func_cont:
/* Initialize overlay memory list */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
target->ovly_list = kzalloc(sizeof(struct lst_list),
GFP_KERNEL);
if (target->ovly_list == NULL)
INIT_LIST_HEAD(&target->ovly_list->head);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
*target_obj = target;
} else {
*target_obj = NULL;
}
- DBC_ENSURE((DSP_SUCCEEDED(status) && *target_obj)
+ DBC_ENSURE((!status && *target_obj)
|| (DSP_FAILED(status) && *target_obj == NULL));
return status;
status =
(*intf_fxns->pfn_chnl_get_info) (stream_obj->chnl_obj,
&chnl_info_obj);
- DBC_ASSERT(DSP_SUCCEEDED(status));
+ DBC_ASSERT(!status);
if (chnl_info_obj.cio_cs > 0 || chnl_info_obj.cio_reqs > 0)
status = -EPIPE;
strm_mgr_obj->dev_obj = dev_obj;
/* Get Channel manager and Bridge function interface */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = dev_get_chnl_mgr(dev_obj, &(strm_mgr_obj->hchnl_mgr));
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
(void)dev_get_intf_fxns(dev_obj,
&(strm_mgr_obj->intf_fxns));
DBC_ASSERT(strm_mgr_obj->intf_fxns != NULL);
}
}
- if (DSP_SUCCEEDED(status))
+ if (!status)
*strm_man = strm_mgr_obj;
else
kfree(strm_mgr_obj);
- DBC_ENSURE((DSP_SUCCEEDED(status) && *strm_man) ||
+ DBC_ENSURE((!status && *strm_man) ||
(DSP_FAILED(status) && *strm_man == NULL));
return status;
if (!stream_obj)
status = -EFAULT;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
for (i = 0; i < num_bufs; i++) {
DBC_ASSERT(stream_obj->xlator != NULL);
status =
status = -ESRCH;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = (*intf_fxns->pfn_chnl_add_io_req)
(stream_obj->chnl_obj, pbuf, ul_bytes, ul_buf_size,
(u32) tmp_buf, dw_arg);
/* Get the channel id from the node (set in node_connect()) */
status = node_get_channel_id(hnode, dir, index, &ul_chnl_id);
}
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = node_get_strm_mgr(hnode, &strm_mgr_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
strm_obj = kzalloc(sizeof(struct strm_object), GFP_KERNEL);
if (strm_obj == NULL) {
status = -ENOMEM;
DBC_ASSERT(strm_obj->strm_mode != STRMMODE_LDMA);
/* Get the shared mem mgr for this stream's dev object */
status = dev_get_cmm_mgr(strm_mgr_obj->dev_obj, &hcmm_mgr);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/*Allocate a SM addr translator for this strm. */
status = cmm_xlator_create(&strm_obj->xlator, hcmm_mgr, NULL);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
DBC_ASSERT(strm_obj->segment_id > 0);
/* Set translators Virt Addr attributes */
status = cmm_xlator_info(strm_obj->xlator,
}
}
func_cont:
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Open channel */
chnl_mode = (dir == DSP_TONODE) ?
CHNL_MODETODSP : CHNL_MODEFROMDSP;
}
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
*strm_objct = strm_obj;
drv_proc_insert_strm_res_element(*strm_objct, &hstrm_res,
pr_ctxt);
}
/* ensure we return a documented error code */
- DBC_ENSURE((DSP_SUCCEEDED(status) && *strm_objct) ||
+ DBC_ENSURE((!status && *strm_objct) ||
(*strm_objct == NULL && (status == -EFAULT ||
status == -EPERM
|| status == -EINVAL)));
(*intf_fxns->pfn_chnl_get_ioc) (stream_obj->chnl_obj,
stream_obj->utimeout,
&chnl_ioc_obj);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
*nbytes = chnl_ioc_obj.byte_size;
if (buff_size)
*buff_size = chnl_ioc_obj.buf_size;
}
}
/* Translate zerocopy buffer if channel not canceled. */
- if (DSP_SUCCEEDED(status)
+ if (!status
&& (!CHNL_IS_IO_CANCELLED(chnl_ioc_obj))
&& (stream_obj->strm_mode == STRMMODE_ZEROCOPY)) {
/*
}
func_end:
/* ensure we return a documented return code */
- DBC_ENSURE(DSP_SUCCEEDED(status) || status == -EFAULT ||
+ DBC_ENSURE(!status || status == -EFAULT ||
status == -ETIME || status == -ESRCH ||
status == -EPERM);
status = -ENOSYS;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
intf_fxns = stream_obj->strm_mgr_obj->intf_fxns;
status =
hnotification);
}
/* ensure we return a documented return code */
- DBC_ENSURE(DSP_SUCCEEDED(status) || status == -EFAULT ||
+ DBC_ENSURE(!status || status == -EFAULT ||
status == -ETIME || status == -ESRCH ||
status == -ENOSYS || status == -EPERM);
return status;
}
}
- if (DSP_SUCCEEDED(status) && utimeout > 0 && *pmask == 0) {
+ if (!status && utimeout > 0 && *pmask == 0) {
/* Non-zero timeout */
sync_events = kmalloc(strms * sizeof(struct sync_object *),
GFP_KERNEL);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status =
sync_wait_on_multiple_events(sync_events, strms,
utimeout, &index);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Since we waited on the event, we have to
* reset it */
sync_set_event(sync_events[index]);
func_end:
kfree(sync_events);
- DBC_ENSURE((DSP_SUCCEEDED(status) && (*pmask != 0 || utimeout == 0)) ||
+ DBC_ENSURE((!status && (*pmask != 0 || utimeout == 0)) ||
(DSP_FAILED(status) && *pmask == 0));
return status;
status = (*intf_fxns->pfn_chnl_close)
(stream_obj->chnl_obj);
/* Free all SM address translator resources */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (stream_obj->xlator) {
/* force free */
(void)cmm_xlator_delete(stream_obj->