omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX);
if (pchnl->chnl_type == CHNL_PCPY) {
/* This is a processor-copy channel. */
- if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
+ if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) {
/* Check buffer size on output channels for fit. */
if (byte_size >
io_buf_size(pchnl->chnl_mgr_obj->hio_mgr))
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get a free chirp: */
chnl_packet_obj =
(struct chnl_irp *)lst_get_head(pchnl->free_packets_list);
status = -EIO;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Enqueue the chirp on the chnl's IORequest queue: */
chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf =
host_buf;
status = bridge_chnl_cancel_io(chnl_obj);
}
func_cont:
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Assert I/O on this channel is now cancelled: Protects
* from io_dpc. */
DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL));
chnl_mgr_obj->dw_output_mask = 0;
chnl_mgr_obj->dw_last_output = 0;
chnl_mgr_obj->hdev_obj = hdev_obj;
- if (DSP_SUCCEEDED(status))
- spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
+ spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock);
} else {
status = -ENOMEM;
}
} else {
status = -EFAULT;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Note: Currently, if another thread continues to add IO
* requests to this channel, this function will continue to
* flush all such queued IO requests. */
&& (pchnl->chnl_type == CHNL_PCPY)) {
/* Wait for IO completions, up to the specified
* timeout: */
- while (!LST_IS_EMPTY(pchnl->pio_requests) &&
- DSP_SUCCEEDED(status)) {
+ while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) {
status = bridge_chnl_get_ioc(chnl_obj,
timeout, &chnl_ioc_obj);
if (DSP_FAILED(status))
else
status = -ENOMEM;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
GFP_KERNEL);
if (pchnl->ntfy_obj)
status = -ENOMEM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (pchnl->pio_completions && pchnl->pio_requests &&
pchnl->free_packets_list) {
/* Initialize CHNL object fields: */
*chnl = pchnl;
}
func_end:
- DBC_ENSURE((DSP_SUCCEEDED(status) && pchnl) || (*chnl == NULL));
+ DBC_ENSURE((!status && pchnl) || (*chnl == NULL));
return status;
}
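The conversion above is the pattern repeated throughout this patch: the driver already stores 0 for success and a negative errno for failure, so the legacy DSP_SUCCEEDED(status) test reduces to !status. A stand-alone sketch of the equivalence, assuming the legacy macro treated any non-negative value as success (the macro definition here is an illustration, not the driver's header):

#include <assert.h>

#define DSP_SUCCEEDED(x)	((int)(x) >= 0)	/* assumed legacy definition */

int main(void)
{
	int codes[] = { 0, -5 /* -EIO */, -12 /* -ENOMEM */, -22 /* -EINVAL */ };
	unsigned int i;

	for (i = 0; i < sizeof(codes) / sizeof(codes[0]); i++) {
		int status = codes[i];

		/* With only 0 or negative errno values in play, both tests agree. */
		assert(DSP_SUCCEEDED(status) == !status);
	}
	return 0;
}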
status = -EPERM;
}
- if (DSP_SUCCEEDED(status))
+ if (!status)
set_dsp_clk_active(&dsp_clocks, clk_id);
out:
status = -EPERM;
}
- if (DSP_SUCCEEDED(status))
+ if (!status)
set_dsp_clk_inactive(&dsp_clocks, clk_id);
out:
spin_lock_init(&pio_mgr->dpc_lock);
- if (DSP_SUCCEEDED(status))
- status = dev_get_dev_node(hdev_obj, &dev_node_obj);
+ status = dev_get_dev_node(hdev_obj, &dev_node_obj);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
pio_mgr->hbridge_context = hbridge_context;
pio_mgr->shared_irq = mgr_attrts->irq_shared;
if (dsp_wdt_init())
dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
__func__, (ul_shm_length - sizeof(struct shm)));
- if (DSP_SUCCEEDED(status)) {
- /* Get start and length of message part of shared memory */
- status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
+ /* Get start and length of message part of shared memory */
+ status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
&ul_msg_base);
- }
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
&ul_msg_limit);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (ul_msg_limit <= ul_msg_base) {
status = -EINVAL;
} else {
} else {
status = -EFAULT;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
status =
cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
if (DSP_FAILED(status))
status = -EFAULT;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status =
cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
if (DSP_FAILED(status))
status = -EFAULT;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
if (DSP_FAILED(status))
status = -EFAULT;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get memory reserved in host resources */
(void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
&hio_mgr->ext_proc_info,
goto func_end;
}
/* Get end of 1st SM Heap region */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get start and length of message part of shared memory */
status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
&shm0_end);
}
}
/* Start of Gpp reserved region */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Get start and length of message part of shared memory */
status =
cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
}
}
/* Register with CMM */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
CMM_ALLSEGMENTS);
}
}
/* Register new SM region(s) */
- if (DSP_SUCCEEDED(status) && (shm0_end - ul_shm0_base) > 0) {
+ if (!status && (shm0_end - ul_shm0_base) > 0) {
/* Calc size (bytes) of SM the GPP can alloc from */
ul_rsrvd_size =
(shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
} else {
status = -EFAULT;
}
- if (DSP_SUCCEEDED(status))
+ if (!status)
status =
cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
- if (DSP_SUCCEEDED(status))
+ if (!status)
/* trace_cur_pos will hold the address of a DSP pointer */
status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
&trace_cur_pos);
status = -EFAULT;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = dev_get_node_manager(dev_object, &node_mgr);
if (!node_mgr) {
pr_debug("%s: Failed on dev_get_node_manager.\n",
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Look for SYS_PUTCBEG/SYS_PUTCEND: */
status =
cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
pr_debug("%s: Failed on cod_get_sym_value.\n",
__func__);
}
- if (DSP_SUCCEEDED(status))
+ if (!status)
status = dev_get_intf_fxns(dev_object, &intf_fxns);
/*
* Check for the "magic number" in the trace buffer. If it has
*/
mmu_fault_dbg_info.head[0] = 0;
mmu_fault_dbg_info.head[1] = 0;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
poll_cnt = 0;
while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
total_size = mmu_fault_dbg_info.size;
/* Limit the size in case DSP went crazy */
if (total_size > MAX_MMU_DBGBUFF)
else
sync_init_event(msg_mgr_obj->sync_event);
- if (DSP_SUCCEEDED(status))
+ if (!status)
*msg_man = msg_mgr_obj;
else
delete_msg_mgr(msg_mgr_obj);
/* Create event that will be signalled when a message from
* the DSP is available. */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
msg_q->sync_event = kzalloc(sizeof(struct sync_object),
GFP_KERNEL);
if (msg_q->sync_event)
}
/* Create a notification list for message ready notification. */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
GFP_KERNEL);
if (msg_q->ntfy_obj)
* unblock threads in MSG_Put() or MSG_Get(). sync_done_ack
* will be set by the unblocked thread to signal that it
* is unblocked and will no longer reference the object. */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
msg_q->sync_done = kzalloc(sizeof(struct sync_object),
GFP_KERNEL);
if (msg_q->sync_done)
status = -ENOMEM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object),
GFP_KERNEL);
if (msg_q->sync_done_ack)
status = -ENOMEM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Enter critical section */
spin_lock_bh(&hmsg_mgr->msg_mgr_lock);
/* Initialize message frames and put in appropriate queues */
- for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) {
+ for (i = 0; i < max_msgs && !status; i++) {
status = add_new_msg(hmsg_mgr->msg_free_list);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
num_allocated++;
status = add_new_msg(msg_q->msg_free_list);
}
}
/* Exit critical section */
spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
- if (DSP_SUCCEEDED(status) && !got_msg) {
+ if (!status && !got_msg) {
/* Wait til message is available, timeout, or done. We don't
* have to schedule the DPC, since the DSP will send messages
* when they are available. */
(void)sync_set_event(msg_queue_obj->sync_done_ack);
status = -EPERM;
} else {
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
DBC_ASSERT(!LST_IS_EMPTY
(msg_queue_obj->msg_used_list));
/* Get msg from used list */
spin_unlock_bh(&hmsg_mgr->msg_mgr_lock);
}
- if (DSP_SUCCEEDED(status) && !put_msg) {
+ if (!status && !put_msg) {
/* Wait til a free message frame is available, timeout,
* or done */
syncs[0] = hmsg_mgr->sync_event;
*/
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
{
- int status = 0;
struct bridge_dev_context *dev_context = dev_ctxt;
u32 temp;
struct dspbridge_platform_data *pdata =
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
dsp_clk_enable(DSP_CLK_IVA2);
- if (DSP_SUCCEEDED(status)) {
- /* set the device state to IDLE */
- dev_context->dw_brd_state = BRD_IDLE;
- }
- return status;
+ /* set the device state to IDLE */
+ dev_context->dw_brd_state = BRD_IDLE;
+
+ return 0;
}
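The bridge_brd_monitor() hunk above goes one step further: once nothing on the path can set status, the local variable and its guard are dead code and the function can return 0 directly. A minimal sketch of the same simplification, with hypothetical names (board_monitor, device_state) standing in for the driver's types:

#include <stdio.h>

enum { BRD_IDLE = 1 };			/* stand-in for the driver's BRD_IDLE */

struct device_state {
	int board_state;
};

/* Before the patch, a local status was initialised to 0, never modified,
 * and then tested with DSP_SUCCEEDED(); after it, the dead variable is
 * gone and the success value is returned directly. */
static int board_monitor(struct device_state *dev)
{
	dev->board_state = BRD_IDLE;	/* set the device state to IDLE */
	return 0;			/* nothing on this path can fail */
}

int main(void)
{
	struct device_state dev = { 0 };

	printf("status=%d state=%d\n", board_monitor(&dev), dev.board_state);
	return 0;
}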
/*
} else
__raw_writel(0xffffffff, dw_sync_addr);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
resources = dev_context->resources;
if (!resources)
status = -EPERM;
/* Assert RST1 i.e only the RST only for DSP megacell */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
OMAP2_RM_RSTCTRL);
OMAP343X_CONTROL_IVA2_BOOTMOD));
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Reset and Unreset the RST2, so that BOOTADDR is copied to
* IVA2 SYSC register */
(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
/* Lock the above TLB entries and get the BIOS and load monitor timer
* information */
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
hw_mmu_ttb_set(resources->dw_dmmu_base,
&ul_load_monitor_timer);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (ul_load_monitor_timer != 0xFFFF) {
clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
ul_load_monitor_timer;
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (ul_bios_gp_timer != 0xFFFF) {
clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
ul_bios_gp_timer;
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
/* Set the DSP clock rate */
(void)dev_get_symbol(dev_context->hdev_obj,
"_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
}
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
/*PM_IVA2GRPSEL_PER = 0xC0;*/
else
status = -ENOMEM;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
spin_lock_init(&pt_attrs->pg_lock);
dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
* resources struct */
dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
dev_context->hdev_obj = hdev_obj;
/* Store current board state. */
dev_context->dw_brd_state = BRD_STOPPED;
u32 total_bytes = ul_num_bytes;
u8 host_buf[BUFFERSIZE];
struct bridge_dev_context *dev_context = dev_ctxt;
- while ((total_bytes > 0) && DSP_SUCCEEDED(status)) {
+ while (total_bytes > 0 && !status) {
copy_bytes =
total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
/* Read from External memory */
status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
copy_bytes, mem_type);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if (dest_addr < (dev_context->dw_dsp_start_add +
dev_context->dw_internal_size)) {
/* Write to Internal memory */
u32 ul_remain_bytes = 0;
u32 ul_bytes = 0;
ul_remain_bytes = ul_num_bytes;
- while (ul_remain_bytes > 0 && DSP_SUCCEEDED(status)) {
+ while (ul_remain_bytes > 0 && !status) {
ul_bytes =
ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
if (dsp_addr < (dev_context->dw_dsp_start_add +
}
up_read(&mm->mmap_sem);
func_cont:
- if (DSP_SUCCEEDED(status)) {
- status = 0;
- } else {
+ if (status) {
/*
	 * Roll out the mapped pages in case it failed in the middle of
* mapping
"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
ul_num_bytes, l1_base_va, pte_addr_l1);
- while (rem_bytes && (DSP_SUCCEEDED(status))) {
+ while (rem_bytes && !status) {
u32 va_curr_orig = va_curr;
/* Find whether the L1 PTE points to a valid L2 PT */
pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
* entry. Similar checking is done for L1 PTEs too
* below
*/
- while (rem_bytes_l2 && (DSP_SUCCEEDED(status))) {
+ while (rem_bytes_l2 && !status) {
pte_val = *(u32 *) pte_addr_l2;
pte_size = hw_mmu_pte_size_l2(pte_val);
/* va_curr aligned to pte_size? */
HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
};
- while (num_bytes && DSP_SUCCEEDED(status)) {
+ while (num_bytes && !status) {
/* To find the max. page size with which both PA & VA are
* aligned */
all_bits = pa_curr | va_curr;
* Should not overwrite it. */
status = -EPERM;
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
pg_tbl_va = l2_base_va;
if (size == HW_PAGE_SIZE64KB)
pt->pg_info[l2_page_num].num_entries += 16;
}
spin_unlock(&pt->pg_lock);
}
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
pg_tbl_va, pa, va, size);
dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
va_curr = ul_mpu_addr;
page[0] = vmalloc_to_page((void *)va_curr);
pa_next = page_to_phys(page[0]);
- while (DSP_SUCCEEDED(status) && (i < num_pages)) {
+ while (!status && (i < num_pages)) {
/*
		 * Reuse pa_next from the previous iteration to avoid
* an extra va2pa call
hw_attrs);
va_curr += size_curr;
}
- if (DSP_SUCCEEDED(status))
- status = 0;
- else
- status = -EPERM;
-
/*
* In any case, flush the TLB
	 * This is called from here instead of from pte_update to avoid unnecessary
/* Disable wdt on hibernation. */
dsp_wdt_enable(false);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
		/* Update the Bridge driver state */
dev_context->dw_brd_state = BRD_DSP_HIBERNATION;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
status = dsp_clk_disable(bpwr_clks[clk_id_index].clk);
dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id,
false);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
(dev_context->dsp_per_clks) &=
(~((u32) (1 << bpwr_clks[clk_id_index].clk)));
}
case BPWR_ENABLE_CLOCK:
status = dsp_clk_enable(bpwr_clks[clk_id_index].clk);
dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true);
- if (DSP_SUCCEEDED(status))
+ if (!status)
(dev_context->dsp_per_clks) |=
(1 << bpwr_clks[clk_id_index].clk);
break;
DBC_ASSERT(ul_shm_base_virt != 0);
/* Check if it is a read of Trace section */
- if (DSP_SUCCEEDED(status) && !ul_trace_sec_beg) {
+ if (!status && !ul_trace_sec_beg) {
status = dev_get_symbol(dev_context->hdev_obj,
DSP_TRACESEC_BEG, &ul_trace_sec_beg);
}
DBC_ASSERT(ul_trace_sec_beg != 0);
- if (DSP_SUCCEEDED(status) && !ul_trace_sec_end) {
+ if (!status && !ul_trace_sec_end) {
status = dev_get_symbol(dev_context->hdev_obj,
DSP_TRACESEC_END, &ul_trace_sec_end);
}
DBC_ASSERT(ul_trace_sec_end != 0);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
if ((dsp_addr <= ul_trace_sec_end) &&
(dsp_addr >= ul_trace_sec_beg))
trace_read = true;
ul_ext_end = 0;
/* Get DYNEXT_BEG, EXT_BEG and EXT_END. */
- if (DSP_SUCCEEDED(status) && !ul_dyn_ext_base) {
+ if (!status && !ul_dyn_ext_base) {
status = dev_get_symbol(dev_context->hdev_obj,
DYNEXTBASE, &ul_dyn_ext_base);
}
DBC_ASSERT(ul_dyn_ext_base != 0);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = dev_get_symbol(dev_context->hdev_obj,
EXTBASE, &ul_ext_base);
}
DBC_ASSERT(ul_ext_base != 0);
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
status = dev_get_symbol(dev_context->hdev_obj,
EXTEND, &ul_ext_end);
}
if (ul_ext_end < ul_ext_base)
status = -EPERM;
- if (DSP_SUCCEEDED(status)) {
+ if (!status) {
ul_tlb_base_virt =
dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
offset = dsp_addr - ul_ext_base;
- if (DSP_SUCCEEDED(status))
+ if (!status)
memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes);
return status;
/* Check if it is a load to Trace section */
ret = dev_get_symbol(dev_context->hdev_obj,
DSP_TRACESEC_BEG, &ul_trace_sec_beg);
- if (DSP_SUCCEEDED(ret))
+ if (!ret)
ret = dev_get_symbol(dev_context->hdev_obj,
DSP_TRACESEC_END,
&ul_trace_sec_end);
}
- if (DSP_SUCCEEDED(ret)) {
+ if (!ret) {
if ((dsp_addr <= ul_trace_sec_end) &&
(dsp_addr >= ul_trace_sec_beg))
trace_load = true;
SHMBASENAME, &ul_shm_base_virt);
DBC_ASSERT(ul_shm_base_virt != 0);
if (dynamic_load) {
- if (DSP_SUCCEEDED(ret)) {
+ if (!ret) {
if (symbols_reloaded)
ret =
dev_get_symbol
&ul_ext_base);
}
DBC_ASSERT(ul_ext_base != 0);
- if (DSP_SUCCEEDED(ret)) {
+ if (!ret) {
/* DR OMAPS00013235 : DLModules array may be
* in EXTMEM. It is expected that DYNEXTMEM and
* EXTMEM are contiguous, so checking for the
}
} else {
if (symbols_reloaded) {
- if (DSP_SUCCEEDED(ret))
+ if (!ret)
ret =
dev_get_symbol
(dev_context->hdev_obj, EXTBASE,
&ul_ext_base);
DBC_ASSERT(ul_ext_base != 0);
- if (DSP_SUCCEEDED(ret))
+ if (!ret)
ret =
dev_get_symbol
(dev_context->hdev_obj, EXTEND,
if (ul_ext_end < ul_ext_base)
ret = -EPERM;
- if (DSP_SUCCEEDED(ret)) {
+ if (!ret) {
ul_tlb_base_virt =
dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
if (symbols_reloaded) {
- if (DSP_SUCCEEDED(ret)) {
- ret =
- dev_get_symbol
+ ret = dev_get_symbol
(dev_context->hdev_obj,
DSP_TRACESEC_END, &shm0_end);
- }
- if (DSP_SUCCEEDED(ret)) {
+ if (!ret) {
ret =
dev_get_symbol
(dev_context->hdev_obj, DYNEXTBASE,
if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
ret = -EPERM;
- if (DSP_SUCCEEDED(ret)) {
+ if (!ret) {
for (i = 0; i < 4; i++)
remain_byte[i] = 0x0;
if (dsp_addr > ul_ext_end || dw_offset > dsp_addr)
ret = -EPERM;
}
- if (DSP_SUCCEEDED(ret)) {
+ if (!ret) {
if (ul_num_bytes)
memcpy((u8 *) dw_base_addr + dw_offset, host_buff,
ul_num_bytes);
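The loops converted in the memory-copy paths above all follow the same shape: copy at most BUFFERSIZE bytes per pass and stop as soon as a pass returns a negative errno. A self-contained sketch of that shape, using a hypothetical copy_chunk() helper rather than the driver's read_ext_dsp_data()/write_dsp_data():

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define BUFFERSIZE 1024

/* Hypothetical stand-in for one chunk transfer: returns 0 on success or a
 * negative errno on failure, mirroring the driver's convention. */
static int copy_chunk(char *dst, const char *src, size_t nbytes)
{
	if (!dst || !src)
		return -EINVAL;
	memcpy(dst, src, nbytes);
	return 0;
}

static int copy_all(char *dst, const char *src, size_t total_bytes)
{
	int status = 0;

	/* Same shape as "while (total_bytes > 0 && !status)" above: the
	 * first failing chunk ends the loop and its errno is returned. */
	while (total_bytes > 0 && !status) {
		size_t copy_bytes =
			total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;

		status = copy_chunk(dst, src, copy_bytes);
		dst += copy_bytes;
		src += copy_bytes;
		total_bytes -= copy_bytes;
	}
	return status;
}

int main(void)
{
	static char src[4096] = "example payload";
	static char dst[4096];

	printf("copy_all: %d\n", copy_all(dst, src, sizeof(src)));
	return 0;
}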