All the callers used silent = false, so drop the silent parameter from the grab_nic_access() transport op and remove the now-dead silent path in the PCIe implementation.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
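For reference, the call pattern that remains after this change is sketched below. This is only an illustrative summary of the hunks that follow, not an additional change; the surrounding code is elided.

	unsigned long flags;

	/* Acquire NIC access; on timeout the PCIe op fires an NMI,
	 * warns once and returns false. */
	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* ... access registers via the *_no_grab() helpers ... */

	iwl_trans_release_nic_access(trans, &flags);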
ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
/* Make sure device is powered up for SRAM reads */
- if (!iwl_trans_grab_nic_access(priv->trans, false, &reg_flags))
+ if (!iwl_trans_grab_nic_access(priv->trans, &reg_flags))
return;
/* Set starting address; reads will auto-increment */
ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
/* Make sure device is powered up for SRAM reads */
- if (!iwl_trans_grab_nic_access(trans, false, &reg_flags))
+ if (!iwl_trans_grab_nic_access(trans, &reg_flags))
return pos;
/* Set starting address; reads will auto-increment */
priv->thermal_throttle.ct_kill_toggle = true;
}
iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
- if (iwl_trans_grab_nic_access(priv->trans, false, &flags))
+ if (iwl_trans_grab_nic_access(priv->trans, &flags))
iwl_trans_release_nic_access(priv->trans, &flags);
/* Reschedule the ct_kill timer to occur in
{
u32 value = 0x5a5a5a5a;
unsigned long flags;
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
value = iwl_read32(trans, reg);
iwl_trans_release_nic_access(trans, &flags);
}
{
unsigned long flags;
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write32(trans, reg, value);
iwl_trans_release_nic_access(trans, &flags);
}
unsigned long flags;
u32 val = 0x5a5a5a5a;
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
val = iwl_read_prph_no_grab(trans, ofs);
iwl_trans_release_nic_access(trans, &flags);
}
{
unsigned long flags;
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write_prph_no_grab(trans, ofs, val);
iwl_trans_release_nic_access(trans, &flags);
}
{
unsigned long flags;
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write_prph_no_grab(trans, ofs,
iwl_read_prph_no_grab(trans, ofs) |
mask);
{
unsigned long flags;
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write_prph_no_grab(trans, ofs,
(iwl_read_prph_no_grab(trans, ofs) &
mask) | bits);
unsigned long flags;
u32 val;
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
val = iwl_read_prph_no_grab(trans, ofs);
iwl_write_prph_no_grab(trans, ofs, (val & ~mask));
iwl_trans_release_nic_access(trans, &flags);
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
- bool (*grab_nic_access)(struct iwl_trans *trans, bool silent,
- unsigned long *flags);
+ bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
void (*release_nic_access)(struct iwl_trans *trans,
unsigned long *flags);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
trans->ops->set_bits_mask(trans, reg, mask, value);
}
-#define iwl_trans_grab_nic_access(trans, silent, flags) \
+#define iwl_trans_grab_nic_access(trans, flags) \
__cond_lock(nic_access, \
- likely((trans)->ops->grab_nic_access(trans, silent, flags)))
+ likely((trans)->ops->grab_nic_access(trans, flags)))
static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
unsigned long flags;
int i, j;
- if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
+ if (!iwl_trans_grab_nic_access(mvm->trans, &flags))
return;
/* Pull RXF data from all RXFs */
unsigned long flags;
u32 prph_len = 0, i;
- if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ if (!iwl_trans_grab_nic_access(trans, &flags))
return 0;
for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
-static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
- unsigned long *flags)
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
+ unsigned long *flags)
{
int ret;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
if (unlikely(ret < 0)) {
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
- if (!silent) {
- u32 val = iwl_read32(trans, CSR_GP_CNTRL);
- WARN_ONCE(1,
- "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
- val);
- spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
- return false;
- }
+ WARN_ONCE(1,
+ "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+ iwl_read32(trans, CSR_GP_CNTRL));
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+ return false;
}
out:
int offs, ret = 0;
u32 *vals = buf;
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
for (offs = 0; offs < dwords; offs++)
vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
int offs, ret = 0;
const u32 *vals = buf;
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
for (offs = 0; offs < dwords; offs++)
iwl_write32(trans, HBUS_TARG_MEM_WDAT,
__le32 *val;
int i;
- if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ if (!iwl_trans_grab_nic_access(trans, &flags))
return 0;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
unsigned long flags;
u32 i;
- if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ if (!iwl_trans_grab_nic_access(trans, &flags))
return 0;
iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
goto out_pci_disable_msi;
}
- if (iwl_trans_grab_nic_access(trans, false, &flags)) {
+ if (iwl_trans_grab_nic_access(trans, &flags)) {
u32 hw_step;
hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
spin_lock(&trans_pcie->irq_lock);
- if (!iwl_trans_grab_nic_access(trans, false, &flags))
+ if (!iwl_trans_grab_nic_access(trans, &flags))
goto out;
/* Stop each Tx DMA channel */