void ath10k_bmi_start(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_BMI, "bmi start\n");
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
}
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
int ret;
- ath10k_dbg(ATH10K_DBG_BMI, "bmi done\n");
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
if (ar->bmi.done_sent) {
- ath10k_dbg(ATH10K_DBG_BMI, "bmi skipped\n");
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
return 0;
}
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
- ath10k_warn("unable to write to the device: %d\n", ret);
+ ath10k_warn(ar, "unable to write to the device: %d\n", ret);
return ret;
}
u32 resplen = sizeof(resp.get_target_info);
int ret;
- ath10k_dbg(ATH10K_DBG_BMI, "bmi get target info\n");
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
if (ar->bmi.done_sent) {
- ath10k_warn("BMI Get Target Info Command disallowed\n");
+ ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
return -EBUSY;
}
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
- ath10k_warn("unable to get target info from device\n");
+ ath10k_warn(ar, "unable to get target info from device\n");
return ret;
}
if (resplen < sizeof(resp.get_target_info)) {
- ath10k_warn("invalid get_target_info response length (%d)\n",
+ ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
resplen);
return -EIO;
}
u32 rxlen;
int ret;
- ath10k_dbg(ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
- ath10k_warn("command disallowed\n");
+ ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
&resp, &rxlen);
if (ret) {
- ath10k_warn("unable to read from the device (%d)\n",
+ ath10k_warn(ar, "unable to read from the device (%d)\n",
ret);
return ret;
}
u32 txlen;
int ret;
- ath10k_dbg(ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
address, length);
if (ar->bmi.done_sent) {
- ath10k_warn("command disallowed\n");
+ ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
- ath10k_warn("unable to write to the device (%d)\n",
+ ath10k_warn(ar, "unable to write to the device (%d)\n",
ret);
return ret;
}
u32 resplen = sizeof(resp.execute);
int ret;
- ath10k_dbg(ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
address, param);
if (ar->bmi.done_sent) {
- ath10k_warn("command disallowed\n");
+ ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
if (ret) {
- ath10k_warn("unable to read from the device\n");
+ ath10k_warn(ar, "unable to read from the device\n");
return ret;
}
if (resplen < sizeof(resp.execute)) {
- ath10k_warn("invalid execute response length (%d)\n",
+ ath10k_warn(ar, "invalid execute response length (%d)\n",
resplen);
return -EIO;
}
*result = __le32_to_cpu(resp.execute.result);
- ath10k_dbg(ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
return 0;
}
u32 txlen;
int ret;
- ath10k_dbg(ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
buffer, length);
if (ar->bmi.done_sent) {
- ath10k_warn("command disallowed\n");
+ ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
- ath10k_warn("unable to write to the device\n");
+ ath10k_warn(ar, "unable to write to the device\n");
return ret;
}
u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
int ret;
- ath10k_dbg(ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
address);
if (ar->bmi.done_sent) {
- ath10k_warn("command disallowed\n");
+ ath10k_warn(ar, "command disallowed\n");
return -EBUSY;
}
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
if (ret) {
- ath10k_warn("unable to Start LZ Stream to the device\n");
+ ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
return ret;
}
u32 trailer_len = length - head_len;
int ret;
- ath10k_dbg(ATH10K_DBG_BMI,
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
"bmi fast download address 0x%x buffer 0x%p length %d\n",
address, buffer, length);
int ret = 0;
if (nbytes > ce_state->src_sz_max)
- ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
+ ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
if (unlikely(CE_RING_DELTA(nentries_mask,
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot init ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
if (attr->src_nentries) {
ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
- ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
+ ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
return ret;
}
if (attr->dest_nentries) {
ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
if (ret) {
- ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
return ret;
}
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
ret = PTR_ERR(ce_state->src_ring);
- ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+ ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
ce_id, ret);
ce_state->src_ring = NULL;
return ret;
attr);
if (IS_ERR(ce_state->dest_ring)) {
ret = PTR_ERR(ce_state->dest_ring);
- ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+ ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
ce_id, ret);
ce_state->dest_ring = NULL;
return ret;
static void ath10k_send_suspend_complete(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_BOOT, "boot suspend complete\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
complete(&ar->target_suspend);
}
ret = ath10k_bmi_write32(ar, hi_app_host_interest,
HTC_PROTOCOL_VERSION);
if (ret) {
- ath10k_err("settings HTC version failed\n");
+ ath10k_err(ar, "settings HTC version failed\n");
return ret;
}
/* set the firmware mode to STA/IBSS/AP */
ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
if (ret) {
- ath10k_err("setting firmware mode (1/2) failed\n");
+ ath10k_err(ar, "setting firmware mode (1/2) failed\n");
return ret;
}
ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
if (ret) {
- ath10k_err("setting firmware mode (2/2) failed\n");
+ ath10k_err(ar, "setting firmware mode (2/2) failed\n");
return ret;
}
/* We do all byte-swapping on the host */
ret = ath10k_bmi_write32(ar, hi_be, 0);
if (ret) {
- ath10k_err("setting host CPU BE mode failed\n");
+ ath10k_err(ar, "setting host CPU BE mode failed\n");
return ret;
}
ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
if (ret) {
- ath10k_err("setting FW data/desc swap flags failed\n");
+ ath10k_err(ar, "setting FW data/desc swap flags failed\n");
return ret;
}
ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
if (ret) {
- ath10k_err("could not read board ext data addr (%d)\n", ret);
+ ath10k_err(ar, "could not read board ext data addr (%d)\n",
+ ret);
return ret;
}
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot push board extended data addr 0x%x\n",
board_ext_data_addr);
return 0;
if (ar->board_len != (board_data_size + board_ext_data_size)) {
- ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
+ ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
ar->board_len, board_data_size, board_ext_data_size);
return -EINVAL;
}
ar->board_data + board_data_size,
board_ext_data_size);
if (ret) {
- ath10k_err("could not write board ext data (%d)\n", ret);
+ ath10k_err(ar, "could not write board ext data (%d)\n", ret);
return ret;
}
ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
(board_ext_data_size << 16) | 1);
if (ret) {
- ath10k_err("could not write board ext data bit (%d)\n", ret);
+ ath10k_err(ar, "could not write board ext data bit (%d)\n",
+ ret);
return ret;
}
ret = ath10k_push_board_ext_data(ar);
if (ret) {
- ath10k_err("could not push board ext data (%d)\n", ret);
+ ath10k_err(ar, "could not push board ext data (%d)\n", ret);
goto exit;
}
ret = ath10k_bmi_read32(ar, hi_board_data, &address);
if (ret) {
- ath10k_err("could not read board data addr (%d)\n", ret);
+ ath10k_err(ar, "could not read board data addr (%d)\n", ret);
goto exit;
}
min_t(u32, board_data_size,
ar->board_len));
if (ret) {
- ath10k_err("could not write board data (%d)\n", ret);
+ ath10k_err(ar, "could not write board data (%d)\n", ret);
goto exit;
}
ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
if (ret) {
- ath10k_err("could not write board data bit (%d)\n", ret);
+ ath10k_err(ar, "could not write board data bit (%d)\n", ret);
goto exit;
}
/* OTP is optional */
if (!ar->otp_data || !ar->otp_len) {
- ath10k_warn("Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
ar->otp_data, ar->otp_len);
return 0;
}
- ath10k_dbg(ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
address, ar->otp_len);
ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
- ath10k_err("could not write otp (%d)\n", ret);
+ ath10k_err(ar, "could not write otp (%d)\n", ret);
return ret;
}
ret = ath10k_bmi_execute(ar, address, 0, &result);
if (ret) {
- ath10k_err("could not execute otp (%d)\n", ret);
+ ath10k_err(ar, "could not execute otp (%d)\n", ret);
return ret;
}
- ath10k_dbg(ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (result != 0) {
- ath10k_err("otp calibration failed: %d", result);
+ ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
}
ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
ar->firmware_len);
if (ret) {
- ath10k_err("could not write fw (%d)\n", ret);
+ ath10k_err(ar, "could not write fw (%d)\n", ret);
goto exit;
}
int ret = 0;
if (ar->hw_params.fw.fw == NULL) {
- ath10k_err("firmware file not defined\n");
+ ath10k_err(ar, "firmware file not defined\n");
return -EINVAL;
}
if (ar->hw_params.fw.board == NULL) {
- ath10k_err("board data file not defined");
+ ath10k_err(ar, "board data file not defined");
return -EINVAL;
}
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
- ath10k_err("could not fetch board data (%d)\n", ret);
+ ath10k_err(ar, "could not fetch board data (%d)\n", ret);
goto err;
}
ar->hw_params.fw.fw);
if (IS_ERR(ar->firmware)) {
ret = PTR_ERR(ar->firmware);
- ath10k_err("could not fetch firmware (%d)\n", ret);
+ ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
goto err;
}
ar->hw_params.fw.otp);
if (IS_ERR(ar->otp)) {
ret = PTR_ERR(ar->otp);
- ath10k_err("could not fetch otp (%d)\n", ret);
+ ath10k_err(ar, "could not fetch otp (%d)\n", ret);
goto err;
}
/* first fetch the firmware file (firmware-*.bin) */
ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
if (IS_ERR(ar->firmware)) {
- ath10k_err("could not fetch firmware file '%s/%s': %ld\n",
+ ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
return PTR_ERR(ar->firmware);
}
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
if (len < magic_len) {
- ath10k_err("firmware file '%s/%s' too small to contain magic: %zu\n",
+ ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n",
ar->hw_params.fw.dir, name, len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
- ath10k_err("invalid firmware magic\n");
+ ath10k_err(ar, "invalid firmware magic\n");
ret = -EINVAL;
goto err;
}
data += sizeof(*hdr);
if (len < ie_len) {
- ath10k_err("invalid length for FW IE %d (%zu < %zu)\n",
+ ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
ret = -EINVAL;
goto err;
memcpy(ar->hw->wiphy->fw_version, data, ie_len);
ar->hw->wiphy->fw_version[ie_len] = '\0';
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw version %s\n",
ar->hw->wiphy->fw_version);
break;
timestamp = (__le32 *)data;
- ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n",
le32_to_cpup(timestamp));
break;
case ATH10K_FW_IE_FEATURES:
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found firmware features ie (%zd B)\n",
ie_len);
break;
if (data[index] & (1 << bit)) {
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"Enabling feature bit: %i\n",
i);
__set_bit(i, ar->fw_features);
}
}
- ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
+ ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
ar->fw_features,
sizeof(ar->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw image ie (%zd B)\n",
ie_len);
break;
case ATH10K_FW_IE_OTP_IMAGE:
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found otp image ie (%zd B)\n",
ie_len);
break;
default:
- ath10k_warn("Unknown FW IE: %u\n",
+ ath10k_warn(ar, "Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
break;
}
}
if (!ar->firmware_data || !ar->firmware_len) {
- ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+ ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
goto err;
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
!test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
- ath10k_err("feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
+ ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
ret = -EINVAL;
goto err;
}
/* now fetch the board file */
if (ar->hw_params.fw.board == NULL) {
- ath10k_err("board data file not defined");
+ ath10k_err(ar, "board data file not defined");
ret = -EINVAL;
goto err;
}
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
- ath10k_err("could not fetch board data '%s/%s' (%d)\n",
+ ath10k_err(ar, "could not fetch board data '%s/%s' (%d)\n",
ar->hw_params.fw.dir, ar->hw_params.fw.board,
ret);
goto err;
int ret;
ar->fw_api = 3;
- ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
if (ret == 0)
goto success;
ar->fw_api = 2;
- ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
if (ret == 0)
goto success;
ar->fw_api = 1;
- ath10k_dbg(ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
ret = ath10k_core_fetch_firmware_api_1(ar);
if (ret)
return ret;
success:
- ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
return 0;
}
ret = ath10k_download_board_data(ar);
if (ret) {
- ath10k_err("failed to download board data: %d\n", ret);
+ ath10k_err(ar, "failed to download board data: %d\n", ret);
return ret;
}
ret = ath10k_download_and_run_otp(ar);
if (ret) {
- ath10k_err("failed to run otp: %d\n", ret);
+ ath10k_err(ar, "failed to run otp: %d\n", ret);
return ret;
}
ret = ath10k_download_fw(ar);
if (ret) {
- ath10k_err("failed to download firmware: %d\n", ret);
+ ath10k_err(ar, "failed to download firmware: %d\n", ret);
return ret;
}
*/
ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
if (ret) {
- ath10k_warn("could not disable UART prints (%d)\n", ret);
+ ath10k_warn(ar, "could not disable UART prints (%d)\n", ret);
return ret;
}
ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, 7);
if (ret) {
- ath10k_warn("could not enable UART prints (%d)\n", ret);
+ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
return ret;
}
ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
if (ret) {
- ath10k_warn("could not enable UART prints (%d)\n", ret);
+ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
return ret;
}
/* Set the UART baud rate to 19200. */
ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
if (ret) {
- ath10k_warn("could not set the baud rate (%d)\n", ret);
+ ath10k_warn(ar, "could not set the baud rate (%d)\n", ret);
return ret;
}
- ath10k_info("UART prints enabled\n");
+ ath10k_info(ar, "UART prints enabled\n");
return 0;
}
}
if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
- ath10k_err("Unsupported hardware version: 0x%x\n",
+ ath10k_err(ar, "Unsupported hardware version: 0x%x\n",
ar->target_version);
return -EINVAL;
}
ar->hw_params = *hw_params;
- ath10k_dbg(ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
ar->hw_params.name, ar->target_version);
return 0;
case ATH10K_STATE_OFF:
/* this can happen if driver is being unloaded
* or if the crash happens during FW probing */
- ath10k_warn("cannot restart a device that hasn't been started\n");
+ ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
break;
case ATH10K_STATE_RESTARTING:
/* hw restart might be requested from multiple places */
ar->state = ATH10K_STATE_WEDGED;
/* fall through */
case ATH10K_STATE_WEDGED:
- ath10k_warn("device is wedged, will not restart\n");
+ ath10k_warn(ar, "device is wedged, will not restart\n");
break;
}
status = ath10k_htc_init(ar);
if (status) {
- ath10k_err("could not init HTC (%d)\n", status);
+ ath10k_err(ar, "could not init HTC (%d)\n", status);
goto err;
}
status = ath10k_wmi_attach(ar);
if (status) {
- ath10k_err("WMI attach failed: %d\n", status);
+ ath10k_err(ar, "WMI attach failed: %d\n", status);
goto err;
}
status = ath10k_htt_init(ar);
if (status) {
- ath10k_err("failed to init htt: %d\n", status);
+ ath10k_err(ar, "failed to init htt: %d\n", status);
goto err_wmi_detach;
}
status = ath10k_htt_tx_alloc(&ar->htt);
if (status) {
- ath10k_err("failed to alloc htt tx: %d\n", status);
+ ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
goto err_wmi_detach;
}
status = ath10k_htt_rx_alloc(&ar->htt);
if (status) {
- ath10k_err("failed to alloc htt rx: %d\n", status);
+ ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
goto err_htt_tx_detach;
}
status = ath10k_hif_start(ar);
if (status) {
- ath10k_err("could not start HIF: %d\n", status);
+ ath10k_err(ar, "could not start HIF: %d\n", status);
goto err_htt_rx_detach;
}
status = ath10k_htc_wait_target(&ar->htc);
if (status) {
- ath10k_err("failed to connect to HTC: %d\n", status);
+ ath10k_err(ar, "failed to connect to HTC: %d\n", status);
goto err_hif_stop;
}
status = ath10k_htt_connect(&ar->htt);
if (status) {
- ath10k_err("failed to connect htt (%d)\n", status);
+ ath10k_err(ar, "failed to connect htt (%d)\n", status);
goto err_hif_stop;
}
status = ath10k_wmi_connect(ar);
if (status) {
- ath10k_err("could not connect wmi: %d\n", status);
+ ath10k_err(ar, "could not connect wmi: %d\n", status);
goto err_hif_stop;
}
status = ath10k_htc_start(&ar->htc);
if (status) {
- ath10k_err("failed to start htc: %d\n", status);
+ ath10k_err(ar, "failed to start htc: %d\n", status);
goto err_hif_stop;
}
status = ath10k_wmi_wait_for_service_ready(ar);
if (status <= 0) {
- ath10k_warn("wmi service ready event not received");
+ ath10k_warn(ar, "wmi service ready event not received");
status = -ETIMEDOUT;
goto err_hif_stop;
}
- ath10k_dbg(ATH10K_DBG_BOOT, "firmware %s booted\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
status = ath10k_wmi_cmd_init(ar);
if (status) {
- ath10k_err("could not send WMI init command (%d)\n", status);
+ ath10k_err(ar, "could not send WMI init command (%d)\n",
+ status);
goto err_hif_stop;
}
status = ath10k_wmi_wait_for_unified_ready(ar);
if (status <= 0) {
- ath10k_err("wmi unified ready event not received\n");
+ ath10k_err(ar, "wmi unified ready event not received\n");
status = -ETIMEDOUT;
goto err_hif_stop;
}
status = ath10k_htt_setup(&ar->htt);
if (status) {
- ath10k_err("failed to setup htt: %d\n", status);
+ ath10k_err(ar, "failed to setup htt: %d\n", status);
goto err_hif_stop;
}
ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
if (ret) {
- ath10k_warn("could not suspend target (%d)\n", ret);
+ ath10k_warn(ar, "could not suspend target (%d)\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
if (ret == 0) {
- ath10k_warn("suspend timed out - target pause event never came\n");
+ ath10k_warn(ar, "suspend timed out - target pause event never came\n");
return -ETIMEDOUT;
}
ret = ath10k_hif_power_up(ar);
if (ret) {
- ath10k_err("could not start pci hif (%d)\n", ret);
+ ath10k_err(ar, "could not start pci hif (%d)\n", ret);
return ret;
}
memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info(ar, &target_info);
if (ret) {
- ath10k_err("could not get target info (%d)\n", ret);
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
ath10k_hif_power_down(ar);
return ret;
}
ret = ath10k_init_hw_params(ar);
if (ret) {
- ath10k_err("could not get hw params (%d)\n", ret);
+ ath10k_err(ar, "could not get hw params (%d)\n", ret);
ath10k_hif_power_down(ar);
return ret;
}
ret = ath10k_core_fetch_firmware_files(ar);
if (ret) {
- ath10k_err("could not fetch firmware files (%d)\n", ret);
+ ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
ath10k_hif_power_down(ar);
return ret;
}
ret = ath10k_core_start(ar);
if (ret) {
- ath10k_err("could not init core (%d)\n", ret);
+ ath10k_err(ar, "could not init core (%d)\n", ret);
ath10k_core_free_firmware_files(ar);
ath10k_hif_power_down(ar);
mutex_unlock(&ar->conf_mutex);
{
u32 hw_revision = MS(ar->chip_id, SOC_CHIP_ID_REV);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip_id 0x%08x hw_revision 0x%x\n",
ar->chip_id, hw_revision);
/* Check that we are not using hw1.0 (some of them have same pci id
* due to missing hw1.0 workarounds. */
switch (hw_revision) {
case QCA988X_HW_1_0_CHIP_ID_REV:
- ath10k_err("ERROR: qca988x hw1.0 is not supported\n");
+ ath10k_err(ar, "ERROR: qca988x hw1.0 is not supported\n");
return -EOPNOTSUPP;
case QCA988X_HW_2_0_CHIP_ID_REV:
return 0;
default:
- ath10k_warn("Warning: hardware revision unknown (0x%x), expect problems\n",
+ ath10k_warn(ar, "Warning: hardware revision unknown (0x%x), expect problems\n",
ar->chip_id);
return 0;
}
status = ath10k_core_probe_fw(ar);
if (status) {
- ath10k_err("could not probe fw (%d)\n", status);
+ ath10k_err(ar, "could not probe fw (%d)\n", status);
goto err;
}
status = ath10k_mac_register(ar);
if (status) {
- ath10k_err("could not register to mac80211 (%d)\n", status);
+ ath10k_err(ar, "could not register to mac80211 (%d)\n", status);
goto err_release_fw;
}
status = ath10k_debug_create(ar);
if (status) {
- ath10k_err("unable to initialize debugfs\n");
+ ath10k_err(ar, "unable to initialize debugfs\n");
goto err_unregister_mac;
}
status = ath10k_spectral_create(ar);
if (status) {
- ath10k_err("failed to initialize spectral\n");
+ ath10k_err(ar, "failed to initialize spectral\n");
goto err_debug_destroy;
}
status = ath10k_core_check_chip_id(ar);
if (status) {
- ath10k_err("Unsupported chip id 0x%08x\n", ar->chip_id);
+ ath10k_err(ar, "Unsupported chip id 0x%08x\n", ar->chip_id);
return status;
}
u8 data[0];
} __packed;
-static int ath10k_printk(const char *level, const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
- int rtn;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- rtn = printk("%sath10k: %pV", level, &vaf);
-
- va_end(args);
-
- return rtn;
-}
-
-int ath10k_info(const char *fmt, ...)
+int ath10k_info(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
va_start(args, fmt);
vaf.va = &args;
- ret = ath10k_printk(KERN_INFO, "%pV", &vaf);
+ ret = dev_info(ar->dev, "%pV", &vaf);
trace_ath10k_log_info(&vaf);
va_end(args);
void ath10k_print_driver_info(struct ath10k *ar)
{
- ath10k_info("%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
+ ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
ar->fw_api,
ar->htt.target_version_major,
ar->htt.target_version_minor);
- ath10k_info("debug %d debugfs %d tracing %d dfs %d\n",
+ ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
config_enabled(CONFIG_ATH10K_DEBUGFS),
config_enabled(CONFIG_ATH10K_TRACING),
}
EXPORT_SYMBOL(ath10k_print_driver_info);
-int ath10k_err(const char *fmt, ...)
+int ath10k_err(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
va_start(args, fmt);
vaf.va = &args;
- ret = ath10k_printk(KERN_ERR, "%pV", &vaf);
+ ret = dev_err(ar->dev, "%pV", &vaf);
trace_ath10k_log_err(&vaf);
va_end(args);
}
EXPORT_SYMBOL(ath10k_err);
-int ath10k_warn(const char *fmt, ...)
+int ath10k_warn(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
- int ret = 0;
va_start(args, fmt);
vaf.va = &args;
-
- if (net_ratelimit())
- ret = ath10k_printk(KERN_WARNING, "%pV", &vaf);
-
+ dev_warn_ratelimited(ar->dev, "%pV", &vaf);
trace_ath10k_log_warn(&vaf);
va_end(args);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(ath10k_warn);
ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
if (ret) {
- ath10k_warn("could not request stats (%d)\n", ret);
+ ath10k_warn(ar, "could not request stats (%d)\n", ret);
goto exit;
}
}
if (!strcmp(buf, "soft")) {
- ath10k_info("simulating soft firmware crash\n");
+ ath10k_info(ar, "simulating soft firmware crash\n");
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
} else if (!strcmp(buf, "hard")) {
- ath10k_info("simulating hard firmware crash\n");
+ ath10k_info(ar, "simulating hard firmware crash\n");
/* 0x7fff is vdev id, and it is always out of range for all
* firmware variants in order to force a firmware crash.
*/
}
if (ret) {
- ath10k_warn("failed to simulate firmware crash: %d\n", ret);
+ ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret);
goto exit;
}
ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
cookie);
if (ret) {
- ath10k_warn("failed to send htt stats request: %d\n", ret);
+ ath10k_warn(ar, "failed to send htt stats request: %d\n", ret);
return ret;
}
if (ar->state == ATH10K_STATE_ON) {
ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
if (ret) {
- ath10k_warn("dbglog cfg failed from debugfs: %d\n",
+ ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
ret);
goto exit;
}
ret = ath10k_debug_htt_stats_req(ar);
if (ret)
/* continue normally anyway, this isn't serious */
- ath10k_warn("failed to start htt stats workqueue: %d\n", ret);
+ ath10k_warn(ar, "failed to start htt stats workqueue: %d\n",
+ ret);
if (ar->debug.fw_dbglog_mask) {
ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask);
if (ret)
/* not serious */
- ath10k_warn("failed to enable dbglog during start: %d",
+ ath10k_warn(ar, "failed to enable dbglog during start: %d",
ret);
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
-void ath10k_dbg(enum ath10k_debug_mask mask, const char *fmt, ...)
+void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
+ const char *fmt, ...)
{
struct va_format vaf;
va_list args;
vaf.va = &args;
if (ath10k_debug_mask & mask)
- ath10k_printk(KERN_DEBUG, "%pV", &vaf);
+ dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf);
trace_ath10k_log_dbg(mask, &vaf);
}
EXPORT_SYMBOL(ath10k_dbg);
-void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
if (ath10k_debug_mask & mask) {
if (msg)
- ath10k_dbg(mask, "%s\n", msg);
+ ath10k_dbg(ar, mask, "%s\n", msg);
print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
}
extern unsigned int ath10k_debug_mask;
-__printf(1, 2) int ath10k_info(const char *fmt, ...);
-__printf(1, 2) int ath10k_err(const char *fmt, ...);
-__printf(1, 2) int ath10k_warn(const char *fmt, ...);
+__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...);
void ath10k_print_driver_info(struct ath10k *ar);
#ifdef CONFIG_ATH10K_DEBUGFS
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
-__printf(2, 3) void ath10k_dbg(enum ath10k_debug_mask mask,
+__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
const char *fmt, ...);
-void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len);
#else /* CONFIG_ATH10K_DEBUG */
-static inline int ath10k_dbg(enum ath10k_debug_mask dbg_mask,
+static inline int ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask dbg_mask,
const char *fmt, ...)
{
return 0;
}
-static inline void ath10k_dbg_dump(enum ath10k_debug_mask mask,
+static inline void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
if (!skb) {
- ath10k_warn("Unable to allocate ctrl skb\n");
+ ath10k_warn(ar, "Unable to allocate ctrl skb\n");
return NULL;
}
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
- ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
return skb;
}
static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+ struct ath10k *ar = ep->htc->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
ep->eid, skb);
ath10k_htc_restore_tx_skb(ep->htc, skb);
if (!ep->ep_ops.ep_tx_complete) {
- ath10k_warn("no tx handler for eid %d\n", ep->eid);
+ ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
dev_kfree_skb_any(skb);
return;
}
/* assumes tx_lock is held */
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
{
+ struct ath10k *ar = ep->htc->ar;
+
if (!ep->tx_credit_flow_enabled)
return false;
if (ep->tx_credits >= ep->tx_credits_per_max_message)
return false;
- ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
ep->eid);
return true;
}
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
+ struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
return -ECOMM;
if (eid >= ATH10K_HTC_EP_COUNT) {
- ath10k_warn("Invalid endpoint id: %d\n", eid);
+ ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
goto err_pull;
}
ep->tx_credits -= credits;
- ath10k_dbg(ATH10K_DBG_HTC,
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d consumed %d credits (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->tx_credit_flow_enabled) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
- ath10k_dbg(ATH10K_DBG_HTC,
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d reverted %d credits back (total %d)\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
int len,
enum ath10k_htc_ep_id eid)
{
+ struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep;
int i, n_reports;
if (len % sizeof(*report))
- ath10k_warn("Uneven credit report len %d", len);
+ ath10k_warn(ar, "Uneven credit report len %d", len);
n_reports = len / sizeof(*report);
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
- ath10k_dbg(ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
report->eid, report->credits, ep->tx_credits);
if (ep->ep_ops.ep_tx_credits) {
int length,
enum ath10k_htc_ep_id src_eid)
{
+ struct ath10k *ar = htc->ar;
int status = 0;
struct ath10k_htc_record *record;
u8 *orig_buffer;
if (record->hdr.len > length) {
/* no room left in buffer for record */
- ath10k_warn("Invalid record length: %d\n",
+ ath10k_warn(ar, "Invalid record length: %d\n",
record->hdr.len);
status = -EINVAL;
break;
case ATH10K_HTC_RECORD_CREDITS:
len = sizeof(struct ath10k_htc_credit_report);
if (record->hdr.len < len) {
- ath10k_warn("Credit report too long\n");
+ ath10k_warn(ar, "Credit report too long\n");
status = -EINVAL;
break;
}
src_eid);
break;
default:
- ath10k_warn("Unhandled record: id:%d length:%d\n",
+ ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
}
if (status)
- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
orig_buffer, orig_length);
return status;
eid = hdr->eid;
if (eid >= ATH10K_HTC_EP_COUNT) {
- ath10k_warn("HTC Rx: invalid eid %d\n", eid);
- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
+ ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
payload_len = __le16_to_cpu(hdr->len);
if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
- ath10k_warn("HTC rx frame too long, len: %zu\n",
+ ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
payload_len + sizeof(*hdr));
- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
}
if (skb->len < payload_len) {
- ath10k_dbg(ATH10K_DBG_HTC,
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Rx: insufficient length, got %d, expected %d\n",
skb->len, payload_len);
- ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
"", hdr, sizeof(*hdr));
status = -EINVAL;
goto out;
if ((trailer_len < min_len) ||
(trailer_len > payload_len)) {
- ath10k_warn("Invalid trailer length: %d\n",
+ ath10k_warn(ar, "Invalid trailer length: %d\n",
trailer_len);
status = -EPROTO;
goto out;
* this is a fatal error, target should not be
* sending unsolicited messages on the ep 0
*/
- ath10k_warn("HTC rx ctrl still processing\n");
+ ath10k_warn(ar, "HTC rx ctrl still processing\n");
status = -EINVAL;
complete(&htc->ctl_resp);
goto out;
goto out;
}
- ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
{
/* This is unexpected. FW is not supposed to send regular rx on this
* endpoint. */
- ath10k_warn("unexpected htc rx\n");
+ ath10k_warn(ar, "unexpected htc rx\n");
kfree_skb(skb);
}
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
+ struct ath10k *ar = htc->ar;
int i, status = 0;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
* iomap writes unmasking PCI CE irqs aren't propagated
* properly in KVM PCI-passthrough sometimes.
*/
- ath10k_warn("failed to receive control response completion, polling..\n");
+ ath10k_warn(ar, "failed to receive control response completion, polling..\n");
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1);
}
if (status < 0) {
- ath10k_err("ctl_resp never came in (%d)\n", status);
+ ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
return status;
}
if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
- ath10k_err("Invalid HTC ready msg len:%d\n",
+ ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
return -ECOMM;
}
credit_size = __le16_to_cpu(msg->ready.credit_size);
if (message_id != ATH10K_HTC_MSG_READY_ID) {
- ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
+ ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
return -ECOMM;
}
htc->total_transmit_credits = credit_count;
htc->target_credit_size = credit_size;
- ath10k_dbg(ATH10K_DBG_HTC,
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
"Target ready! transmit resources: %d size:%d\n",
htc->total_transmit_credits,
htc->target_credit_size);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
- ath10k_err("Invalid credit size received\n");
+ ath10k_err(ar, "Invalid credit size received\n");
return -ECOMM;
}
/* connect fake service */
status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
if (status) {
- ath10k_err("could not connect to htc service (%d)\n", status);
+ ath10k_err(ar, "could not connect to htc service (%d)\n",
+ status);
return status;
}
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp)
{
+ struct ath10k *ar = htc->ar;
struct ath10k_htc_msg *msg;
struct ath10k_htc_conn_svc *req_msg;
struct ath10k_htc_conn_svc_response resp_msg_dummy;
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb) {
- ath10k_err("Failed to allocate HTC packet\n");
+ ath10k_err(ar, "Failed to allocate HTC packet\n");
return -ENOMEM;
}
if (status <= 0) {
if (status == 0)
status = -ETIMEDOUT;
- ath10k_err("Service connect timeout: %d\n", status);
+ ath10k_err(ar, "Service connect timeout: %d\n", status);
return status;
}
if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
(htc->control_resp_len < sizeof(msg->hdr) +
sizeof(msg->connect_service_response))) {
- ath10k_err("Invalid resp message ID 0x%x", message_id);
+ ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
return -EPROTO;
}
- ath10k_dbg(ATH10K_DBG_HTC,
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
htc_service_name(service_id),
resp_msg->status, resp_msg->eid);
/* check response status */
if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
- ath10k_err("HTC Service %s connect request failed: 0x%x)\n",
+ ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
htc_service_name(service_id),
resp_msg->status);
return -EPROTO;
if (status)
return status;
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc ep %d ul polled %d dl polled %d\n",
ep->eid, ep->ul_is_polled, ep->dl_is_polled);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
return status;
}
-struct sk_buff *ath10k_htc_alloc_skb(int size)
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
struct sk_buff *skb;
skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
if (!skb) {
- ath10k_warn("could not allocate HTC tx skb\n");
+ ath10k_warn(ar, "could not allocate HTC tx skb\n");
return NULL;
}
/* FW/HTC requires 4-byte aligned streams */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
- ath10k_warn("Unaligned HTC tx skb\n");
+ ath10k_warn(ar, "Unaligned HTC tx skb\n");
return skb;
}
int ath10k_htc_start(struct ath10k_htc *htc)
{
+ struct ath10k *ar = htc->ar;
struct sk_buff *skb;
int status = 0;
struct ath10k_htc_msg *msg;
msg->hdr.message_id =
__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
- ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
if (status) {
struct ath10k_htc_svc_conn_resp *conn_resp);
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
-struct sk_buff *ath10k_htc_alloc_skb(int size);
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
#endif
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
- ath10k_dbg(ATH10K_DBG_BOOT, "htt target version %d.%d\n",
+ struct ath10k *ar = htt->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
htt->target_version_major, htt->target_version_minor);
if (htt->target_version_major != 2 &&
htt->target_version_major != 3) {
- ath10k_err("unsupported htt major version %d. supported versions are 2 and 3\n",
+ ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
htt->target_version_major);
return -ENOTSUPP;
}
int ath10k_htt_setup(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
int status;
init_completion(&htt->target_version_received);
status = wait_for_completion_timeout(&htt->target_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ);
if (status <= 0) {
- ath10k_warn("htt version request timed out\n");
+ ath10k_warn(ar, "htt version request timed out\n");
return -ETIMEDOUT;
}
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
int idx;
struct sk_buff *msdu;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_ring.fill_cnt == 0) {
- ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
+ ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
return NULL;
}
struct sk_buff **tail_msdu,
u32 *attention)
{
+ struct ath10k *ar = htt->ar;
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused) {
- ath10k_warn("htt is confused. refusing rx\n");
+ ath10k_warn(ar, "htt is confused. refusing rx\n");
return -1;
}
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
rx_desc = (struct htt_rx_desc *)msdu->data;
ath10k_htt_rx_free_msdu_chain(*head_msdu);
*head_msdu = NULL;
msdu = NULL;
- ath10k_err("htt rx stopped. cannot recover\n");
+ ath10k_err(ar, "htt rx stopped. cannot recover\n");
htt->rx_confused = true;
break;
}
next->len + skb_tailroom(next),
DMA_FROM_DEVICE);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL,
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"htt rx chained: ", next->data,
next->len + skb_tailroom(next));
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
dma_addr_t paddr;
void *vaddr;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
htt->rx_ring.size = ath10k_htt_rx_ring_size(htt);
if (!is_power_of_2(htt->rx_ring.size)) {
- ath10k_warn("htt rx ring size is not power of 2\n");
+ ath10k_warn(ar, "htt rx ring size is not power of 2\n");
return -EINVAL;
}
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt);
- ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
return -ENOMEM;
}
-static int ath10k_htt_rx_crypto_param_len(enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_WEP40:
return 0;
}
- ath10k_warn("unknown encryption type %d\n", type);
+ ath10k_warn(ar, "unknown encryption type %d\n", type);
return 0;
}
-static int ath10k_htt_rx_crypto_tail_len(enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
return 8;
}
- ath10k_warn("unknown encryption type %d\n", type);
+ ath10k_warn(ar, "unknown encryption type %d\n", type);
return 0;
}
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
- ath10k_dbg(ATH10K_DBG_DATA,
+ ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
skb->data, skb->len);
ieee80211_rx(ar->hw, skb);
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb_in)
{
+ struct ath10k *ar = htt->ar;
struct htt_rx_desc *rxd;
struct sk_buff *skb = skb_in;
struct sk_buff *first;
/* First frame in an A-MSDU chain has more decapped data. */
if (skb == first) {
len = round_up(ieee80211_hdrlen(hdr->frame_control), 4);
- len += round_up(ath10k_htt_rx_crypto_param_len(enctype),
- 4);
+ len += round_up(ath10k_htt_rx_crypto_param_len(ar,
+ enctype), 4);
decap_hdr += len;
}
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{
+ struct ath10k *ar = htt->ar;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum rx_msdu_decap_format fmt;
/* This shouldn't happen. If it does than it may be a FW bug. */
if (skb->next) {
- ath10k_warn("htt rx received chained non A-MSDU frame\n");
+ ath10k_warn(ar, "htt rx received chained non A-MSDU frame\n");
ath10k_htt_rx_free_msdu_chain(skb->next);
skb->next = NULL;
}
rfc1042 = hdr;
rfc1042 += roundup(hdr_len, 4);
- rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(enctype), 4);
+ rfc1042 += roundup(ath10k_htt_rx_crypto_param_len(ar,
+ enctype), 4);
skb_pull(skb, sizeof(struct ethhdr));
memcpy(skb_push(skb, sizeof(struct rfc1042_hdr)),
bool channel_set,
u32 attention)
{
+ struct ath10k *ar = htt->ar;
+
if (head->len == 0) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx dropping due to zero-len\n");
return false;
}
if (attention & RX_ATTENTION_FLAGS_DECRYPT_ERR) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx dropping due to decrypt-err\n");
return false;
}
if (!channel_set) {
- ath10k_warn("no channel configured; ignoring frame!\n");
+ ath10k_warn(ar, "no channel configured; ignoring frame!\n");
return false;
}
/* Skip mgmt frames while we handle this in WMI */
if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
attention & RX_ATTENTION_FLAGS_MGMT_TYPE) {
- ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
return false;
}
status != HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR &&
status != HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER &&
!htt->ar->monitor_started) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx ignoring frame w/ status %d\n",
status);
return false;
}
if (test_bit(ATH10K_CAC_RUNNING, &htt->ar->dev_flags)) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx CAC running\n");
return false;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
+ struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct htt_rx_desc *rxd;
rx_status);
}
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, sizeof(*rx) +
(sizeof(struct htt_rx_indication_mpdu_range) *
num_mpdu_ranges));
&attention);
if (ret < 0) {
- ath10k_warn("failed to pop amsdu from htt rx ring %d\n",
+ ath10k_warn(ar, "failed to pop amsdu from htt rx ring %d\n",
ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
continue;
static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *frag)
{
+ struct ath10k *ar = htt->ar;
struct sk_buff *msdu_head, *msdu_tail;
enum htt_rx_mpdu_encrypt_type enctype;
struct htt_rx_desc *rxd;
&attention);
spin_unlock_bh(&htt->rx_ring.lock);
- ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
+ ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
if (ret) {
- ath10k_warn("failed to pop amsdu from httr rx ring for fragmented rx %d\n",
+ ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
ret);
ath10k_htt_rx_free_msdu_chain(msdu_head);
return;
RX_MSDU_START_INFO1_DECAP_FORMAT);
if (fmt != RX_MSDU_DECAP_RAW) {
- ath10k_warn("we dont support non-raw fragmented rx yet\n");
+ ath10k_warn(ar, "we dont support non-raw fragmented rx yet\n");
dev_kfree_skb_any(msdu_head);
goto end;
}
msdu_head->ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
if (tkip_mic_err)
- ath10k_warn("tkip mic error\n");
+ ath10k_warn(ar, "tkip mic error\n");
if (decrypt_err) {
- ath10k_warn("decryption err in fragmented rx\n");
+ ath10k_warn(ar, "decryption err in fragmented rx\n");
dev_kfree_skb_any(msdu_head);
goto end;
}
if (enctype != HTT_RX_MPDU_ENCRYPT_NONE) {
hdrlen = ieee80211_hdrlen(hdr->frame_control);
- paramlen = ath10k_htt_rx_crypto_param_len(enctype);
+ paramlen = ath10k_htt_rx_crypto_param_len(ar, enctype);
/* It is more efficient to move the header than the payload */
memmove((void *)msdu_head->data + paramlen,
trim = 4;
/* remove crypto trailer */
- trim += ath10k_htt_rx_crypto_tail_len(enctype);
+ trim += ath10k_htt_rx_crypto_tail_len(ar, enctype);
/* last fragment of TKIP frags has MIC */
if (!ieee80211_has_morefrags(hdr->frame_control) &&
trim += 8;
if (trim > msdu_head->len) {
- ath10k_warn("htt rx fragment: trailer longer than the frame itself? drop\n");
+ ath10k_warn(ar, "htt rx fragment: trailer longer than the frame itself? drop\n");
dev_kfree_skb_any(msdu_head);
goto end;
}
skb_trim(msdu_head, msdu_head->len - trim);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx frag mpdu: ",
msdu_head->data, msdu_head->len);
ath10k_process_rx(htt->ar, rx_status, msdu_head);
end:
if (fw_desc_len > 0) {
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"expecting more fragmented rx in one indication %d\n",
fw_desc_len);
}
tx_done.discard = true;
break;
default:
- ath10k_warn("unhandled tx completion status %d\n", status);
+ ath10k_warn(ar, "unhandled tx completion status %d\n", status);
tx_done.discard = true;
break;
}
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
resp->data_tx_completion.num_msdus);
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
tid = MS(info0, HTT_RX_BA_INFO0_TID);
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx addba tid %hu peer_id %hu size %hhu\n",
tid, peer_id, ev->window_size);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
- ath10k_warn("received addba event for invalid peer_id: %hu\n",
+ ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (!arvif) {
- ath10k_warn("received addba event for invalid vdev_id: %u\n",
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
peer->vdev_id);
spin_unlock_bh(&ar->data_lock);
return;
}
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx start rx ba session sta %pM tid %hu size %hhu\n",
peer->addr, tid, ev->window_size);
tid = MS(info0, HTT_RX_BA_INFO0_TID);
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx delba tid %hu peer_id %hu\n",
tid, peer_id);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
- ath10k_warn("received addba event for invalid peer_id: %hu\n",
+ ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (!arvif) {
- ath10k_warn("received addba event for invalid vdev_id: %u\n",
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
peer->vdev_id);
spin_unlock_bh(&ar->data_lock);
return;
}
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx stop rx ba session sta %pM tid %hu\n",
peer->addr, tid);
/* confirm alignment */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
- ath10k_warn("unaligned htt message, expect trouble\n");
+ ath10k_warn(ar, "unaligned htt message, expect trouble\n");
- ath10k_dbg(ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
switch (resp->hdr.msg_type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"sec ind peer_id %d unicast %d type %d\n",
__le16_to_cpu(ev->peer_id),
!!(ev->flags & HTT_SECURITY_IS_UNICAST),
break;
}
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
break;
* sends all tx frames as already inspected so this shouldn't
* happen unless fw has a bug.
*/
- ath10k_warn("received an unexpected htt tx inspect event\n");
+ ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
break;
case HTT_T2H_MSG_TYPE_RX_ADDBA:
ath10k_htt_rx_addba(ar, resp);
break;
}
default:
- ath10k_dbg(ATH10K_DBG_HTT, "htt event (%d) not handled\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n",
resp->hdr.msg_type);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
break;
};
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
int msdu_id;
lockdep_assert_held(&htt->tx_lock);
if (msdu_id == htt->max_num_pending_tx)
return -ENOBUFS;
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id);
__set_bit(msdu_id, htt->used_msdu_ids);
return msdu_id;
}
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
+ struct ath10k *ar = htt->ar;
+
lockdep_assert_held(&htt->tx_lock);
if (!test_bit(msdu_id, htt->used_msdu_ids))
- ath10k_warn("trying to free unallocated msdu_id %d\n", msdu_id);
+ ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
+ msdu_id);
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
__clear_bit(msdu_id, htt->used_msdu_ids);
}
int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
+
spin_lock_init(&htt->tx_lock);
init_waitqueue_head(&htt->empty_tx_wq);
else
htt->max_num_pending_tx = TARGET_NUM_MSDU_DESC;
- ath10k_dbg(ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) *
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
struct htt_tx_done tx_done = {0};
int msdu_id;
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
- ath10k_dbg(ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n",
msdu_id);
tx_done.discard = 1;
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
int len = 0;
len += sizeof(cmd->hdr);
len += sizeof(cmd->ver_req);
- skb = ath10k_htc_alloc_skb(len);
+ skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
+ struct ath10k *ar = htt->ar;
struct htt_stats_req *req;
struct sk_buff *skb;
struct htt_cmd *cmd;
len += sizeof(cmd->hdr);
len += sizeof(cmd->stats_req);
- skb = ath10k_htc_alloc_skb(len);
+ skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
- ath10k_warn("failed to send htt type stats request: %d", ret);
+ ath10k_warn(ar, "failed to send htt type stats request: %d",
+ ret);
dev_kfree_skb_any(skb);
return ret;
}
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
+ struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring *ring;
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
+ (sizeof(*ring) * num_rx_ring);
- skb = ath10k_htc_alloc_skb(len);
+ skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu)
{
+ struct ath10k *ar = htt->ar;
struct htt_aggr_conf *aggr_conf;
struct sk_buff *skb;
struct htt_cmd *cmd;
len = sizeof(cmd->hdr);
len += sizeof(cmd->aggr_conf);
- skb = ath10k_htc_alloc_skb(len);
+ skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
- ath10k_dbg(ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
aggr_conf->max_num_amsdu_subframes,
aggr_conf->max_num_ampdu_subframes);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
- struct device *dev = htt->ar->dev;
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock);
- txdesc = ath10k_htc_alloc_skb(len);
+ txdesc = ath10k_htc_alloc_skb(ar, len);
if (!txdesc) {
res = -ENOMEM;
goto err_free_msdu_id;
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
- struct device *dev = htt->ar->dev;
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
- ath10k_dbg(ATH10K_DBG_HTT,
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
(u32)skb_cb->paddr, vdev_id, tid);
- ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
sg_items[0].transfer_id = 0;
enum set_key_cmd cmd,
const u8 *macaddr)
{
+ struct ath10k *ar = arvif->ar;
struct wmi_vdev_install_key_arg arg = {
.vdev_id = arvif->vdev_id,
.key_idx = key->keyidx,
arg.key_flags = WMI_KEY_PAIRWISE;
break;
default:
- ath10k_warn("cipher %d is not supported\n", key->cipher);
+ ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
return -EOPNOTSUPP;
}
first_errno = ret;
if (ret)
- ath10k_warn("failed to remove peer wep key %d: %d\n",
+ ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
i, ret);
peer->keys[i] = NULL;
first_errno = ret;
if (ret)
- ath10k_warn("failed to remove key for %pM: %d\n",
+ ath10k_warn(ar, "failed to remove key for %pM: %d\n",
addr, ret);
}
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("failed to create wmi peer %pM on vdev %i: %i\n",
+ ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
if (ret) {
- ath10k_warn("failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
addr, vdev_id, ret);
return ret;
}
ret = ath10k_wmi_pdev_set_param(ar, param,
ATH10K_KICKOUT_THRESHOLD);
if (ret) {
- ath10k_warn("failed to set kickout threshold on vdev %i: %d\n",
+ ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MIN_IDLE);
if (ret) {
- ath10k_warn("failed to set keepalive minimum idle time on vdev %i: %d\n",
+ ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_IDLE);
if (ret) {
- ath10k_warn("failed to set keepalive maximum idle time on vdev %i: %d\n",
+ ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
if (ret) {
- ath10k_warn("failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
if (peer->vdev_id != vdev_id)
continue;
- ath10k_warn("removing stale peer %pM from vdev_id %d\n",
+ ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
list_del(&peer->list);
{
lockdep_assert_held(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac monitor refs: promisc %d monitor %d cac %d\n",
ar->promisc, ar->monitor,
test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags));
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
- ath10k_warn("failed to request monitor vdev %i start: %d\n",
+ ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("failed to synchronize setup for monitor vdev %i: %d\n",
+ ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i: %d\n",
vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
- ath10k_warn("failed to put up monitor vdev %i: %d\n",
+ ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
goto vdev_stop;
}
ar->monitor_vdev_id = vdev_id;
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
ar->monitor_vdev_id);
return 0;
vdev_stop:
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("failed to stop monitor vdev %i after start failure: %d\n",
+ ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
ar->monitor_vdev_id, ret);
return ret;
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("failed to put down monitor vdev %i: %d\n",
+ ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
- ath10k_warn("failed to to request monitor vdev %i stop: %d\n",
+ ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
ar->monitor_vdev_id, ret);
ret = ath10k_vdev_setup_sync(ar);
if (ret)
- ath10k_warn("failed to synchronise monitor vdev %i: %d\n",
+ ath10k_warn(ar, "failed to synchronise monitor vdev %i: %d\n",
ar->monitor_vdev_id, ret);
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
ar->monitor_vdev_id);
return ret;
}
lockdep_assert_held(&ar->conf_mutex);
if (ar->free_vdev_map == 0) {
- ath10k_warn("failed to find free vdev id for monitor vdev\n");
+ ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
return -ENOMEM;
}
WMI_VDEV_TYPE_MONITOR,
0, ar->mac_addr);
if (ret) {
- ath10k_warn("failed to request monitor vdev %i creation: %d\n",
+ ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
return 0;
ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
if (ret) {
- ath10k_warn("failed to request wmi monitor vdev %i removal: %d\n",
+ ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
ar->monitor_vdev_id, ret);
return ret;
}
ar->free_vdev_map |= 1 << ar->monitor_vdev_id;
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
return ret;
}
lockdep_assert_held(&ar->conf_mutex);
if (!ath10k_monitor_is_enabled(ar)) {
- ath10k_warn("trying to start monitor with no references\n");
+ ath10k_warn(ar, "trying to start monitor with no references\n");
return 0;
}
if (ar->monitor_started) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor already started\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor already started\n");
return 0;
}
ret = ath10k_monitor_vdev_create(ar);
if (ret) {
- ath10k_warn("failed to create monitor vdev: %d\n", ret);
+ ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
return ret;
}
ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
if (ret) {
- ath10k_warn("failed to start monitor vdev: %d\n", ret);
+ ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
ath10k_monitor_vdev_delete(ar);
return ret;
}
ar->monitor_started = true;
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor started\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
return 0;
}
lockdep_assert_held(&ar->conf_mutex);
if (ath10k_monitor_is_enabled(ar)) {
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac monitor will be stopped later\n");
return;
}
if (!ar->monitor_started) {
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac monitor probably failed to start earlier\n");
return;
}
ret = ath10k_monitor_vdev_stop(ar);
if (ret)
- ath10k_warn("failed to stop monitor vdev: %d\n", ret);
+ ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
ret = ath10k_monitor_vdev_delete(ar);
if (ret)
- ath10k_warn("failed to delete monitor vdev: %d\n", ret);
+ ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
ar->monitor_started = false;
- ath10k_dbg(ATH10K_DBG_MAC, "mac monitor stopped\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
}
static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
ret = ath10k_monitor_start(ar);
if (ret) {
- ath10k_warn("failed to start monitor (cac): %d\n", ret);
+ ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
return ret;
}
- ath10k_dbg(ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
ar->monitor_vdev_id);
return 0;
clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
ath10k_monitor_stop(ar);
- ath10k_dbg(ATH10K_DBG_MAC, "mac cac finished\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
return 0;
}
* radiation is not allowed, make this channel DFS_UNAVAILABLE
* by indicating that radar was detected.
*/
- ath10k_warn("failed to start CAC: %d\n", ret);
+ ath10k_warn(ar, "failed to start CAC: %d\n", ret);
ieee80211_radar_detected(ar->hw);
}
}
arg.ssid_len = arvif->vif->bss_conf.ssid_len;
}
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d start center_freq %d phymode %s\n",
arg.vdev_id, arg.channel.freq,
ath10k_wmi_phymode_str(arg.channel.mode));
ret = ath10k_wmi_vdev_start(ar, &arg);
if (ret) {
- ath10k_warn("failed to start WMI vdev %i: %d\n",
+ ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
arg.vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("failed to synchronise setup for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to synchronise setup for vdev %i: %d\n",
arg.vdev_id, ret);
return ret;
}
ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
if (ret) {
- ath10k_warn("failed to stop WMI vdev %i: %d\n",
+ ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_vdev_setup_sync(ar);
if (ret) {
- ath10k_warn("failed to syncronise setup for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to syncronise setup for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
static void ath10k_control_beaconing(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info)
{
+ struct ath10k *ar = arvif->ar;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
- ath10k_warn("failed to bring up vdev %d: %i\n",
+ ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
ath10k_vdev_stop(arvif);
return;
arvif->is_started = true;
arvif->is_up = true;
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
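/*
 * Summary of the pattern used in these hunks (assuming no other hunks change
 * it): functions that previously logged without a device handle now derive a
 * local "ar" once before their first ath10k_dbg()/ath10k_warn() call, e.g.:
 *
 *	struct ath10k *ar = htt->ar;	in the HTT tx/rx helpers
 *	struct ath10k *ar = arvif->ar;	in per-vif mac helpers
 *	struct ath10k *ar = hw->priv;	in ieee80211_ops callbacks
 */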
static void ath10k_control_ibss(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info,
const u8 self_peer[ETH_ALEN])
{
+ struct ath10k *ar = arvif->ar;
u32 vdev_param;
int ret = 0;
if (!info->ibss_joined) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
if (ret)
- ath10k_warn("failed to delete IBSS self peer %pM for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to delete IBSS self peer %pM for vdev %d: %d\n",
self_peer, arvif->vdev_id, ret);
if (is_zero_ether_addr(arvif->bssid))
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
arvif->bssid);
if (ret) {
- ath10k_warn("failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
arvif->bssid, arvif->vdev_id, ret);
return;
}
ret = ath10k_peer_create(arvif->ar, arvif->vdev_id, self_peer);
if (ret) {
- ath10k_warn("failed to create IBSS self peer %pM for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to create IBSS self peer %pM for vdev %d: %d\n",
self_peer, arvif->vdev_id, ret);
return;
}
ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
if (ret)
- ath10k_warn("failed to set IBSS ATIM for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
arvif->vdev_id, ret);
}
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
- ath10k_warn("failed to set inactivity time for vdev %d: %i\n",
+ ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
arvif->vdev_id, ret);
return ret;
}
psmode = WMI_STA_PS_MODE_DISABLED;
}
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
arvif->vdev_id, psmode ? "enable" : "disable");
ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
if (ret) {
- ath10k_warn("failed to set PS Mode %d for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
psmode, arvif->vdev_id, ret);
return ret;
}
/* FIXME: base on RSN IE/WPA IE is a correct idea? */
if (rsnie || wpaie) {
- ath10k_dbg(ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
}
if (wpaie) {
- ath10k_dbg(ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
}
}
arg->peer_num_spatial_streams = sta->rx_nss;
}
- ath10k_dbg(ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
arg->addr,
arg->peer_ht_rates.num_rates,
arg->peer_num_spatial_streams);
lockdep_assert_held(&ar->conf_mutex);
if (sta->wme && sta->uapsd_queues) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
sta->uapsd_queues, sta->max_sp);
if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
WMI_AP_PS_PEER_PARAM_UAPSD,
uapsd);
if (ret) {
- ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
WMI_AP_PS_PEER_PARAM_MAX_SP,
max_sp);
if (ret) {
- ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
if (ret) {
- ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
arg->peer_vht_rates.tx_mcs_set =
__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
- ath10k_dbg(ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
sta->addr, arg->peer_max_mpdu, arg->peer_flags);
}
break;
}
- ath10k_dbg(ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
sta->addr, ath10k_wmi_phymode_str(phymode));
arg->peer_phymode = phymode;
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!ap_sta) {
- ath10k_warn("failed to find station entry for bss %pM vdev %i\n",
+ ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
bss_conf->bssid, arvif->vdev_id);
rcu_read_unlock();
return;
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
bss_conf, &peer_arg);
if (ret) {
- ath10k_warn("failed to prepare peer assoc for %pM vdev %i: %d\n",
+ ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
rcu_read_unlock();
return;
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("failed to run peer assoc for %pM vdev %i: %d\n",
+ ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
return;
}
ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
if (ret) {
- ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
arvif->vdev_id, ret);
return;
}
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
if (ret) {
- ath10k_warn("failed to set vdev %d up: %d\n",
+ ath10k_warn(ar, "failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
return;
}
* No idea why this happens, even though VDEV-DOWN is supposed
* to be analogous to link down, so just stop the VDEV.
*/
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
arvif->vdev_id);
/* FIXME: check return value */
* interfaces as it expects there is no rx when no interface is
* running.
*/
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
/* FIXME: why don't we print error if wmi call fails? */
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
if (ret) {
- ath10k_warn("failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
+ ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
peer_arg.peer_reassoc = reassoc;
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
- ath10k_warn("failed to run peer assoc for STA %pM vdev %i: %d\n",
+ ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
if (ret) {
- ath10k_warn("failed to setup peer SMPS for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
arvif->num_legacy_stations++;
ret = ath10k_recalc_rtscts_prot(arvif);
if (ret) {
- ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("failed to install peer wep keys for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
if (ret) {
- ath10k_warn("failed to set qos params for STA %pM for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
arvif->num_legacy_stations--;
ret = ath10k_recalc_rtscts_prot(arvif);
if (ret) {
- ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_clear_peer_keys(arvif, sta->addr);
if (ret) {
- ath10k_warn("failed to clear all peer wep keys for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
continue;
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
ch - arg.channels, arg.n_channels,
ch->freq, ch->max_power, ch->max_reg_power,
ret = ath10k_update_channel_list(ar);
if (ret)
- ath10k_warn("failed to update channel list: %d\n", ret);
+ ath10k_warn(ar, "failed to update channel list: %d\n", ret);
regpair = ar->ath_common.regulatory.regpair;
regpair->reg_5ghz_ctl,
wmi_dfs_reg);
if (ret)
- ath10k_warn("failed to set pdev regdomain: %d\n", ret);
+ ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
}
static void ath10k_reg_notifier(struct wiphy *wiphy,
ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
- ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
request->dfs_region);
result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
request->dfs_region);
if (!result)
- ath10k_warn("DFS region 0x%X not supported, will trigger radar for every pulse\n",
+ ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
request->dfs_region);
}
if (ar->monitor_started)
return ar->monitor_vdev_id;
- ath10k_warn("failed to resolve vdev id\n");
+ ath10k_warn(ar, "failed to resolve vdev id\n");
return 0;
}
{
struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
wep_key_work);
+ struct ath10k *ar = arvif->ar;
int ret, keyidx = arvif->def_wep_key_newidx;
mutex_lock(&arvif->ar->conf_mutex);
if (arvif->def_wep_key_idx == keyidx)
goto unlock;
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
arvif->vdev_id, keyidx);
ret = ath10k_wmi_vdev_set_param(arvif->ar,
arvif->ar->wmi.vdev_param->def_keyid,
keyidx);
if (ret) {
- ath10k_warn("failed to update wep key index for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
arvif->vdev_id,
ret);
goto unlock;
ar->fw_features)) {
if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
ATH10K_MAX_NUM_MGMT_PENDING) {
- ath10k_warn("reached WMI management transmit queue limit\n");
+ ath10k_warn(ar, "reached WMI management transmit queue limit\n");
ret = -EBUSY;
goto exit;
}
exit:
if (ret) {
- ath10k_warn("failed to transmit packet, dropping: %d\n", ret);
+ ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
+ ret);
ieee80211_free_txskb(ar->hw, skb);
}
}
mutex_lock(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC, "mac offchannel skb %p\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n",
skb);
hdr = (struct ieee80211_hdr *)skb->data;
if (peer)
/* FIXME: should this use ath10k_warn()? */
- ath10k_dbg(ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
peer_addr, vdev_id);
if (!peer) {
ret = ath10k_peer_create(ar, vdev_id, peer_addr);
if (ret)
- ath10k_warn("failed to create peer %pM on vdev %d: %d\n",
+ ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
}
ret = wait_for_completion_timeout(&ar->offchan_tx_completed,
3 * HZ);
if (ret <= 0)
- ath10k_warn("timed out waiting for offchannel skb %p\n",
+ ath10k_warn(ar, "timed out waiting for offchannel skb %p\n",
skb);
if (!peer) {
ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
if (ret)
- ath10k_warn("failed to delete peer %pM on vdev %d: %d\n",
+ ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
peer_addr, vdev_id, ret);
}
ret = ath10k_wmi_mgmt_tx(ar, skb);
if (ret) {
- ath10k_warn("failed to transmit management frame via WMI: %d\n",
+ ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
ret);
ieee80211_free_txskb(ar->hw, skb);
}
ret = ath10k_wmi_stop_scan(ar, &arg);
if (ret) {
- ath10k_warn("failed to stop wmi scan: %d\n", ret);
+ ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
goto out;
}
ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
if (ret == 0) {
- ath10k_warn("failed to receive scan abortion completion: timed out\n");
+ ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
ret = -ETIMEDOUT;
} else if (ret > 0) {
ret = 0;
break;
case ATH10K_SCAN_STARTING:
case ATH10K_SCAN_ABORTING:
- ath10k_warn("refusing scan abortion due to invalid scan state: %s (%d)\n",
+ ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
ath10k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
ret = ath10k_scan_stop(ar);
if (ret)
- ath10k_warn("failed to abort scan: %d\n", ret);
+ ath10k_warn(ar, "failed to abort scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
break;
if (ret == 0) {
ret = ath10k_scan_stop(ar);
if (ret)
- ath10k_warn("failed to stop scan: %d\n", ret);
+ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
return -ETIMEDOUT;
}
/* We should disable CCK RATE due to P2P */
if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
- ath10k_dbg(ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr);
ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
spin_unlock_bh(&ar->data_lock);
- ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+ skb);
skb_queue_tail(&ar->offchan_tx_queue, skb);
ieee80211_queue_work(hw, &ar->offchan_tx_work);
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
tx_ant);
if (ret) {
- ath10k_warn("failed to set tx-chainmask: %d, req 0x%x\n",
+ ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
ret, tx_ant);
return ret;
}
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
rx_ant);
if (ret) {
- ath10k_warn("failed to set rx-chainmask: %d, req 0x%x\n",
+ ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
ret, rx_ant);
return ret;
}
ret = ath10k_hif_power_up(ar);
if (ret) {
- ath10k_err("Could not init hif: %d\n", ret);
+ ath10k_err(ar, "Could not init hif: %d\n", ret);
goto err_off;
}
ret = ath10k_core_start(ar);
if (ret) {
- ath10k_err("Could not init core: %d\n", ret);
+ ath10k_err(ar, "Could not init core: %d\n", ret);
goto err_power_down;
}
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
if (ret) {
- ath10k_warn("failed to enable PMF QOS: %d\n", ret);
+ ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
goto err_core_stop;
}
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1);
if (ret) {
- ath10k_warn("failed to enable dynamic BW: %d\n", ret);
+ ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
goto err_core_stop;
}
ret = ath10k_wmi_pdev_set_param(ar,
ar->wmi.pdev_param->arp_ac_override, 0);
if (ret) {
- ath10k_warn("failed to set arp ac override parameter: %d\n",
+ ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
ret);
goto err_core_stop;
}
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_mac_vif_setup_ps(arvif);
if (ret) {
- ath10k_warn("failed to setup powersave: %d\n", ret);
+ ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
break;
}
}
lockdep_assert_held(&ar->conf_mutex);
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac config channel to %dMHz (cf1 %dMHz cf2 %dMHz width %s)\n",
ar->chandef.chan->center_freq,
ar->chandef.center_freq1,
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret) {
- ath10k_warn("failed to down vdev %d: %d\n",
+ ath10k_warn(ar, "failed to down vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
ret = ath10k_vdev_restart(arvif);
if (ret) {
- ath10k_warn("failed to restart vdev %d: %d\n",
+ ath10k_warn(ar, "failed to restart vdev %d: %d\n",
arvif->vdev_id, ret);
continue;
}
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
- ath10k_warn("failed to bring vdev up %d: %d\n",
+ ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac config channel %dMHz flags 0x%x radar %d\n",
conf->chandef.chan->center_freq,
conf->chandef.chan->flags,
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac config power %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n",
hw->conf.power_level);
param = ar->wmi.pdev_param->txpower_limit2g;
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
- ath10k_warn("failed to set 2g txpower %d: %d\n",
+ ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
hw->conf.power_level, ret);
param = ar->wmi.pdev_param->txpower_limit5g;
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
- ath10k_warn("failed to set 5g txpower %d: %d\n",
+ ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
hw->conf.power_level, ret);
}
ar->monitor = true;
ret = ath10k_monitor_start(ar);
if (ret) {
- ath10k_warn("failed to start monitor (config): %d\n",
+ ath10k_warn(ar, "failed to start monitor (config): %d\n",
ret);
ar->monitor = false;
}
INIT_LIST_HEAD(&arvif->list);
if (ar->free_vdev_map == 0) {
- ath10k_warn("Free vdev map is empty, no more interfaces allowed.\n");
+ ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
ret = -EBUSY;
goto err;
}
break;
}
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
if (ret) {
- ath10k_warn("failed to create WMI vdev %i: %d\n",
+ ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
arvif->vdev_id, ret);
goto err;
}
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
arvif->def_wep_key_idx);
if (ret) {
- ath10k_warn("failed to set vdev %i default key id: %d\n",
+ ath10k_warn(ar, "failed to set vdev %i default key id: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
ATH10K_HW_TXRX_NATIVE_WIFI);
/* 10.X firmware does not support this VDEV parameter. Do not warn */
if (ret && ret != -EOPNOTSUPP) {
- ath10k_warn("failed to set vdev %i TX encapsulation: %d\n",
+ ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
- ath10k_warn("failed to create vdev %i peer for AP: %d\n",
+ ath10k_warn(ar, "failed to create vdev %i peer for AP: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
ret = ath10k_mac_set_kickout(arvif);
if (ret) {
- ath10k_warn("failed to set vdev %i kickout parameters: %d\n",
+ ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("failed to set vdev %i RX wake policy: %d\n",
+ ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("failed to set vdev %i TX wake thresh: %d\n",
+ ath10k_warn(ar, "failed to set vdev %i TX wake thresh: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret) {
- ath10k_warn("failed to set vdev %i PSPOLL count: %d\n",
+ ath10k_warn(ar, "failed to set vdev %i PSPOLL count: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
if (ret) {
- ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
if (ret) {
- ath10k_warn("failed to set frag threshold for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_spectral_vif_stop(arvif);
if (ret)
- ath10k_warn("failed to stop spectral for vdev %i: %d\n",
+ ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
arvif->vdev_id, ret);
ar->free_vdev_map |= 1 << arvif->vdev_id;
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
if (ret)
- ath10k_warn("failed to remove peer for AP vdev %i: %d\n",
+ ath10k_warn(ar, "failed to remove peer for AP vdev %i: %d\n",
arvif->vdev_id, ret);
kfree(arvif->u.ap.noa_data);
}
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
arvif->vdev_id);
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
if (ret)
- ath10k_warn("failed to delete WMI vdev %i: %d\n",
+ ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
ath10k_peer_cleanup(ar, arvif->vdev_id);
ar->promisc = true;
ret = ath10k_monitor_start(ar);
if (ret) {
- ath10k_warn("failed to start monitor (promisc): %d\n",
+ ath10k_warn(ar, "failed to start monitor (promisc): %d\n",
ret);
ar->promisc = false;
}
vdev_param = ar->wmi.vdev_param->beacon_interval;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->beacon_interval);
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d beacon_interval %d\n",
arvif->vdev_id, arvif->beacon_interval);
if (ret)
- ath10k_warn("failed to set beacon interval for vdev %d: %i\n",
+ ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BEACON) {
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"vdev %d set beacon tx mode to staggered\n",
arvif->vdev_id);
ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
- ath10k_warn("failed to set beacon mode for vdev %d: %i\n",
+ ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BEACON_INFO) {
arvif->dtim_period = info->dtim_period;
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d dtim_period %d\n",
arvif->vdev_id, arvif->dtim_period);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
- ath10k_warn("failed to set dtim period for vdev %d: %i\n",
+ ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
arvif->vdev_id, ret);
}
if (changed & BSS_CHANGED_BSSID &&
vif->type != NL80211_IFTYPE_AP) {
if (!is_zero_ether_addr(info->bssid)) {
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d create peer %pM\n",
arvif->vdev_id, info->bssid);
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
- ath10k_warn("failed to add peer %pM for vdev %d when changing bssid: %i\n",
+ ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n",
info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
*/
memcpy(arvif->bssid, info->bssid, ETH_ALEN);
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d start %pM\n",
arvif->vdev_id, info->bssid);
ret = ath10k_vdev_start(arvif);
if (ret) {
- ath10k_warn("failed to start vdev %i: %d\n",
+ ath10k_warn(ar, "failed to start vdev %i: %d\n",
arvif->vdev_id, ret);
goto exit;
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
arvif->use_cts_prot = info->use_cts_prot;
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
arvif->vdev_id, info->use_cts_prot);
ret = ath10k_recalc_rtscts_prot(arvif);
if (ret)
- ath10k_warn("failed to recalculate rts/cts prot for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
}
else
slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
arvif->vdev_id, slottime);
vdev_param = ar->wmi.vdev_param->slot_time;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
- ath10k_warn("failed to set erp slot for vdev %d: %i\n",
+ ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
arvif->vdev_id, ret);
}
else
preamble = WMI_VDEV_PREAMBLE_LONG;
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d preamble %dn",
arvif->vdev_id, preamble);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
- ath10k_warn("failed to set preamble for vdev %d: %i\n",
+ ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
arvif->vdev_id, ret);
}
ret = ath10k_start_scan(ar, &arg);
if (ret) {
- ath10k_warn("failed to start hw scan: %d\n", ret);
+ ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH10K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
key->keyidx);
if (ret)
- ath10k_warn("failed to set vdev %i group key as default key: %d\n",
+ ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
arvif->vdev_id, ret);
}
if (!peer) {
if (cmd == SET_KEY) {
- ath10k_warn("failed to install key for non-existent peer %pM\n",
+ ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
peer_addr);
ret = -EOPNOTSUPP;
goto exit;
ret = ath10k_install_key(arvif, key, cmd, peer_addr);
if (ret) {
- ath10k_warn("failed to install key for vdev %i peer %pM: %d\n",
+ ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
arvif->vdev_id, peer_addr, ret);
goto exit;
}
peer->keys[key->keyidx] = NULL;
else if (peer == NULL)
/* impossible unless FW goes crazy */
- ath10k_warn("Peer %pM disappeared!\n", peer_addr);
+ ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
spin_unlock_bh(&ar->data_lock);
exit:
mutex_lock(&ar->conf_mutex);
if (changed & IEEE80211_RC_BW_CHANGED) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
sta->addr, bw);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
WMI_PEER_CHAN_WIDTH, bw);
if (err)
- ath10k_warn("failed to update STA %pM peer bw %d: %d\n",
+ ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
sta->addr, bw, err);
}
if (changed & IEEE80211_RC_NSS_CHANGED) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
sta->addr, nss);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
WMI_PEER_NSS, nss);
if (err)
- ath10k_warn("failed to update STA %pM nss %d: %d\n",
+ ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
sta->addr, nss, err);
}
if (changed & IEEE80211_RC_SMPS_CHANGED) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
sta->addr, smps);
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
WMI_PEER_SMPS_STATE, smps);
if (err)
- ath10k_warn("failed to update STA %pM smps %d: %d\n",
+ ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
sta->addr, smps, err);
}
if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif, sta, true);
if (err)
- ath10k_warn("failed to reassociate station: %pM\n",
+ ath10k_warn(ar, "failed to reassociate station: %pM\n",
sta->addr);
}
max_num_peers = TARGET_NUM_PEERS;
if (ar->num_peers >= max_num_peers) {
- ath10k_warn("number of peers exceeded: peers number %d (max peers %d)\n",
+ ath10k_warn(ar, "number of peers exceeded: peers number %d (max peers %d)\n",
ar->num_peers, max_num_peers);
ret = -ENOBUFS;
goto exit;
}
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer create %pM (new sta) num_peers %d\n",
arvif->vdev_id, sta->addr, ar->num_peers);
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
* Existing station deletion.
*/
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
- ath10k_warn("failed to delete peer %pM for vdev %d: %i\n",
+ ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION)
/*
* New association.
*/
- ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM associated\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
sta->addr);
ret = ath10k_station_assoc(ar, arvif, sta, false);
if (ret)
- ath10k_warn("failed to associate station %pM for vdev %i: %i\n",
+ ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
/*
* Disassociation.
*/
- ath10k_dbg(ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
sta->addr);
ret = ath10k_station_disassoc(ar, arvif, sta);
if (ret)
- ath10k_warn("failed to disassociate station: %pM vdev %i: %i\n",
+ ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
}
exit:
WMI_STA_PS_PARAM_UAPSD,
arvif->u.sta.uapsd);
if (ret) {
- ath10k_warn("failed to set uapsd params: %d\n", ret);
+ ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
goto exit;
}
WMI_STA_PS_PARAM_RX_WAKE_POLICY,
value);
if (ret)
- ath10k_warn("failed to set rx wake param: %d\n", ret);
+ ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
exit:
return ret;
/* FIXME: FW accepts wmm params per hw, not per vif */
ret = ath10k_wmi_pdev_set_wmm_params(ar, &ar->wmm_params);
if (ret) {
- ath10k_warn("failed to set wmm params: %d\n", ret);
+ ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
goto exit;
}
ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
if (ret)
- ath10k_warn("failed to set sta uapsd: %d\n", ret);
+ ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
exit:
mutex_unlock(&ar->conf_mutex);
ret = ath10k_start_scan(ar, &arg);
if (ret) {
- ath10k_warn("failed to start roc scan: %d\n", ret);
+ ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
ar->scan.state = ATH10K_SCAN_IDLE;
spin_unlock_bh(&ar->data_lock);
ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
if (ret == 0) {
- ath10k_warn("failed to switch to channel for roc scan\n");
+ ath10k_warn(ar, "failed to switch to channel for roc scan\n");
ret = ath10k_scan_stop(ar);
if (ret)
- ath10k_warn("failed to stop scan: %d\n", ret);
+ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
ret = -ETIMEDOUT;
goto exit;
mutex_lock(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
arvif->vdev_id, value);
ret = ath10k_mac_set_rts(arvif, value);
if (ret) {
- ath10k_warn("failed to set rts threshold for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
break;
}
mutex_lock(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
- ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
arvif->vdev_id, value);
ret = ath10k_mac_set_rts(arvif, value);
if (ret) {
- ath10k_warn("failed to set fragmentation threshold for vdev %d: %d\n",
+ ath10k_warn(ar, "failed to set fragmentation threshold for vdev %d: %d\n",
arvif->vdev_id, ret);
break;
}
}), ATH10K_FLUSH_TIMEOUT_HZ);
if (ret <= 0 || skip)
- ath10k_warn("failed to flush transmit queue (skip %i ar-state %i): %i\n",
+ ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
skip, ar->state, ret);
skip:
ret = ath10k_hif_suspend(ar);
if (ret) {
- ath10k_warn("failed to suspend hif: %d\n", ret);
+ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
goto resume;
}
resume:
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret)
- ath10k_warn("failed to resume target: %d\n", ret);
+ ath10k_warn(ar, "failed to resume target: %d\n", ret);
ret = 1;
exit:
ret = ath10k_hif_resume(ar);
if (ret) {
- ath10k_warn("failed to resume hif: %d\n", ret);
+ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
ret = 1;
goto exit;
}
ret = ath10k_wmi_pdev_resume_target(ar);
if (ret) {
- ath10k_warn("failed to resume target: %d\n", ret);
+ ath10k_warn(ar, "failed to resume target: %d\n", ret);
ret = 1;
goto exit;
}
/* If device failed to restart it will be in a different state, e.g.
* ATH10K_STATE_WEDGED */
if (ar->state == ATH10K_STATE_RESTARTED) {
- ath10k_info("device successfully recovered\n");
+ ath10k_info(ar, "device successfully recovered\n");
ar->state = ATH10K_STATE_ON;
}
}
static bool
-ath10k_bitrate_mask_rate(const struct cfg80211_bitrate_mask *mask,
+ath10k_bitrate_mask_rate(struct ath10k *ar,
+ const struct cfg80211_bitrate_mask *mask,
enum ieee80211_band band,
u8 *fixed_rate,
u8 *fixed_nss)
nss <<= 4;
pream <<= 6;
- ath10k_dbg(ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac fixed rate pream 0x%02x nss 0x%02x rate 0x%02x\n",
pream, nss, rate);
*fixed_rate = pream | nss | rate;
return true;
}
-static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
+static bool ath10k_get_fixed_rate_nss(struct ath10k *ar,
+ const struct cfg80211_bitrate_mask *mask,
enum ieee80211_band band,
u8 *fixed_rate,
u8 *fixed_nss)
return true;
/* Next Check single rate is set */
- return ath10k_bitrate_mask_rate(mask, band, fixed_rate, fixed_nss);
+ return ath10k_bitrate_mask_rate(ar, mask, band, fixed_rate, fixed_nss);
}
static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
goto exit;
if (fixed_rate == WMI_FIXED_RATE_NONE)
- ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
if (force_sgi)
- ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac force sgi\n");
vdev_param = ar->wmi.vdev_param->fixed_rate;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
vdev_param, fixed_rate);
if (ret) {
- ath10k_warn("failed to set fixed rate param 0x%02x: %d\n",
+ ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
fixed_rate, ret);
ret = -EINVAL;
goto exit;
vdev_param, fixed_nss);
if (ret) {
- ath10k_warn("failed to set fixed nss param %d: %d\n",
+ ath10k_warn(ar, "failed to set fixed nss param %d: %d\n",
fixed_nss, ret);
ret = -EINVAL;
goto exit;
force_sgi);
if (ret) {
- ath10k_warn("failed to set sgi param %d: %d\n",
+ ath10k_warn(ar, "failed to set sgi param %d: %d\n",
force_sgi, ret);
ret = -EINVAL;
goto exit;
return -EINVAL;
if (!ath10k_default_bitrate_mask(ar, band, mask)) {
- if (!ath10k_get_fixed_rate_nss(mask, band,
+ if (!ath10k_get_fixed_rate_nss(ar, mask, band,
&fixed_rate,
&fixed_nss))
return -EINVAL;
}
if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
- ath10k_warn("failed to force SGI usage for default rate settings\n");
+ ath10k_warn(ar, "failed to force SGI usage for default rate settings\n");
return -EINVAL;
}
spin_lock_bh(&ar->data_lock);
- ath10k_dbg(ATH10K_DBG_MAC,
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
sta->addr, changed, sta->bandwidth, sta->rx_nss,
sta->smps_mode);
bw = WMI_PEER_CHWIDTH_80MHZ;
break;
case IEEE80211_STA_RX_BW_160:
- ath10k_warn("Invalid bandwith %d in rc update for %pM\n",
+ ath10k_warn(ar, "Invalid bandwith %d in rc update for %pM\n",
sta->bandwidth, sta->addr);
bw = WMI_PEER_CHWIDTH_20MHZ;
break;
smps = WMI_PEER_SMPS_DYNAMIC;
break;
case IEEE80211_SMPS_NUM_MODES:
- ath10k_warn("Invalid smps %d in sta rc update for %pM\n",
+ ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
sta->smps_mode, sta->addr);
smps = WMI_PEER_SMPS_PS_NONE;
break;
struct ieee80211_sta *sta, u16 tid, u16 *ssn,
u8 buf_size)
{
+ struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
- ath10k_dbg(ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
arvif->vdev_id, sta->addr, tid, action);
switch (action) {
ath10k_get_arvif_iter,
&arvif_iter);
if (!arvif_iter.arvif) {
- ath10k_warn("No VIF found for vdev %d\n", vdev_id);
+ ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
return NULL;
}
NL80211_DFS_UNSET);
if (!ar->dfs_detector)
- ath10k_warn("failed to initialise DFS pattern detector\n");
+ ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
}
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
- ath10k_err("failed to initialise regulatory: %i\n", ret);
+ ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
goto err_free;
}
ret = ieee80211_register_hw(ar->hw);
if (ret) {
- ath10k_err("failed to register ieee80211: %d\n", ret);
+ ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
goto err_free;
}
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ar->dev, paddr))) {
- ath10k_warn("failed to dma map pci rx buf\n");
+ ath10k_warn(ar, "failed to dma map pci rx buf\n");
dev_kfree_skb_any(skb);
return -EIO;
}
ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
if (ret) {
- ath10k_warn("failed to post pci rx buf: %d\n", ret);
+ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
while (num--) {
ret = __ath10k_pci_rx_post_buf(pipe);
if (ret) {
- ath10k_warn("failed to post pci rx buf: %d\n", ret);
+ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
mod_timer(&ar_pci->rx_post_retry, jiffies +
ATH10K_PCI_RX_POST_RETRY_MS);
break;
__le32_to_cpu(((__le32 *)data_buf)[i]);
}
} else
- ath10k_warn("failed to read diag value at 0x%x: %d\n",
+ ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
address, ret);
if (data_buf)
ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
if (ret != 0) {
- ath10k_warn("failed to get memcpy hi address for firmware address %d: %d\n",
+ ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
src, ret);
return ret;
}
ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
if (ret != 0) {
- ath10k_warn("failed to memcpy firmware memory from %d (%d B): %d\n",
+ ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
addr, len, ret);
return ret;
}
}
if (ret != 0)
- ath10k_warn("failed to write diag value at 0x%x: %d\n",
+ ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
address, ret);
return ret;
max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) {
- ath10k_warn("rxed more than expected (nbytes %d, max %d)",
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
nbytes, max_nbytes);
dev_kfree_skb_any(skb);
continue;
}
for (i = 0; i < n_items - 1; i++) {
- ath10k_dbg(ATH10K_DBG_PCI,
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
"pci tx item %d paddr 0x%08x len %d n_items %d\n",
i, items[i].paddr, items[i].len, n_items);
- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
items[i].vaddr, items[i].len);
err = ath10k_ce_send_nolock(ce_pipe,
/* `i` is equal to `n_items -1` after for() */
- ath10k_dbg(ATH10K_DBG_PCI,
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
"pci tx item %d paddr 0x%08x len %d n_items %d\n",
i, items[i].paddr, items[i].len, n_items);
- ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
items[i].vaddr, items[i].len);
err = ath10k_ce_send_nolock(ce_pipe,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
hi_failure_state,
REG_DUMP_COUNT_QCA988X * sizeof(u32));
if (ret) {
- ath10k_err("failed to read firmware dump area: %d\n", ret);
+ ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
return;
}
BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
- ath10k_err("firmware register dump:\n");
+ ath10k_err(ar, "firmware register dump:\n");
for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
- ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
i,
reg_dump_values[i],
reg_dump_values[i + 1],
else
scnprintf(uuid, sizeof(uuid), "n/a");
- ath10k_err("firmware crashed! (uuid %s)\n", uuid);
+ ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
ath10k_print_driver_info(ar);
if (!crash_data)
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
int force)
{
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
if (!force) {
int resources;
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
memcpy(&ar_pci->msg_callbacks_current, callbacks,
sizeof(ar_pci->msg_callbacks_current));
{
int ret = 0;
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
/* polling for received messages not supported */
*dl_is_polled = 0;
{
int ul_is_polled, dl_is_polled;
- ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
(void)ath10k_pci_hif_map_service_to_pipe(ar,
ATH10K_HTC_SVC_ID_RSVD_CTRL,
static int ath10k_pci_hif_start(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
ath10k_pci_irq_enable(ar);
ath10k_pci_rx_post(ar);
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
ath10k_pci_irq_disable(ar);
ath10k_pci_flush(ar);
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
+ struct ath10k *ar = ce_state->ar;
struct bmi_xfer *xfer;
u32 ce_data;
unsigned int nbytes;
return;
if (!xfer->wait_for_resp) {
- ath10k_warn("unexpected: BMI data received; ignoring\n");
+ ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
return;
}
CORE_CTRL_ADDRESS,
&core_ctrl);
if (ret) {
- ath10k_warn("failed to read core_ctrl: %d\n", ret);
+ ath10k_warn(ar, "failed to read core_ctrl: %d\n", ret);
return ret;
}
CORE_CTRL_ADDRESS,
core_ctrl);
if (ret) {
- ath10k_warn("failed to set target CPU interrupt mask: %d\n",
+ ath10k_warn(ar, "failed to set target CPU interrupt mask: %d\n",
ret);
return ret;
}
ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
&pcie_state_targ_addr);
if (ret != 0) {
- ath10k_err("Failed to get pcie state addr: %d\n", ret);
+ ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
return ret;
}
if (pcie_state_targ_addr == 0) {
ret = -EIO;
- ath10k_err("Invalid pcie state addr\n");
+ ath10k_err(ar, "Invalid pcie state addr\n");
return ret;
}
pipe_cfg_addr),
&pipe_cfg_targ_addr);
if (ret != 0) {
- ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
+ ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
return ret;
}
if (pipe_cfg_targ_addr == 0) {
ret = -EIO;
- ath10k_err("Invalid pipe cfg addr\n");
+ ath10k_err(ar, "Invalid pipe cfg addr\n");
return ret;
}
sizeof(target_ce_config_wlan));
if (ret != 0) {
- ath10k_err("Failed to write pipe cfg: %d\n", ret);
+ ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
return ret;
}
svc_to_pipe_map),
&svc_to_pipe_map);
if (ret != 0) {
- ath10k_err("Failed to get svc/pipe map: %d\n", ret);
+ ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
return ret;
}
if (svc_to_pipe_map == 0) {
ret = -EIO;
- ath10k_err("Invalid svc_to_pipe map\n");
+ ath10k_err(ar, "Invalid svc_to_pipe map\n");
return ret;
}
target_service_to_ce_map_wlan,
sizeof(target_service_to_ce_map_wlan));
if (ret != 0) {
- ath10k_err("Failed to write svc/pipe map: %d\n", ret);
+ ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
return ret;
}
config_flags),
&pcie_config_flags);
if (ret != 0) {
- ath10k_err("Failed to get pcie config_flags: %d\n", ret);
+ ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
return ret;
}
&pcie_config_flags,
sizeof(pcie_config_flags));
if (ret != 0) {
- ath10k_err("Failed to write pcie config_flags: %d\n", ret);
+ ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
return ret;
}
ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
if (ret != 0) {
- ath10k_err("Faile to get early alloc val: %d\n", ret);
+ ath10k_err(ar, "Faile to get early alloc val: %d\n", ret);
return ret;
}
ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
if (ret != 0) {
- ath10k_err("Failed to set early alloc val: %d\n", ret);
+ ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
return ret;
}
ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
if (ret != 0) {
- ath10k_err("Failed to get option val: %d\n", ret);
+ ath10k_err(ar, "Failed to get option val: %d\n", ret);
return ret;
}
ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
if (ret != 0) {
- ath10k_err("Failed to set option val: %d\n", ret);
+ ath10k_err(ar, "Failed to set option val: %d\n", ret);
return ret;
}
for (i = 0; i < CE_COUNT; i++) {
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
if (ret) {
- ath10k_err("failed to allocate copy engine pipe %d: %d\n",
+ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
i, ret);
return ret;
}
ath10k_pci_ce_send_done,
ath10k_pci_ce_recv_data);
if (ret) {
- ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
pipe_num, ret);
return ret;
}
{
u32 val;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
+ val);
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CPU_INTR_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
val);
/* disable pending irqs */
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
+ val);
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CPU_INTR_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
val);
/* CPU warm reset */
val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
SOC_RESET_CONTROL_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
+ val);
msleep(100);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
return 0;
}
ret = ath10k_pci_warm_reset(ar);
if (ret) {
- ath10k_err("failed to reset target: %d\n", ret);
+ ath10k_err(ar, "failed to reset target: %d\n", ret);
goto err;
}
ret = ath10k_pci_ce_init(ar);
if (ret) {
- ath10k_err("failed to initialize CE: %d\n", ret);
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
goto err;
}
ret = ath10k_pci_wait_for_target_init(ar);
if (ret) {
- ath10k_err("failed to wait for target to init: %d\n", ret);
+ ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
goto err_ce;
}
ret = ath10k_pci_init_config(ar);
if (ret) {
- ath10k_err("failed to setup init config: %d\n", ret);
+ ath10k_err(ar, "failed to setup init config: %d\n", ret);
goto err_ce;
}
ret = ath10k_pci_wake_target_cpu(ar);
if (ret) {
- ath10k_err("could not wake up target CPU: %d\n", ret);
+ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce;
}
if (ret == 0)
break;
- ath10k_warn("failed to warm reset (attempt %d out of %d): %d\n",
+ ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
}
{
int ret;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
/*
* Hardware CUS232 version 2 has some issues with cold reset and the
*/
ret = ath10k_pci_hif_power_up_warm(ar);
if (ret) {
- ath10k_warn("failed to power up target using warm reset: %d\n",
+ ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
ret);
if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
return ret;
- ath10k_warn("trying cold reset\n");
+ ath10k_warn(ar, "trying cold reset\n");
ret = __ath10k_pci_hif_power_up(ar, true);
if (ret) {
- ath10k_err("failed to power up target using cold reset too (%d)\n",
+ ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
ret);
return ret;
}
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
- ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
ath10k_pci_warm_reset(ar);
}
struct ath10k *ar = (struct ath10k *)data;
if (!ath10k_pci_has_fw_crashed(ar)) {
- ath10k_warn("received unsolicited fw crash interrupt\n");
+ ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
return;
}
int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
- ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
+ ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
+ ce_id);
return IRQ_HANDLED;
}
ath10k_pci_msi_fw_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
+ ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
return ret;
}
ath10k_pci_per_engine_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
+ ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
ar_pci->pdev->irq + i, ret);
for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
ath10k_pci_interrupt_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("failed to request MSI irq %d: %d\n",
+ ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
ar_pci->pdev->irq, ret);
return ret;
}
ath10k_pci_interrupt_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret) {
- ath10k_warn("failed to request legacy irq %d: %d\n",
+ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
ar_pci->pdev->irq, ret);
return ret;
}
return ath10k_pci_request_irq_msix(ar);
}
- ath10k_warn("unknown irq configuration upon request\n");
+ ath10k_warn(ar, "unknown irq configuration upon request\n");
return -EINVAL;
}
ath10k_pci_init_irq_tasklets(ar);
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
- ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
+ ath10k_info(ar, "limiting irq mode to: %d\n",
+ ath10k_pci_irq_mode);
/* Try MSI-X */
if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
pci_disable_msi(ar_pci->pdev);
}
- ath10k_warn("unknown irq configuration upon deinit\n");
+ ath10k_warn(ar, "unknown irq configuration upon deinit\n");
return -EINVAL;
}
unsigned long timeout;
u32 val;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
do {
val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
+ val);
/* target should never return this */
if (val == 0xffffffff)
} while (time_before(jiffies, timeout));
if (val == 0xffffffff) {
- ath10k_err("failed to read device register, device is gone\n");
+ ath10k_err(ar, "failed to read device register, device is gone\n");
return -EIO;
}
if (val & FW_IND_EVENT_PENDING) {
- ath10k_warn("device has crashed during init\n");
+ ath10k_warn(ar, "device has crashed during init\n");
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
return -ECOMM;
}
if (!(val & FW_IND_INITIALIZED)) {
- ath10k_err("failed to receive initialized event from target: %08x\n",
+ ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
val);
return -ETIMEDOUT;
}
- ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
return 0;
}
int i;
u32 val;
- ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
/* Put Target, including PCIe, into RESET. */
val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
msleep(1);
}
- ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
return 0;
}
ret = pci_enable_device(pdev);
if (ret) {
- ath10k_err("failed to enable pci device: %d\n", ret);
+ ath10k_err(ar, "failed to enable pci device: %d\n", ret);
return ret;
}
ret = pci_request_region(pdev, BAR_NUM, "ath");
if (ret) {
- ath10k_err("failed to request region BAR%d: %d\n", BAR_NUM,
+ ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
ret);
goto err_device;
}
/* Target expects 32 bit DMA. Enforce it. */
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- ath10k_err("failed to set dma mask to 32-bit: %d\n", ret);
+ ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
goto err_region;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- ath10k_err("failed to set consistent dma mask to 32-bit: %d\n",
+ ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
ret);
goto err_region;
}
/* Arrange for access to Target SoC registers. */
ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
if (!ar_pci->mem) {
- ath10k_err("failed to iomap BAR%d\n", BAR_NUM);
+ ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
ret = -EIO;
goto err_master;
}
- ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
return 0;
err_master:
struct ath10k_pci *ar_pci;
u32 chip_id;
- ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
-
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
&ath10k_pci_hif_ops);
if (!ar) {
- ath10k_err("failed to allocate core\n");
+ dev_err(&pdev->dev, "failed to allocate core\n");
return -ENOMEM;
}
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
+
ar_pci = ath10k_pci_priv(ar);
ar_pci->pdev = pdev;
ar_pci->dev = &pdev->dev;
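One ordering detail worth calling out in the probe hunk above: the ar-based helpers only become usable once ath10k_core_create() has succeeded, so the initial "pci probe" trace moves below it and the allocation failure itself falls back to dev_err() on the PCI device. A condensed sketch of that ordering, with the rest of probe elided (the function name is illustrative):
static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *pci_dev)
{
	struct ath10k *ar;

	ar = ath10k_core_create(sizeof(struct ath10k_pci), &pdev->dev,
				&ath10k_pci_hif_ops);
	if (!ar) {
		/* no struct ath10k yet, so log against the generic device */
		dev_err(&pdev->dev, "failed to allocate core\n");
		return -ENOMEM;
	}

	/* from here on the device-scoped helpers can be used */
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");

	return 0;
}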
ret = ath10k_pci_claim(ar);
if (ret) {
- ath10k_err("failed to claim device: %d\n", ret);
+ ath10k_err(ar, "failed to claim device: %d\n", ret);
goto err_core_destroy;
}
ret = ath10k_pci_wake(ar);
if (ret) {
- ath10k_err("failed to wake up: %d\n", ret);
+ ath10k_err(ar, "failed to wake up: %d\n", ret);
goto err_release;
}
chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
- ath10k_err("failed to get chip id\n");
+ ath10k_err(ar, "failed to get chip id\n");
goto err_sleep;
}
ret = ath10k_pci_alloc_ce(ar);
if (ret) {
- ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
+ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+ ret);
goto err_sleep;
}
ret = ath10k_ce_disable_interrupts(ar);
if (ret) {
- ath10k_err("failed to disable copy engine interrupts: %d\n",
+ ath10k_err(ar, "failed to disable copy engine interrupts: %d\n",
ret);
goto err_free_ce;
}
ret = ath10k_pci_init_irq(ar);
if (ret) {
- ath10k_err("failed to init irqs: %d\n", ret);
+ ath10k_err(ar, "failed to init irqs: %d\n", ret);
goto err_free_ce;
}
- ath10k_info("pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
+ ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
ath10k_pci_irq_mode, ath10k_pci_reset_mode);
ret = ath10k_pci_request_irq(ar);
if (ret) {
- ath10k_warn("failed to request irqs: %d\n", ret);
+ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
goto err_deinit_irq;
}
ret = ath10k_core_register(ar, chip_id);
if (ret) {
- ath10k_err("failed to register driver core: %d\n", ret);
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_free_irq;
}
struct ath10k *ar = pci_get_drvdata(pdev);
struct ath10k_pci *ar_pci;
- ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
if (!ar)
return;
ret = pci_register_driver(&ath10k_pci_driver);
if (ret)
- ath10k_err("failed to register PCI driver: %d\n", ret);
+ printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
+ ret);
return ret;
}
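The same reasoning applies in reverse at module init above: no struct ath10k exists when pci_register_driver() runs, so the failure path drops to a raw printk() rather than ath10k_err(). A sketch of the surrounding function (its name is illustrative):
static int __init example_pci_init(void)
{
	int ret;

	/* no device instance exists yet, hence plain printk on failure */
	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       ret);

	return ret;
}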
WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
WMI_SPECTRAL_ENABLE_CMD_DISABLE);
if (res < 0) {
- ath10k_warn("failed to enable spectral scan: %d\n", res);
+ ath10k_warn(ar, "failed to enable spectral scan: %d\n", res);
return res;
}
res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
if (res < 0) {
- ath10k_warn("failed to configure spectral scan: %d\n", res);
+ ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
return res;
}
res = ath10k_spectral_scan_config(ar,
ar->spectral.mode);
if (res < 0) {
- ath10k_warn("failed to reconfigure spectral scan: %d\n",
+ ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
res);
}
res = ath10k_spectral_scan_trigger(ar);
if (res < 0) {
- ath10k_warn("failed to trigger spectral scan: %d\n",
+ ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
res);
}
} else {
* offchan_tx_skb. */
spin_lock_bh(&ar->data_lock);
if (ar->offchan_tx_skb != skb) {
- ath10k_warn("completed old offchannel frame\n");
+ ath10k_warn(ar, "completed old offchannel frame\n");
goto out;
}
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
- ath10k_dbg(ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
- struct device *dev = htt->ar->dev;
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
struct ath10k_skb_cb *skb_cb;
struct sk_buff *msdu;
lockdep_assert_held(&htt->tx_lock);
- ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
- ath10k_warn("warning: msdu_id %d too big, ignoring\n",
+ ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return;
}
wake_up(&ar->peer_mapping_wq);
}
- ath10k_dbg(ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
set_bit(ev->peer_id, peer->peer_ids);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
- ath10k_warn("peer-unmap-event: unknown peer id %d\n",
+ ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
ev->peer_id);
goto exit;
}
- ath10k_dbg(ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, ev->peer_id);
clear_bit(ev->peer_id, peer->peer_ids);
return ret;
}
-static struct sk_buff *ath10k_wmi_alloc_skb(u32 len)
+static struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
{
struct sk_buff *skb;
u32 round_len = roundup(len, 4);
- skb = ath10k_htc_alloc_skb(WMI_SKB_HEADROOM + round_len);
+ skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
if (!skb)
return NULL;
skb_reserve(skb, WMI_SKB_HEADROOM);
if (!IS_ALIGNED((unsigned long)skb->data, 4))
- ath10k_warn("Unaligned WMI skb\n");
+ ath10k_warn(ar, "Unaligned WMI skb\n");
skb_put(skb, round_len);
memset(skb->data, 0, round_len);
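The allocator change above is what drives every WMI hunk that follows: ath10k_wmi_alloc_skb() (and ath10k_htc_alloc_skb() underneath) now take the device, so the alignment warning can be tied to it. A hedged sketch of the caller shape the later command builders repeat (the vdev-delete command is used only as an example):
static int example_wmi_cmd(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;

	/* per-device WMI buffer allocation */
	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev delete id %d\n", vdev_id);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}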
might_sleep();
if (cmd_id == WMI_CMD_UNSUPPORTED) {
- ath10k_warn("wmi command %d is not supported by firmware\n",
+ ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
cmd_id);
return ret;
}
len = round_up(len, 4);
- wmi_skb = ath10k_wmi_alloc_skb(len);
+ wmi_skb = ath10k_wmi_alloc_skb(ar, len);
if (!wmi_skb)
return -ENOMEM;
memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(cmd->buf, skb->data, skb->len);
- ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
case ATH10K_SCAN_IDLE:
case ATH10K_SCAN_RUNNING:
case ATH10K_SCAN_ABORTING:
- ath10k_warn("received scan started event in an invalid scan state: %s (%d)\n",
+ ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
ath10k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
* is) ignored by the host as it may be just firmware's scan
* state machine recovering.
*/
- ath10k_warn("received scan completed event in an invalid scan state: %s (%d)\n",
+ ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
ath10k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
switch (ar->scan.state) {
case ATH10K_SCAN_IDLE:
case ATH10K_SCAN_STARTING:
- ath10k_warn("received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
ath10k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
switch (ar->scan.state) {
case ATH10K_SCAN_IDLE:
case ATH10K_SCAN_STARTING:
- ath10k_warn("received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
ath10k_scan_state_str(ar->scan.state),
ar->scan.state);
break;
spin_lock_bh(&ar->data_lock);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
ath10k_wmi_event_scan_type_str(event_type, reason),
event_type, reason, freq, req_id, scan_id, vdev_id,
ath10k_wmi_event_scan_foreign_chan(ar, freq);
break;
case WMI_SCAN_EVENT_START_FAILED:
- ath10k_warn("received scan start failure event\n");
+ ath10k_warn(ar, "received scan start failure event\n");
break;
case WMI_SCAN_EVENT_DEQUEUED:
case WMI_SCAN_EVENT_PREEMPTED:
memset(status, 0, sizeof(*status));
- ath10k_dbg(ATH10K_DBG_MGMT,
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx status %08x\n", rx_status);
if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
if (phy_mode == MODE_11B &&
status->band == IEEE80211_BAND_5GHZ)
- ath10k_dbg(ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
} else {
- ath10k_warn("using (unreliable) phy_mode to extract band for mgmt rx\n");
+ ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n");
status->band = phy_mode_to_band(phy_mode);
}
}
}
- ath10k_dbg(ATH10K_DBG_MGMT,
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
- ath10k_dbg(ATH10K_DBG_MGMT,
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
status->freq, status->band, status->signal,
status->rate_idx);
rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
cycle_count = __le32_to_cpu(ev->cycle_count);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
err_code, freq, cmd_flags, noise_floor, rx_clear_count,
cycle_count);
switch (ar->scan.state) {
case ATH10K_SCAN_IDLE:
case ATH10K_SCAN_STARTING:
- ath10k_warn("received chan info event without a scan request, ignoring\n");
+ ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
goto exit;
case ATH10K_SCAN_RUNNING:
case ATH10K_SCAN_ABORTING:
idx = freq_to_idx(ar, freq);
if (idx >= ARRAY_SIZE(ar->survey)) {
- ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
+ ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
freq, idx);
goto exit;
}
static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}
static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
skb->len);
trace_ath10k_wmi_dbglog(skb->data, skb->len);
{
struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
ath10k_debug_read_target_stats(ar, ev);
}
{
struct wmi_vdev_start_response_event *ev;
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
ev = (struct wmi_vdev_start_response_event *)skb->data;
static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
complete(&ar->vdev_setup_done);
}
ev = (struct wmi_peer_sta_kickout_event *)skb->data;
- ath10k_dbg(ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
ev->peer_macaddr.addr);
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
if (!sta) {
- ath10k_warn("Spurious quick kickout for STA %pM\n",
+ ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
ev->peer_macaddr.addr);
goto exit;
}
(u8 *)skb_tail_pointer(bcn) - ies);
if (!ie) {
if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
- ath10k_warn("no tim ie found;\n");
+ ath10k_warn(ar, "no tim ie found;\n");
return;
}
ie_len += expand_size;
pvm_len += expand_size;
} else {
- ath10k_warn("tim expansion failed\n");
+ ath10k_warn(ar, "tim expansion failed\n");
}
}
if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
- ath10k_warn("tim pvm length is too great (%d)\n", pvm_len);
+ ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
return;
}
ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
}
- ath10k_dbg(ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
tim->dtim_count, tim->dtim_period,
tim->bitmap_ctrl, pvm_len);
}
if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
return;
- ath10k_dbg(ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
new_len = ath10k_p2p_calc_noa_ie_len(noa);
if (!new_len)
ev = (struct wmi_host_swba_event *)skb->data;
map = __le32_to_cpu(ev->vdev_map);
- ath10k_dbg(ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
ev->vdev_map);
for (; map; map >>= 1, vdev_id++) {
i++;
if (i >= WMI_MAX_AP_VDEV) {
- ath10k_warn("swba has corrupted vdev map\n");
+ ath10k_warn(ar, "swba has corrupted vdev map\n");
break;
}
bcn_info = &ev->bcn_info[i];
- ath10k_dbg(ATH10K_DBG_MGMT,
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
"mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
i,
__le32_to_cpu(bcn_info->tim_info.tim_len),
arvif = ath10k_get_arvif(ar, vdev_id);
if (arvif == NULL) {
- ath10k_warn("no vif for vdev_id %d found\n", vdev_id);
+ ath10k_warn(ar, "no vif for vdev_id %d found\n",
+ vdev_id);
continue;
}
bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
if (!bcn) {
- ath10k_warn("could not get mac80211 beacon\n");
+ ath10k_warn(ar, "could not get mac80211 beacon\n");
continue;
}
if (arvif->beacon) {
if (!arvif->beacon_sent)
- ath10k_warn("SWBA overrun on vdev %d\n",
+ ath10k_warn(ar, "SWBA overrun on vdev %d\n",
arvif->vdev_id);
dma_unmap_single(arvif->ar->dev,
ret = dma_mapping_error(arvif->ar->dev,
ATH10K_SKB_CB(bcn)->paddr);
if (ret) {
- ath10k_warn("failed to map beacon: %d\n", ret);
+ ath10k_warn(ar, "failed to map beacon: %d\n", ret);
dev_kfree_skb_any(bcn);
goto skip;
}
static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}
static void ath10k_dfs_radar_report(struct ath10k *ar,
reg0 = __le32_to_cpu(rr->reg0);
reg1 = __le32_to_cpu(rr->reg1);
- ath10k_dbg(ATH10K_DBG_REGULATORY,
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
- ath10k_dbg(ATH10K_DBG_REGULATORY,
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
- ath10k_dbg(ATH10K_DBG_REGULATORY,
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
pe.width = width;
pe.rssi = rssi;
- ath10k_dbg(ATH10K_DBG_REGULATORY,
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
pe.freq, pe.width, pe.rssi, pe.ts);
ATH10K_DFS_STAT_INC(ar, pulses_detected);
if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
- ath10k_dbg(ATH10K_DBG_REGULATORY,
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"dfs no pulse pattern detected, yet\n");
return;
}
- ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs radar detected\n");
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
ATH10K_DFS_STAT_INC(ar, radar_detected);
/* Control radar events reporting in debugfs file
dfs_block_radar_events */
if (ar->dfs_block_radar_events) {
- ath10k_info("DFS Radar detected, but ignored as requested\n");
+ ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
return;
}
reg1 = __le32_to_cpu(fftr->reg1);
rssi = event->hdr.rssi_combined;
- ath10k_dbg(ATH10K_DBG_REGULATORY,
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
- ath10k_dbg(ATH10K_DBG_REGULATORY,
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
/* false event detection */
if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
- ath10k_dbg(ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
ATH10K_DFS_STAT_INC(ar, pulses_discarded);
return -EINVAL;
}
u8 *tlv_buf;
buf_len = __le32_to_cpu(event->hdr.buf_len);
- ath10k_dbg(ATH10K_DBG_REGULATORY,
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
event->hdr.phy_err_code, event->hdr.rssi_combined,
__le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
while (i < buf_len) {
if (i + sizeof(*tlv) > buf_len) {
- ath10k_warn("too short buf for tlv header (%d)\n", i);
+ ath10k_warn(ar, "too short buf for tlv header (%d)\n",
+ i);
return;
}
tlv = (struct phyerr_tlv *)&event->bufp[i];
tlv_len = __le16_to_cpu(tlv->len);
tlv_buf = &event->bufp[i + sizeof(*tlv)];
- ath10k_dbg(ATH10K_DBG_REGULATORY,
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
tlv_len, tlv->tag, tlv->sig);
switch (tlv->tag) {
case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
- ath10k_warn("too short radar pulse summary (%d)\n",
+ ath10k_warn(ar, "too short radar pulse summary (%d)\n",
i);
return;
}
break;
case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
- ath10k_warn("too short fft report (%d)\n", i);
+ ath10k_warn(ar, "too short fft report (%d)\n",
+ i);
return;
}
while (i < buf_len) {
if (i + sizeof(*tlv) > buf_len) {
- ath10k_warn("failed to parse phyerr tlv header at byte %d\n",
+ ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
i);
return;
}
tlv_buf = &event->bufp[i + sizeof(*tlv)];
if (i + sizeof(*tlv) + tlv_len > buf_len) {
- ath10k_warn("failed to parse phyerr tlv payload at byte %d\n",
+ ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
i);
return;
}
switch (tlv->tag) {
case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
if (sizeof(*fftr) > tlv_len) {
- ath10k_warn("failed to parse fft report at byte %d\n",
+ ath10k_warn(ar, "failed to parse fft report at byte %d\n",
i);
return;
}
fftr, fftr_len,
tsf);
if (res < 0) {
- ath10k_warn("failed to process fft report: %d\n",
+ ath10k_warn(ar, "failed to process fft report: %d\n",
res);
return;
}
/* Check if combined event available */
if (left_len < sizeof(*comb_event)) {
- ath10k_warn("wmi phyerr combined event wrong len\n");
+ ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
return;
}
tsf <<= 32;
tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event phyerr count %d tsf64 0x%llX\n",
count, tsf);
for (i = 0; i < count; i++) {
/* Check if we can read event header */
if (left_len < sizeof(*event)) {
- ath10k_warn("single event (%d) wrong head len\n", i);
+ ath10k_warn(ar, "single event (%d) wrong head len\n",
+ i);
return;
}
phy_err_code = event->hdr.phy_err_code;
if (left_len < buf_len) {
- ath10k_warn("single event (%d) wrong buf len\n", i);
+ ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
return;
}
static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
}
static void ath10k_wmi_event_profile_match(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}
static void ath10k_wmi_event_debug_print(struct ath10k *ar,
}
if (i == sizeof(buf) - 1)
- ath10k_warn("wmi debug print truncated: %d\n", skb->len);
+ ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
/* for some reason the debug prints end with \n, remove that */
if (skb->data[i - 1] == '\n')
/* the last byte is always reserved for the null character */
buf[i] = '\0';
- ath10k_dbg(ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug print '%s'\n", buf);
}
static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}
static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}
static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}
static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}
static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}
static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
}
static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}
static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}
static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}
static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}
static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}
static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}
static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}
static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
}
static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
}
static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
}
static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
&paddr,
GFP_ATOMIC);
if (!ar->wmi.mem_chunks[idx].vaddr) {
- ath10k_warn("failed to allocate memory chunk\n");
+ ath10k_warn(ar, "failed to allocate memory chunk\n");
return -ENOMEM;
}
DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {};
if (skb->len < sizeof(*ev)) {
- ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+ ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
skb->len, sizeof(*ev));
return;
}
set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
- ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
}
wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap);
ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
- ath10k_dbg_dump(ATH10K_DBG_WMI, NULL, "ath10k: wmi svc: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
if (strlen(ar->hw->wiphy->fw_version) == 0) {
/* FIXME: it probably should be better to support this */
if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
- ath10k_warn("target requested %d memory chunks; ignoring\n",
+ ath10k_warn(ar, "target requested %d memory chunks; ignoring\n",
__le32_to_cpu(ev->num_mem_reqs));
}
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->sw_version_1),
DECLARE_BITMAP(svc_bmap, WMI_SERVICE_BM_SIZE) = {};
if (skb->len < sizeof(*ev)) {
- ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
+ ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
skb->len, sizeof(*ev));
return;
}
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
- ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
}
wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap);
ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
- ath10k_dbg_dump(ATH10K_DBG_WMI, NULL, "ath10k: wmi svc: ",
+ ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
if (strlen(ar->hw->wiphy->fw_version) == 0) {
num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
- ath10k_warn("requested memory chunks number (%d) exceeds the limit\n",
+ ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
num_mem_reqs);
return;
}
if (!num_mem_reqs)
goto exit;
- ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
num_mem_reqs);
for (i = 0; i < num_mem_reqs; ++i) {
else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
num_units = TARGET_10X_NUM_VDEVS + 1;
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
req_id,
__le32_to_cpu(ev->mem_reqs[i].num_units),
}
exit:
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->abi_version),
memcpy(ar->mac_addr, ev->mac_addr.addr, ETH_ALEN);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->abi_version),
ath10k_wmi_ready_event_rx(ar, skb);
break;
default:
- ath10k_warn("Unknown eventid: %d\n", id);
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
}
ath10k_wmi_ready_event_rx(ar, skb);
break;
default:
- ath10k_warn("Unknown eventid: %d\n", id);
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
}
case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
case WMI_10_2_WDS_PEER_EVENTID:
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
default:
- ath10k_warn("Unknown eventid: %d\n", id);
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
}
status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
if (status) {
- ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
+ ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
status);
return status;
}
struct wmi_pdev_set_regdomain_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g);
struct wmi_pdev_set_regdomain_cmd_10x *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
cmd->dfs_domain = __cpu_to_le32(dfs_reg);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
if (arg->passive)
return -EINVAL;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->chan.reg_classid = arg->reg_class_id;
cmd->chan.antenna_max = arg->max_antenna_gain;
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi set channel mode %d freq %d\n",
arg->mode, arg->freq);
struct wmi_pdev_suspend_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
{
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(0);
+ skb = ath10k_wmi_alloc_skb(ar, 0);
if (skb == NULL)
return -ENOMEM;
struct sk_buff *skb;
if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
- ath10k_warn("pdev param %d not supported by firmware\n", id);
+ ath10k_warn(ar, "pdev param %d not supported by firmware\n",
+ id);
return -EOPNOTSUPP;
}
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->param_id = __cpu_to_le32(id);
cmd->param_value = __cpu_to_le32(value);
- ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
id, value);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
len = sizeof(*cmd) +
(sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
- buf = ath10k_wmi_alloc_skb(len);
+ buf = ath10k_wmi_alloc_skb(ar, len);
if (!buf)
return -ENOMEM;
goto out;
}
- ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
cmd->host_mem_chunks[i].req_id =
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi chunk %d len %d requested, addr 0x%llx\n",
i,
ar->wmi.mem_chunks[i].len,
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
- ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
len = sizeof(*cmd) +
(sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
- buf = ath10k_wmi_alloc_skb(len);
+ buf = ath10k_wmi_alloc_skb(ar, len);
if (!buf)
return -ENOMEM;
goto out;
}
- ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
cmd->host_mem_chunks[i].req_id =
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi chunk %d len %d requested, addr 0x%llx\n",
i,
ar->wmi.mem_chunks[i].len,
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
- ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
len = sizeof(*cmd) +
(sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
- buf = ath10k_wmi_alloc_skb(len);
+ buf = ath10k_wmi_alloc_skb(ar, len);
if (!buf)
return -ENOMEM;
goto out;
}
- ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
cmd->host_mem_chunks[i].req_id =
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi chunk %d len %d requested, addr 0x%llx\n",
i,
ar->wmi.mem_chunks[i].len,
out:
memcpy(&cmd->resource_config.common, &config, sizeof(config));
- ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10.2\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
if (len < 0)
return len; /* len contains error code here */
- skb = ath10k_wmi_alloc_skb(len);
+ skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
return -EINVAL;
}
- ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}
if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
return -EINVAL;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->scan_id = __cpu_to_le32(scan_id);
cmd->scan_req_id = __cpu_to_le32(req_id);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
arg->req_id, arg->req_type, arg->u.scan_id);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
struct wmi_vdev_create_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->vdev_subtype = __cpu_to_le32(subtype);
memcpy(cmd->vdev_macaddr.addr, macaddr, ETH_ALEN);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
vdev_id, type, subtype, macaddr);
struct wmi_vdev_delete_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_delete_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"WMI vdev delete id %d\n", vdev_id);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
else
return -EINVAL; /* should not happen, we already check cmd_id */
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->chan.reg_classid = arg->channel.reg_class_id;
cmd->chan.antenna_max = arg->channel.max_antenna_gain;
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, "
"ch_flags: 0x%0X, max_power: %d\n", cmdname, arg->vdev_id,
flags, arg->channel.freq, arg->channel.mode,
struct wmi_vdev_stop_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_stop_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
struct wmi_vdev_up_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->vdev_assoc_id = __cpu_to_le32(aid);
memcpy(&cmd->vdev_bssid.addr, bssid, ETH_ALEN);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
vdev_id, aid, bssid);
struct wmi_vdev_down_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_vdev_down_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(vdev_id);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mgmt vdev down id 0x%x\n", vdev_id);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
struct sk_buff *skb;
if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"vdev param %d not supported by firmware\n",
param_id);
return -EOPNOTSUPP;
}
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(param_value);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev id 0x%x set param %d value %d\n",
vdev_id, param_id, param_value);
if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
return -EINVAL;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd) + arg->key_len);
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
if (!skb)
return -ENOMEM;
if (arg->key_data)
memcpy(cmd->key_data, arg->key_data, arg->key_len);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev install key idx %d cipher %d len %d\n",
arg->key_idx, arg->key_cipher, arg->key_len);
return ath10k_wmi_cmd_send(ar, skb,
struct sk_buff *skb;
u32 cmdid;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
struct sk_buff *skb;
u32 cmdid;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
struct wmi_peer_create_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->vdev_id = __cpu_to_le32(vdev_id);
memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi peer create vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
struct wmi_peer_delete_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->vdev_id = __cpu_to_le32(vdev_id);
memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi peer delete vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
struct wmi_peer_flush_tids_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
memcpy(cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
vdev_id, peer_addr, tid_bitmap);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
struct wmi_peer_set_param_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->param_value = __cpu_to_le32(param_value);
memcpy(&cmd->peer_macaddr.addr, peer_addr, ETH_ALEN);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev %d peer 0x%pM set param %d value %d\n",
vdev_id, peer_addr, param_id, param_value);
struct wmi_sta_powersave_mode_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->vdev_id = __cpu_to_le32(vdev_id);
cmd->sta_ps_mode = __cpu_to_le32(psmode);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi set powersave id 0x%x mode %d\n",
vdev_id, psmode);
struct wmi_sta_powersave_param_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->param_id = __cpu_to_le32(param_id);
cmd->param_value = __cpu_to_le32(value);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi sta ps param vdev_id 0x%x param %d value %d\n",
vdev_id, param_id, value);
return ath10k_wmi_cmd_send(ar, skb,
if (!mac)
return -EINVAL;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->param_value = __cpu_to_le32(value);
memcpy(&cmd->peer_macaddr, mac, ETH_ALEN);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
vdev_id, param_id, value, mac);
len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
- skb = ath10k_wmi_alloc_skb(len);
+ skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return -EINVAL;
len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
}
- skb = ath10k_wmi_alloc_skb(len);
+ skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
}
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi peer assoc vdev %d addr %pM (%s)\n",
arg->vdev_id, arg->addr,
arg->peer_reassoc ? "reassociate" : "new");
int ret;
u16 fc;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
struct wmi_pdev_set_wmm_params *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
- ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}
struct wmi_request_stats_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd = (struct wmi_request_stats_cmd *)skb->data;
cmd->stats_id = __cpu_to_le32(stats_id);
- ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}
struct wmi_force_fw_hang_cmd *cmd;
struct sk_buff *skb;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->type = __cpu_to_le32(type);
cmd->delay_ms = __cpu_to_le32(delay_ms);
- ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
type, delay_ms);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
struct sk_buff *skb;
u32 cfg;
- skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
cmd->config_enable = __cpu_to_le32(cfg);
cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
- ath10k_dbg(ATH10K_DBG_WMI,
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi dbglog cfg modules %08x %08x config %08x %08x\n",
__le32_to_cpu(cmd->module_enable),
__le32_to_cpu(cmd->module_valid),