git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge tag 'iwlwifi-next-for-kalle-2015-08-04' of https://git.kernel.org/pub/scm/linux...
author Kalle Valo <kvalo@codeaurora.org>
Thu, 6 Aug 2015 07:27:59 +0000 (10:27 +0300)
committer Kalle Valo <kvalo@codeaurora.org>
Thu, 6 Aug 2015 07:27:59 +0000 (10:27 +0300)
* Deprecate -10.ucode
* Clean ups towards multiple Rx queues
* Add support for longer CMD IDs. This will be required by new
  firmware images since we are getting close to the u8 limit (a short
  illustrative sketch follows this list).
* bugfixes for the D0i3 power state
* Add basic support for FTM
* More random stuff that doesn't really stand out
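
The "longer CMD IDs" item above is the change that recurs most often in the
hunks below: SCAN_CFG_CMD, SCAN_REQ_UMAC and SCAN_ABORT_UMAC all switch from a
bare u8 opcode to iwl_cmd_id(opcode, IWL_ALWAYS_LONG_GROUP, 0). The following
is only a hedged sketch of how such a wide command ID can be packed; the
helper name example_cmd_id and the exact bit layout are assumptions made for
illustration, the authoritative helper lives in the driver's iwl-trans.h.

  /*
   * Minimal sketch, not the driver's actual helper: keep the legacy u8
   * opcode in the low byte of a 32-bit ID so existing opcodes remain
   * valid, and use the higher bytes for group and version.
   */
  #include <linux/types.h>

  static inline u32 example_cmd_id(u8 opcode, u8 groupid, u8 version)
  {
          return opcode | (groupid << 8) | (version << 16);
  }

  /* usage shaped like the hunks below (names taken from the diff):
   *      cmd.id = example_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0);
   */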

drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/sta.c
drivers/net/wireless/iwlwifi/mvm/time-event.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/rx.c
drivers/net/wireless/iwlwifi/pcie/trans.c

index 737774a01c74a500b3934928449531889701d7f5,be9b665608017dbbe2c7a9f422db672b7b552535..660cc1c93e192654345b96b5b4a6a9c9589dc746
@@@ -87,41 -87,6 +87,6 @@@ struct iwl_ssid_ie 
        u8 ssid[IEEE80211_MAX_SSID_LEN];
  } __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
  
- /* How many statistics are gathered for each channel */
- #define SCAN_RESULTS_STATISTICS 1
- /**
-  * enum iwl_scan_complete_status - status codes for scan complete notifications
-  * @SCAN_COMP_STATUS_OK:  scan completed successfully
-  * @SCAN_COMP_STATUS_ABORT: scan was aborted by user
-  * @SCAN_COMP_STATUS_ERR_SLEEP: sending null sleep packet failed
-  * @SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT: timeout before channel is ready
-  * @SCAN_COMP_STATUS_ERR_PROBE: sending probe request failed
-  * @SCAN_COMP_STATUS_ERR_WAKEUP: sending null wakeup packet failed
-  * @SCAN_COMP_STATUS_ERR_ANTENNAS: invalid antennas chosen at scan command
-  * @SCAN_COMP_STATUS_ERR_INTERNAL: internal error caused scan abort
-  * @SCAN_COMP_STATUS_ERR_COEX: medium was lost ot WiMax
-  * @SCAN_COMP_STATUS_P2P_ACTION_OK: P2P public action frame TX was successful
-  *    (not an error!)
-  * @SCAN_COMP_STATUS_ITERATION_END: indicates end of one repetition the driver
-  *    asked for
-  * @SCAN_COMP_STATUS_ERR_ALLOC_TE: scan could not allocate time events
- */
- enum iwl_scan_complete_status {
-       SCAN_COMP_STATUS_OK = 0x1,
-       SCAN_COMP_STATUS_ABORT = 0x2,
-       SCAN_COMP_STATUS_ERR_SLEEP = 0x3,
-       SCAN_COMP_STATUS_ERR_CHAN_TIMEOUT = 0x4,
-       SCAN_COMP_STATUS_ERR_PROBE = 0x5,
-       SCAN_COMP_STATUS_ERR_WAKEUP = 0x6,
-       SCAN_COMP_STATUS_ERR_ANTENNAS = 0x7,
-       SCAN_COMP_STATUS_ERR_INTERNAL = 0x8,
-       SCAN_COMP_STATUS_ERR_COEX = 0x9,
-       SCAN_COMP_STATUS_P2P_ACTION_OK = 0xA,
-       SCAN_COMP_STATUS_ITERATION_END = 0x0B,
-       SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
- };
  /* scan offload */
  #define IWL_SCAN_MAX_BLACKLIST_LEN    64
  #define IWL_SCAN_SHORT_BLACKLIST_LEN  16
@@@ -143,71 -108,6 +108,6 @@@ enum scan_framework_client 
        SCAN_CLIENT_ASSET_TRACKING      = BIT(2),
  };
  
- /**
-  * struct iwl_scan_offload_cmd - SCAN_REQUEST_FIXED_PART_API_S_VER_6
-  * @scan_flags:               see enum iwl_scan_flags
-  * @channel_count:    channels in channel list
-  * @quiet_time:               dwell time, in milliseconds, on quiet channel
-  * @quiet_plcp_th:    quiet channel num of packets threshold
-  * @good_CRC_th:      passive to active promotion threshold
-  * @rx_chain:         RXON rx chain.
-  * @max_out_time:     max TUs to be out of associated channel
-  * @suspend_time:     pause scan this TUs when returning to service channel
-  * @flags:            RXON flags
-  * @filter_flags:     RXONfilter
-  * @tx_cmd:           tx command for active scan; for 2GHz and for 5GHz.
-  * @direct_scan:      list of SSIDs for directed active scan
-  * @scan_type:                see enum iwl_scan_type.
-  * @rep_count:                repetition count for each scheduled scan iteration.
-  */
- struct iwl_scan_offload_cmd {
-       __le16 len;
-       u8 scan_flags;
-       u8 channel_count;
-       __le16 quiet_time;
-       __le16 quiet_plcp_th;
-       __le16 good_CRC_th;
-       __le16 rx_chain;
-       __le32 max_out_time;
-       __le32 suspend_time;
-       /* RX_ON_FLAGS_API_S_VER_1 */
-       __le32 flags;
-       __le32 filter_flags;
-       struct iwl_tx_cmd tx_cmd[2];
-       /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
-       struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
-       __le32 scan_type;
-       __le32 rep_count;
- } __packed;
- enum iwl_scan_offload_channel_flags {
-       IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE         = BIT(0),
-       IWL_SCAN_OFFLOAD_CHANNEL_NARROW         = BIT(22),
-       IWL_SCAN_OFFLOAD_CHANNEL_FULL           = BIT(24),
-       IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL        = BIT(25),
- };
- /* channel configuration for struct iwl_scan_offload_cfg. Each channels needs:
-  * __le32 type:       bitmap; bits 1-20 are for directed scan to i'th ssid and
-  *    see enum iwl_scan_offload_channel_flags.
-  * __le16 channel_number: channel number 1-13 etc.
-  * __le16 iter_count: repetition count for the channel.
-  * __le32 iter_interval: interval between two iterations on one channel.
-  * u8 active_dwell.
-  * u8 passive_dwell.
-  */
- #define IWL_SCAN_CHAN_SIZE 14
- /**
-  * iwl_scan_offload_cfg - SCAN_OFFLOAD_CONFIG_API_S
-  * @scan_cmd:         scan command fixed part
-  * @data:             scan channel configuration and probe request frames
-  */
- struct iwl_scan_offload_cfg {
-       struct iwl_scan_offload_cmd scan_cmd;
-       u8 data[0];
- } __packed;
  /**
   * iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
   * @ssid:             MAC address to filter out
@@@ -297,35 -197,6 +197,6 @@@ enum iwl_scan_ebs_status 
        IWL_SCAN_EBS_INACTIVE,
  };
  
- /**
-  * iwl_scan_offload_complete - SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
-  * @last_schedule_line:               last schedule line executed (fast or regular)
-  * @last_schedule_iteration:  last scan iteration executed before scan abort
-  * @status:                   enum iwl_scan_offload_compleate_status
-  * @ebs_status: last EBS status, see IWL_SCAN_EBS_*
-  */
- struct iwl_scan_offload_complete {
-       u8 last_schedule_line;
-       u8 last_schedule_iteration;
-       u8 status;
-       u8 ebs_status;
- } __packed;
- /**
-  * iwl_sched_scan_results - SCAN_OFFLOAD_MATCH_FOUND_NTF_API_S_VER_1
-  * @ssid_bitmap:      SSIDs indexes found in this iteration
-  * @client_bitmap:    clients that are active and wait for this notification
-  */
- struct iwl_sched_scan_results {
-       __le16 ssid_bitmap;
-       u8 client_bitmap;
-       u8 reserved;
- };
- /* Unified LMAC scan API */
- #define IWL_MVM_BASIC_PASSIVE_DWELL 110
  /**
   * iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
   * @tx_flags: combination of TX_CMD_FLG_*
@@@ -550,18 -421,6 +421,6 @@@ struct iwl_periodic_scan_complete 
  
  /* UMAC Scan API */
  
- /**
-  * struct iwl_mvm_umac_cmd_hdr - Command header for UMAC commands
-  * @size:     size of the command (not including header)
-  * @reserved0:        for future use and alignment
-  * @ver:      API version number
-  */
- struct iwl_mvm_umac_cmd_hdr {
-       __le16 size;
-       u8 reserved0;
-       u8 ver;
- } __packed;
  /* The maximum of either of these cannot exceed 8, because we use an
   * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
   */
@@@ -621,7 -480,6 +480,6 @@@ enum iwl_channel_flags 
  
  /**
   * struct iwl_scan_config
-  * @hdr: umac command header
   * @flags:                    enum scan_config_flags
   * @tx_chains:                        valid_tx antenna - ANT_* definitions
   * @rx_chains:                        valid_rx antenna - ANT_* definitions
   * @channel_array:            default supported channels
   */
  struct iwl_scan_config {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 flags;
        __le32 tx_chains;
        __le32 rx_chains;
   * iwl_umac_scan_flags
   *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
   *    can be preempted by other scan requests with higher priority.
 - *    The low priority scan is aborted.
 + *    The low priority scan will be resumed when the higher priority scan is
 + *    completed.
   *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
   *    when scan starts.
   */
@@@ -735,7 -591,6 +592,6 @@@ struct iwl_scan_req_umac_tail 
  
  /**
   * struct iwl_scan_req_umac
-  * @hdr: umac command header
   * @flags: &enum iwl_umac_scan_flags
   * @uid: scan id, &enum iwl_umac_scan_uid_offsets
   * @ooc_priority: out of channel priority - &enum iwl_scan_priority
   *    &struct iwl_scan_req_umac_tail
   */
  struct iwl_scan_req_umac {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 flags;
        __le32 uid;
        __le32 ooc_priority;
  
  /**
   * struct iwl_umac_scan_abort
-  * @hdr: umac command header
   * @uid: scan id, &enum iwl_umac_scan_uid_offsets
   * @flags: reserved
   */
  struct iwl_umac_scan_abort {
-       struct iwl_mvm_umac_cmd_hdr hdr;
        __le32 uid;
        __le32 flags;
  } __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
index 5000bfcded617f32ca177ae7a0711f13a3ce65d3,8d7fefe77ddd346dadda490d36f849db80fb8020..95678e773c6ff0bf188624392c8b7f0a54f86cd0
@@@ -90,11 -90,9 +90,9 @@@ struct iwl_mvm_scan_params 
        int n_match_sets;
        struct iwl_scan_probe_req preq;
        struct cfg80211_match_set *match_sets;
-       struct _dwell {
-               u16 passive;
-               u16 active;
-               u16 fragmented;
-       } dwell[IEEE80211_NUM_BANDS];
+       u16 passive_dwell;
+       u16 active_dwell;
+       u16 fragmented_dwell;
        struct {
                u8 iterations;
                u8 full_scan_mul; /* not used for UMAC */
@@@ -147,34 -145,6 +145,6 @@@ iwl_mvm_scan_rate_n_flags(struct iwl_mv
                return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
  }
  
- /*
-  * If req->n_ssids > 0, it means we should do an active scan.
-  * In case of active scan w/o directed scan, we receive a zero-length SSID
-  * just to notify that this scan is active and not passive.
-  * In order to notify the FW of the number of SSIDs we wish to scan (including
-  * the zero-length one), we need to set the corresponding bits in chan->type,
-  * one for each SSID, and set the active bit (first). If the first SSID is
-  * already included in the probe template, so we need to set only
-  * req->n_ssids - 1 bits in addition to the first bit.
-  */
- static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
-                                   enum ieee80211_band band, int n_ssids)
- {
-       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-               return 10;
-       if (band == IEEE80211_BAND_2GHZ)
-               return 20  + 3 * (n_ssids + 1);
-       return 10  + 2 * (n_ssids + 1);
- }
- static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
-                                    enum ieee80211_band band)
- {
-       if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BASIC_DWELL))
-                       return 110;
-       return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
- }
  static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
                                            struct ieee80211_vif *vif)
  {
@@@ -191,7 -161,6 +161,6 @@@ static void iwl_mvm_scan_calc_dwell(str
                                    struct iwl_mvm_scan_params *params)
  {
        int global_cnt = 0;
-       enum ieee80211_band band;
        u8 frag_passive_dwell = 0;
  
        ieee80211_iterate_active_interfaces_atomic(mvm->hw,
                /*
                 * P2P device scan should not be fragmented to avoid negative
                 * impact on P2P device discovery. Configure max_out_time to be
-                * equal to dwell time on passive channel. Take a longest
-                * possible value, one that corresponds to 2GHz band
+                * equal to dwell time on passive channel.
                 */
                if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-                       u32 passive_dwell =
-                               iwl_mvm_get_passive_dwell(mvm,
-                                                         IEEE80211_BAND_2GHZ);
-                       params->max_out_time = passive_dwell;
+                       params->max_out_time = 120;
                } else {
                        params->passive_fragmented = true;
                }
  
  not_bound:
  
-       for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-               if (params->passive_fragmented)
-                       params->dwell[band].fragmented = frag_passive_dwell;
+       if (params->passive_fragmented)
+               params->fragmented_dwell = frag_passive_dwell;
+       /*
+        * use only basic dwell time in scan command, regardless of the band or
+        * the number of the probes. FW will calculate the actual dwell time.
+        */
+       params->passive_dwell = 110;
+       params->active_dwell = 10;
  
-               params->dwell[band].passive = iwl_mvm_get_passive_dwell(mvm,
-                                                                       band);
-               params->dwell[band].active =
-                       iwl_mvm_get_active_dwell(mvm, band, params->n_ssids);
-       }
  
        IWL_DEBUG_SCAN(mvm,
                       "scan parameters: max_out_time %d, suspend_time %d, passive_fragmented %d\n",
                       params->max_out_time, params->suspend_time,
                       params->passive_fragmented);
-       IWL_DEBUG_SCAN(mvm,
-                      "dwell[IEEE80211_BAND_2GHZ]: passive %d, active %d, fragmented %d\n",
-                      params->dwell[IEEE80211_BAND_2GHZ].passive,
-                      params->dwell[IEEE80211_BAND_2GHZ].active,
-                      params->dwell[IEEE80211_BAND_2GHZ].fragmented);
-       IWL_DEBUG_SCAN(mvm,
-                      "dwell[IEEE80211_BAND_5GHZ]: passive %d, active %d, fragmented %d\n",
-                      params->dwell[IEEE80211_BAND_5GHZ].passive,
-                      params->dwell[IEEE80211_BAND_5GHZ].active,
-                      params->dwell[IEEE80211_BAND_5GHZ].fragmented);
  }
  
  static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
@@@ -327,9 -283,8 +283,8 @@@ static u8 *iwl_mvm_dump_channel_list(st
        return buf;
  }
  
- int iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+ void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
                       iwl_mvm_dump_channel_list(notif->results,
                                                 notif->scanned_channels, buf,
                                                 sizeof(buf)));
-       return 0;
  }
  
- int iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+ void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
  {
        IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
        ieee80211_sched_scan_results(mvm->hw);
-       return 0;
  }
  
  static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
        }
  }
  
- int iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd)
+ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
        mvm->last_ebs_successful =
                        scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
                        scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
-       return 0;
  }
  
  static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
@@@ -751,11 -699,10 +699,10 @@@ static void iwl_mvm_scan_lmac_dwell(str
                                    struct iwl_scan_req_lmac *cmd,
                                    struct iwl_mvm_scan_params *params)
  {
-       cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-       cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
+       cmd->active_dwell = params->active_dwell;
+       cmd->passive_dwell = params->passive_dwell;
        if (params->passive_fragmented)
-               cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
+               cmd->fragmented_dwell = params->fragmented_dwell;
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
        cmd->scan_prio = iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
@@@ -937,9 -884,9 +884,9 @@@ int iwl_mvm_config_scan(struct iwl_mvm 
        int num_channels =
                mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
                mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
-       int ret, i, j = 0, cmd_size, data_size;
+       int ret, i, j = 0, cmd_size;
        struct iwl_host_cmd cmd = {
-               .id = SCAN_CFG_CMD,
+               .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
        };
  
        if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
        if (!scan_config)
                return -ENOMEM;
  
-       data_size = cmd_size - sizeof(struct iwl_mvm_umac_cmd_hdr);
-       scan_config->hdr.size = cpu_to_le16(data_size);
        scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
                                         SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
                                         SCAN_CONFIG_FLAG_SET_TX_CHAINS |
@@@ -1013,11 -958,10 +958,10 @@@ static void iwl_mvm_scan_umac_dwell(str
                                    struct iwl_scan_req_umac *cmd,
                                    struct iwl_mvm_scan_params *params)
  {
-       cmd->active_dwell = params->dwell[IEEE80211_BAND_2GHZ].active;
-       cmd->passive_dwell = params->dwell[IEEE80211_BAND_2GHZ].passive;
+       cmd->active_dwell = params->active_dwell;
+       cmd->passive_dwell = params->passive_dwell;
        if (params->passive_fragmented)
-               cmd->fragmented_dwell =
-                               params->dwell[IEEE80211_BAND_2GHZ].fragmented;
+               cmd->fragmented_dwell = params->fragmented_dwell;
        cmd->max_out_time = cpu_to_le32(params->max_out_time);
        cmd->suspend_time = cpu_to_le32(params->suspend_time);
        cmd->scan_priority =
@@@ -1099,8 -1043,6 +1043,6 @@@ static int iwl_mvm_scan_umac(struct iwl
                return uid;
  
        memset(cmd, 0, ksize(cmd));
-       cmd->hdr.size = cpu_to_le16(iwl_mvm_scan_size(mvm) -
-                                   sizeof(struct iwl_mvm_umac_cmd_hdr));
  
        iwl_mvm_scan_umac_dwell(mvm, cmd, params);
  
        cmd->uid = cpu_to_le32(uid);
        cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
  
 +      if (type == IWL_MVM_SCAN_SCHED)
 +              cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
 +
        if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
                cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
                                     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
@@@ -1240,7 -1179,7 +1182,7 @@@ int iwl_mvm_reg_scan_start(struct iwl_m
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
  
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               hcmd.id = SCAN_REQ_UMAC;
+               hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
                ret = iwl_mvm_scan_umac(mvm, vif, &params,
                                        IWL_MVM_SCAN_REGULAR);
        } else {
@@@ -1348,7 -1287,7 +1290,7 @@@ int iwl_mvm_sched_scan_start(struct iwl
        iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
  
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
-               hcmd.id = SCAN_REQ_UMAC;
+               hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
                ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
        } else {
                hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
        return ret;
  }
  
- int iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
-                                       struct iwl_rx_cmd_buffer *rxb,
-                                       struct iwl_device_cmd *cmd)
+ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+                                        struct iwl_rx_cmd_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_complete *notif = (void *)pkt->data;
        bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
  
        if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
-               return 0;
+               return;
  
        /* if the scan is already stopping, we don't need to notify mac80211 */
        if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
                mvm->last_ebs_successful = false;
  
        mvm->scan_uid_status[uid] = 0;
-       return 0;
  }
  
- int iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
-                                            struct iwl_rx_cmd_buffer *rxb,
-                                            struct iwl_device_cmd *cmd)
+ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+                                             struct iwl_rx_cmd_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
                       iwl_mvm_dump_channel_list(notif->results,
                                                 notif->scanned_channels, buf,
                                                 sizeof(buf)));
-       return 0;
  }
  
  static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
  {
-       struct iwl_umac_scan_abort cmd = {
-               .hdr.size = cpu_to_le16(sizeof(struct iwl_umac_scan_abort) -
-                                       sizeof(struct iwl_mvm_umac_cmd_hdr)),
-       };
+       struct iwl_umac_scan_abort cmd = {};
        int uid, ret;
  
        lockdep_assert_held(&mvm->mutex);
  
        IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
  
-       ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_UMAC, 0, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm,
+                                  iwl_cmd_id(SCAN_ABORT_UMAC,
+                                             IWL_ALWAYS_LONG_GROUP, 0),
+                                  0, sizeof(cmd), &cmd);
        if (!ret)
                mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
  
  static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
  {
        struct iwl_notification_wait wait_scan_done;
-       static const u8 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+       static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
                                              SCAN_OFFLOAD_COMPLETE, };
        int ret;
  
index 26f076e821491e09d7805c5f37d229d01480229c,f7d3921b982fe088fad435c37e7ea8d8b1b4e7a7..2531aa3d6754a318df3045ceed55c521d99da4c1
@@@ -1148,18 -1148,31 +1148,31 @@@ int iwl_mvm_sta_tx_agg_flush(struct iwl
  
  static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
  {
-       int i;
+       int i, max = -1, max_offs = -1;
  
        lockdep_assert_held(&mvm->mutex);
  
-       i = find_first_zero_bit(mvm->fw_key_table, STA_KEY_MAX_NUM);
+       /* Pick the unused key offset with the highest 'deleted'
+        * counter. Every time a key is deleted, all the counters
+        * are incremented and the one that was just deleted is
+        * reset to zero. Thus, the highest counter is the one
+        * that was deleted longest ago. Pick that one.
+        */
+       for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+               if (test_bit(i, mvm->fw_key_table))
+                       continue;
+               if (mvm->fw_key_deleted[i] > max) {
+                       max = mvm->fw_key_deleted[i];
+                       max_offs = i;
+               }
+       }
  
-       if (i == STA_KEY_MAX_NUM)
+       if (max_offs < 0)
                return STA_KEY_IDX_INVALID;
  
-       __set_bit(i, mvm->fw_key_table);
+       __set_bit(max_offs, mvm->fw_key_table);
  
-       return i;
+       return max_offs;
  }
  
  static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif,
@@@ -1401,7 -1414,6 +1414,7 @@@ int iwl_mvm_set_sta_key(struct iwl_mvm 
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        u8 sta_id;
        int ret;
 +      static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
  
        lockdep_assert_held(&mvm->mutex);
  
  end:
        IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
                      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
 -                    sta->addr, ret);
 +                    sta ? sta->addr : zero_addr, ret);
        return ret;
  }
  
@@@ -1479,7 -1491,7 +1492,7 @@@ int iwl_mvm_remove_sta_key(struct iwl_m
  {
        bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
        u8 sta_id;
-       int ret;
+       int ret, i;
  
        lockdep_assert_held(&mvm->mutex);
  
                return -ENOENT;
        }
  
+       /* track which key was deleted last */
+       for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+               if (mvm->fw_key_deleted[i] < U8_MAX)
+                       mvm->fw_key_deleted[i]++;
+       }
+       mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
        if (sta_id == IWL_MVM_STATION_COUNT) {
                IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
                return 0;
@@@ -1661,9 -1680,8 +1681,8 @@@ void iwl_mvm_sta_modify_sleep_tx_count(
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
  }
  
- int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
-                         struct iwl_rx_cmd_buffer *rxb,
-                         struct iwl_device_cmd *cmd)
+ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+                          struct iwl_rx_cmd_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
        u32 sta_id = le32_to_cpu(notif->sta_id);
  
        if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
-               return 0;
+               return;
  
        rcu_read_lock();
        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
        if (!IS_ERR_OR_NULL(sta))
                ieee80211_sta_eosp(sta);
        rcu_read_unlock();
-       return 0;
  }
  
  void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
index e472729e5f149a451fd7128d45fa54856c8a77ce,c0d09af5b1f73f92718036fad16691528f12fa78..dbd7d544575de68a3972588bb117a8fa5b560ef3
@@@ -86,7 -86,7 +86,7 @@@ void iwl_mvm_te_clear_data(struct iwl_m
  {
        lockdep_assert_held(&mvm->time_event_lock);
  
 -      if (te_data->id == TE_MAX)
 +      if (!te_data->vif)
                return;
  
        list_del(&te_data->list);
@@@ -410,9 -410,8 +410,8 @@@ static int iwl_mvm_aux_roc_te_handle_no
  /*
   * The Rx handler for time event notifications
   */
- int iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
-                               struct iwl_rx_cmd_buffer *rxb,
-                               struct iwl_device_cmd *cmd)
+ void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+                                struct iwl_rx_cmd_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_time_event_notif *notif = (void *)pkt->data;
        }
  unlock:
        spin_unlock_bh(&mvm->time_event_lock);
-       return 0;
  }
  
  static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
@@@ -503,7 -500,7 +500,7 @@@ static int iwl_mvm_time_event_send_add(
                                       struct iwl_mvm_time_event_data *te_data,
                                       struct iwl_time_event_cmd *te_cmd)
  {
-       static const u8 time_event_response[] = { TIME_EVENT_CMD };
+       static const u16 time_event_response[] = { TIME_EVENT_CMD };
        struct iwl_notification_wait wait_time_event;
        int ret;
  
@@@ -566,7 -563,7 +563,7 @@@ void iwl_mvm_protect_session(struct iwl
  {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
-       const u8 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+       const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
        struct iwl_notification_wait wait_te_notif;
        struct iwl_time_event_cmd time_cmd = {};
  
                cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
        time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
  
-       time_cmd.apply_time =
-               cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
+       time_cmd.apply_time = cpu_to_le32(0);
  
        time_cmd.max_frags = TE_V2_FRAG_NONE;
        time_cmd.max_delay = cpu_to_le32(max_delay);
index 89116864d2a0ec346941ec8e4fded14691612ff3,eac511aebae04da1c6a86f369d6b6d124738eba5..15bf36ad3809d363d58290392e2bbcbcbaa540f7
@@@ -252,7 -252,7 +252,7 @@@ void iwl_mvm_set_tx_cmd_rate(struct iwl
  
        if (info->band == IEEE80211_BAND_2GHZ &&
            !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
 -              rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
 +              rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
        else
                rate_flags =
                        BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
  /*
   * Sets the fields in the Tx cmd that are crypto related
   */
- void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
-                              struct ieee80211_tx_info *info,
-                              struct iwl_tx_cmd *tx_cmd,
-                              struct sk_buff *skb_frag)
+ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+                                     struct ieee80211_tx_info *info,
+                                     struct iwl_tx_cmd *tx_cmd,
+                                     struct sk_buff *skb_frag,
+                                     int hdrlen)
  {
        struct ieee80211_key_conf *keyconf = info->control.hw_key;
+       u8 *crypto_hdr = skb_frag->data + hdrlen;
+       u64 pn;
  
        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_CCMP:
-               tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-               memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
-               if (info->flags & IEEE80211_TX_CTL_AMPDU)
-                       tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_CCMP_AGG);
+       case WLAN_CIPHER_SUITE_CCMP_256:
+               iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+               pn = atomic64_inc_return(&keyconf->tx_pn);
+               crypto_hdr[0] = pn;
+               crypto_hdr[2] = 0;
+               crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+               crypto_hdr[1] = pn >> 8;
+               crypto_hdr[4] = pn >> 16;
+               crypto_hdr[5] = pn >> 24;
+               crypto_hdr[6] = pn >> 32;
+               crypto_hdr[7] = pn >> 40;
                break;
  
        case WLAN_CIPHER_SUITE_TKIP:
   */
  static struct iwl_device_cmd *
  iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
-                     struct ieee80211_sta *sta, u8 sta_id)
+                     int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
  {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
  
        if (info->control.hw_key)
-               iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb);
+               iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
  
        iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
  
@@@ -346,6 -356,7 +356,7 @@@ int iwl_mvm_tx_skb_non_sta(struct iwl_m
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        u8 sta_id;
+       int hdrlen = ieee80211_hdrlen(hdr->frame_control);
  
        if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
                return -1;
                IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
  
        /*
-        * If the interface on which frame is sent is the P2P_DEVICE
+        * If the interface on which the frame is sent is the P2P_DEVICE
         * or an AP/GO interface use the broadcast station associated
-        * with it; otherwise use the AUX station.
+        * with it; otherwise if the interface is a managed interface
+        * use the AP station associated with it for multicast traffic
+        * (this is not possible for unicast packets as TDLS discovery
+        * responses are sent without a station entry); otherwise use the
+        * AUX station.
         */
-       if (info->control.vif &&
-           (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-            info->control.vif->type == NL80211_IFTYPE_AP)) {
+       sta_id = mvm->aux_sta.sta_id;
+       if (info->control.vif) {
                struct iwl_mvm_vif *mvmvif =
                        iwl_mvm_vif_from_mac80211(info->control.vif);
-               sta_id = mvmvif->bcast_sta.sta_id;
-       } else {
-               sta_id = mvm->aux_sta.sta_id;
+               if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+                   info->control.vif->type == NL80211_IFTYPE_AP)
+                       sta_id = mvmvif->bcast_sta.sta_id;
+               else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+                        is_multicast_ether_addr(hdr->addr1)) {
+                       u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
+                       if (ap_sta_id != IWL_MVM_STATION_COUNT)
+                               sta_id = ap_sta_id;
+               }
        }
  
        IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
  
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, NULL, sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
        if (!dev_cmd)
                return -1;
  
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
  
        /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
  
        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
@@@ -416,9 -438,11 +438,11 @@@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm
        u8 tid = IWL_MAX_TID_COUNT;
        u8 txq_id = info->hw_queue;
        bool is_data_qos = false, is_ampdu = false;
+       int hdrlen;
  
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
        fc = hdr->frame_control;
+       hdrlen = ieee80211_hdrlen(fc);
  
        if (WARN_ON_ONCE(!mvmsta))
                return -1;
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                return -1;
  
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, sta, mvmsta->sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
        if (!dev_cmd)
                goto drop;
  
        }
  
        /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(fc));
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
  
        WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
  
@@@ -911,8 -935,7 +935,7 @@@ static void iwl_mvm_rx_tx_cmd_agg(struc
        rcu_read_unlock();
  }
  
- int iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                     struct iwl_device_cmd *cmd)
+ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
                iwl_mvm_rx_tx_cmd_single(mvm, pkt);
        else
                iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
-       return 0;
  }
  
  static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
                (void *)(uintptr_t)tid_data->rate_n_flags;
  }
  
- int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-                       struct iwl_device_cmd *cmd)
+ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
  {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
        if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
                      tid >= IWL_MAX_TID_COUNT,
                      "sta_id %d tid %d", sta_id, tid))
-               return 0;
+               return;
  
        rcu_read_lock();
  
        /* Reclaiming frames for a station that has been deleted ? */
        if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
                rcu_read_unlock();
-               return 0;
+               return;
        }
  
        mvmsta = iwl_mvm_sta_from_mac80211(sta);
                        "invalid BA notification: Q %d, tid %d, flow %d\n",
                        tid_data->txq_id, tid, scd_flow);
                rcu_read_unlock();
-               return 0;
+               return;
        }
  
        spin_lock_bh(&mvmsta->lock);
@@@ -1072,8 -1092,6 +1092,6 @@@ out
                skb = __skb_dequeue(&reclaimed_skbs);
                ieee80211_tx_status(mvm->hw, skb);
        }
-       return 0;
  }
  
  /*
index 9f65c1cff1b1958057ab3bedf385604b34a3323e,cdf3a0c33902f1866d34568b300e5485e9061d38..b0825c402c732c0514637b3b21b26288a7275444
@@@ -368,14 -368,12 +368,14 @@@ static const struct pci_device_id iwl_h
  /* 3165 Series */
        {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
        {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
  
  /* 7265 Series */
        {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
 +      {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
 -      {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
 -      {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
@@@ -614,6 -613,7 +614,7 @@@ static int iwl_pci_resume(struct devic
  {
        struct pci_dev *pdev = to_pci_dev(device);
        struct iwl_trans *trans = pci_get_drvdata(pdev);
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
  
        /* Before you put code here, think about WoWLAN. You cannot check here
                return 0;
  
        /*
-        * On suspend, ict is disabled, and the interrupt mask
-        * gets cleared. Reconfigure them both in case of d0i3
-        * image. Otherwise, only enable rfkill interrupt (in
-        * order to keep track of the rfkill status)
+        * Enable rfkill interrupt (in order to keep track of
+        * the rfkill status)
         */
-       if (trans->wowlan_d0i3) {
-               iwl_pcie_reset_ict(trans);
-               iwl_enable_interrupts(trans);
-       } else {
-               iwl_enable_rfkill_int(trans);
-       }
+       iwl_enable_rfkill_int(trans);
  
        hw_rfkill = iwl_is_rfkill_set(trans);
+       mutex_lock(&trans_pcie->mutex);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+       mutex_unlock(&trans_pcie->mutex);
  
        return 0;
  }
index 376b84e54ad7e8bbb48d039d354c03748665451c,17f65dc894727f18d1c4029966ef56a0d8604bda..4f872f05d988f48fa5e5f6c66d650bdc0fdf47ca
  #include "iwl-io.h"
  #include "iwl-op-mode.h"
  
 -/*
 - * RX related structures and functions
 - */
 -#define RX_NUM_QUEUES 1
 -#define RX_POST_REQ_ALLOC 2
 -#define RX_CLAIM_REQ_ALLOC 8
 -#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
 -#define RX_LOW_WATERMARK 8
 -
  struct iwl_host_cmd;
  
  /*This file includes the declaration that are internal to the
@@@ -77,29 -86,29 +77,29 @@@ struct isr_statistics 
   * struct iwl_rxq - Rx queue
   * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
   * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 + * @pool:
 + * @queue:
   * @read: Shared index to newest available Rx buffer
   * @write: Shared index to oldest written Rx packet
   * @free_count: Number of pre-allocated buffers in rx_free
 - * @used_count: Number of RBDs handled to allocator to use for allocation
   * @write_actual:
 - * @rx_free: list of RBDs with allocated RB ready for use
 - * @rx_used: list of RBDs with no RB attached
 + * @rx_free: list of free SKBs for use
 + * @rx_used: List of Rx buffers with no SKB
   * @need_update: flag to indicate we need to update read/write index
   * @rb_stts: driver's pointer to receive buffer status
   * @rb_stts_dma: bus address of receive buffer status
   * @lock:
 - * @pool: initial pool of iwl_rx_mem_buffer for the queue
 - * @queue: actual rx queue
   *
   * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
   */
  struct iwl_rxq {
        __le32 *bd;
        dma_addr_t bd_dma;
 +      struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
 +      struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
        u32 read;
        u32 write;
        u32 free_count;
 -      u32 used_count;
        u32 write_actual;
        struct list_head rx_free;
        struct list_head rx_used;
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
 -      struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
 -      struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 -};
 -
 -/**
 - * struct iwl_rb_allocator - Rx allocator
 - * @pool: initial pool of allocator
 - * @req_pending: number of requests the allcator had not processed yet
 - * @req_ready: number of requests honored and ready for claiming
 - * @rbd_allocated: RBDs with pages allocated and ready to be handled to
 - *    the queue. This is a list of &struct iwl_rx_mem_buffer
 - * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 - *    of &struct iwl_rx_mem_buffer
 - * @lock: protects the rbd_allocated and rbd_empty lists
 - * @alloc_wq: work queue for background calls
 - * @rx_alloc: work struct for background calls
 - */
 -struct iwl_rb_allocator {
 -      struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
 -      atomic_t req_pending;
 -      atomic_t req_ready;
 -      struct list_head rbd_allocated;
 -      struct list_head rbd_empty;
 -      spinlock_t lock;
 -      struct workqueue_struct *alloc_wq;
 -      struct work_struct rx_alloc;
  };
  
  struct iwl_dma_ptr {
@@@ -250,7 -285,7 +250,7 @@@ iwl_pcie_get_scratchbuf_dma(struct iwl_
  /**
   * struct iwl_trans_pcie - PCIe transport specific data
   * @rxq: all the RX queue data
 - * @rba: allocator for RX replenishing
 + * @rx_replenish: work that will be called when buffers need to be allocated
   * @drv - pointer to iwl_drv
   * @trans: pointer to the generic transport area
   * @scd_base_addr: scheduler sram base address in SRAM
   * @rx_buf_size_8k: 8 kB RX buffer size
   * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
   * @scd_set_active: should the transport configure the SCD for HCMD queue
+  * @wide_cmd_header: true when ucode supports wide command header format
   * @rx_page_order: page order for receive buffer size
   * @reg_lock: protect hw register access
+  * @mutex: to protect stop_device / start_fw / start_hw
   * @cmd_in_flight: true when we have a host command in flight
   * @fw_mon_phys: physical address of the buffer for the firmware monitor
   * @fw_mon_page: points to the first page of the buffer for the firmware monitor
   */
  struct iwl_trans_pcie {
        struct iwl_rxq rxq;
 -      struct iwl_rb_allocator rba;
 +      struct work_struct rx_replenish;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
  
        dma_addr_t ict_tbl_dma;
        int ict_index;
        bool use_ict;
+       bool is_down;
        struct isr_statistics isr_stats;
  
        spinlock_t irq_lock;
+       struct mutex mutex;
        u32 inta_mask;
        u32 scd_base_addr;
        struct iwl_dma_ptr scd_bc_tbls;
        bool rx_buf_size_8k;
        bool bc_table_dword;
        bool scd_set_active;
+       bool wide_cmd_header;
        u32 rx_page_order;
  
        const char *const *command_names;
@@@ -385,7 -425,7 +390,7 @@@ int iwl_trans_pcie_tx(struct iwl_trans 
  void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
  int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
  void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-                           struct iwl_rx_cmd_buffer *rxb, int handler_status);
+                           struct iwl_rx_cmd_buffer *rxb);
  void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                            struct sk_buff_head *skbs);
  void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
index adad8d0fae7f2766812826377c46d29f1f77d4ed,454ef1d9a76a8ea1edbbb60b76d56f77e2efe295..e1af0fffedd818b0e16a6e72fbce9f9445ca8bce
@@@ -1,7 -1,7 +1,7 @@@
  /******************************************************************************
   *
   * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
   *
   * Portions of this file are derived from the ipw3945 project, as well
   * as portions of the ieee80211 subsystem header files.
   * resets the Rx queue buffers with new memory.
   *
   * The management in the driver is as follows:
 - * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 - *   When the interrupt handler is called, the request is processed.
 - *   The page is either stolen - transferred to the upper layer
 - *   or reused - added immediately to the iwl->rxq->rx_free list.
 - * + When the page is stolen - the driver updates the matching queue's used
 - *   count, detaches the RBD and transfers it to the queue used list.
 - *   When there are two used RBDs - they are transferred to the allocator empty
 - *   list. Work is then scheduled for the allocator to start allocating
 - *   eight buffers.
 - *   When there are another 6 used RBDs - they are transferred to the allocator
 - *   empty list and the driver tries to claim the pre-allocated buffers and
 - *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 - *   until ready.
 - *   When there are 8+ buffers in the free list - either from allocation or from
 - *   8 reused unstolen pages - restock is called to update the FW and indexes.
 - * + In order to make sure the allocator always has RBDs to use for allocation
 - *   the allocator has initial pool in the size of num_queues*(8-2) - the
 - *   maximum missing RBDs per allocation request (request posted with 2
 - *    empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
 - *   The queues supplies the recycle of the rest of the RBDs.
 + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
 + *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 + *   to replenish the iwl->rxq->rx_free.
 + * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
 + *   iwl->rxq is replenished and the READ INDEX is updated (updating the
 + *   'processed' and 'read' driver indexes as well)
   * + A received packet is processed and handed to the kernel network stack,
   *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 - * + If there are no allocated buffers in iwl->rxq->rx_free,
 + * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
 + *   rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
   *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
   *   If there were enough free buffers and RX_STALLED is set it is cleared.
   *
   *
   * iwl_rxq_alloc()            Allocates rx_free
   * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 - *                            iwl_pcie_rxq_restock.
 - *                            Used only during initialization.
 + *                            iwl_pcie_rxq_restock
   * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
   *                            queue, updates firmware pointers, and updates
 - *                            the WRITE index.
 - * iwl_pcie_rx_allocator()     Background work for allocating pages.
 + *                            the WRITE index.  If insufficient rx_free buffers
 + *                            are available, schedules iwl_pcie_rx_replenish
   *
   * -- enable interrupts --
   * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
   *                            READ INDEX, detaching the SKB from the pool.
   *                            Moves the packet buffer from queue to rx_used.
 - *                            Posts and claims requests to the allocator.
   *                            Calls iwl_pcie_rxq_restock to refill any empty
   *                            slots.
 - *
 - * RBD life-cycle:
 - *
 - * Init:
 - * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 - *
 - * Regular Receive interrupt:
 - * Page Stolen:
 - * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 - * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 - * Page not Stolen:
 - * rxq.queue -> rxq.rx_free -> rxq.queue
   * ...
   *
   */
@@@ -240,10 -267,6 +240,10 @@@ static void iwl_pcie_rxq_restock(struc
                rxq->free_count--;
        }
        spin_unlock(&rxq->lock);
 +      /* If the pre-allocated buffer pool is dropping low, schedule to
 +       * refill it */
 +      if (rxq->free_count <= RX_LOW_WATERMARK)
 +              schedule_work(&trans_pcie->rx_replenish);
  
        /* If we've added more space for the firmware to place data, tell it.
         * Increment device's write pointer in multiples of 8. */
        }
  }
  
 -/*
 - * iwl_pcie_rx_alloc_page - allocates and returns a page.
 - *
 - */
 -static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
 -{
 -      struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 -      struct iwl_rxq *rxq = &trans_pcie->rxq;
 -      struct page *page;
 -      gfp_t gfp_mask = GFP_KERNEL;
 -
 -      if (rxq->free_count > RX_LOW_WATERMARK)
 -              gfp_mask |= __GFP_NOWARN;
 -
 -      if (trans_pcie->rx_page_order > 0)
 -              gfp_mask |= __GFP_COMP;
 -
 -      /* Alloc a new receive buffer */
 -      page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
 -      if (!page) {
 -              if (net_ratelimit())
 -                      IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
 -                                     trans_pcie->rx_page_order);
 -              /* Issue an error if the hardware has consumed more than half
 -               * of its free buffer list and we don't have enough
 -               * pre-allocated buffers.
 -`              */
 -              if (rxq->free_count <= RX_LOW_WATERMARK &&
 -                  iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
 -                  net_ratelimit())
 -                      IWL_CRIT(trans,
 -                               "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
 -                               rxq->free_count);
 -              return NULL;
 -      }
 -      return page;
 -}
 -
  /*
   * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
   *
   * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
   * allocated buffers.
   */
 -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
 +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
  {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
 +      gfp_t gfp_mask = priority;
  
        while (1) {
                spin_lock(&rxq->lock);
                }
                spin_unlock(&rxq->lock);
  
 +              if (rxq->free_count > RX_LOW_WATERMARK)
 +                      gfp_mask |= __GFP_NOWARN;
 +
 +              if (trans_pcie->rx_page_order > 0)
 +                      gfp_mask |= __GFP_COMP;
 +
                /* Alloc a new receive buffer */
 -              page = iwl_pcie_rx_alloc_page(trans);
 -              if (!page)
 +              page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
 +              if (!page) {
 +                      if (net_ratelimit())
 +                              IWL_DEBUG_INFO(trans, "alloc_pages failed, "
 +                                         "order: %d\n",
 +                                         trans_pcie->rx_page_order);
 +
 +                      if ((rxq->free_count <= RX_LOW_WATERMARK) &&
 +                          net_ratelimit())
 +                              IWL_CRIT(trans, "Failed to alloc_pages with %s."
 +                                       "Only %u free buffers remaining.\n",
 +                                       priority == GFP_ATOMIC ?
 +                                       "GFP_ATOMIC" : "GFP_KERNEL",
 +                                       rxq->free_count);
 +                      /* We don't reschedule replenish work here -- we will
 +                       * call the restock method and if it still needs
 +                       * more buffers it will schedule replenish */
                        return;
 +              }
  
                spin_lock(&rxq->lock);
  
@@@ -355,7 -393,7 +355,7 @@@ static void iwl_pcie_rxq_free_rbs(struc
  
        lockdep_assert_held(&rxq->lock);
  
 -      for (i = 0; i < RX_QUEUE_SIZE; i++) {
 +      for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
                if (!rxq->pool[i].page)
                        continue;
                dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
   * When moving to rx_free an page is allocated for the slot.
   *
   * Also restock the Rx queue via iwl_pcie_rxq_restock.
 - * This is called only during initialization
 + * This is called as a scheduled work item (except for during initialization)
   */
 -static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
 +static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
  {
 -      iwl_pcie_rxq_alloc_rbs(trans);
 +      iwl_pcie_rxq_alloc_rbs(trans, gfp);
  
        iwl_pcie_rxq_restock(trans);
  }
  
 -/*
 - * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 - *
 - * Allocates for each received request 8 pages
 - * Called as a scheduled work item.
 - */
 -static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 -{
 -      struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 -      struct iwl_rb_allocator *rba = &trans_pcie->rba;
 -
 -      while (atomic_read(&rba->req_pending)) {
 -              int i;
 -              struct list_head local_empty;
 -              struct list_head local_allocated;
 -
 -              INIT_LIST_HEAD(&local_allocated);
 -              spin_lock(&rba->lock);
 -              /* swap out the entire rba->rbd_empty to a local list */
 -              list_replace_init(&rba->rbd_empty, &local_empty);
 -              spin_unlock(&rba->lock);
 -
 -              for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
 -                      struct iwl_rx_mem_buffer *rxb;
 -                      struct page *page;
 -
 -                      /* List should never be empty - each reused RBD is
 -                       * returned to the list, and initial pool covers any
 -                       * possible gap between the time the page is allocated
 -                       * to the time the RBD is added.
 -                       */
 -                      BUG_ON(list_empty(&local_empty));
 -                      /* Get the first rxb from the rbd list */
 -                      rxb = list_first_entry(&local_empty,
 -                                             struct iwl_rx_mem_buffer, list);
 -                      BUG_ON(rxb->page);
 -
 -                      /* Alloc a new receive buffer */
 -                      page = iwl_pcie_rx_alloc_page(trans);
 -                      if (!page)
 -                              continue;
 -                      rxb->page = page;
 -
 -                      /* Get physical address of the RB */
 -                      rxb->page_dma = dma_map_page(trans->dev, page, 0,
 -                                      PAGE_SIZE << trans_pcie->rx_page_order,
 -                                      DMA_FROM_DEVICE);
 -                      if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 -                              rxb->page = NULL;
 -                              __free_pages(page, trans_pcie->rx_page_order);
 -                              continue;
 -                      }
 -                      /* dma address must be no more than 36 bits */
 -                      BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 -                      /* and also 256 byte aligned! */
 -                      BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 -
 -                      /* move the allocated entry to the out list */
 -                      list_move(&rxb->list, &local_allocated);
 -                      i++;
 -              }
 -
 -              spin_lock(&rba->lock);
 -              /* add the allocated rbds to the allocator allocated list */
 -              list_splice_tail(&local_allocated, &rba->rbd_allocated);
 -              /* add the unused rbds back to the allocator empty list */
 -              list_splice_tail(&local_empty, &rba->rbd_empty);
 -              spin_unlock(&rba->lock);
 -
 -              atomic_dec(&rba->req_pending);
 -              atomic_inc(&rba->req_ready);
 -      }
 -}
 -
 -/*
 - * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
  - *
  - * Called by queue when the queue posted allocation request and
 - * has freed 8 RBDs in order to restock itself.
 - */
 -static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
 -                                   struct iwl_rx_mem_buffer
 -                                   *out[RX_CLAIM_REQ_ALLOC])
 -{
 -      struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 -      struct iwl_rb_allocator *rba = &trans_pcie->rba;
 -      int i;
 -
 -      if (atomic_dec_return(&rba->req_ready) < 0) {
 -              atomic_inc(&rba->req_ready);
 -              IWL_DEBUG_RX(trans,
 -                           "Allocation request not ready, pending requests = %d\n",
 -                           atomic_read(&rba->req_pending));
 -              return -ENOMEM;
 -      }
 -
 -      spin_lock(&rba->lock);
 -      for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
 -              /* Get next free Rx buffer, remove it from free list */
 -              out[i] = list_first_entry(&rba->rbd_allocated,
 -                             struct iwl_rx_mem_buffer, list);
 -              list_del(&out[i]->list);
 -      }
 -      spin_unlock(&rba->lock);
 -
 -      return 0;
 -}
 -
 -static void iwl_pcie_rx_allocator_work(struct work_struct *data)
 +static void iwl_pcie_rx_replenish_work(struct work_struct *data)
  {
 -      struct iwl_rb_allocator *rba_p =
 -              container_of(data, struct iwl_rb_allocator, rx_alloc);
        struct iwl_trans_pcie *trans_pcie =
 -              container_of(rba_p, struct iwl_trans_pcie, rba);
 +          container_of(data, struct iwl_trans_pcie, rx_replenish);
  
 -      iwl_pcie_rx_allocator(trans_pcie->trans);
 +      iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
  }
  
  static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
  {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
 -      struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
  
        memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
  
        spin_lock_init(&rxq->lock);
 -      spin_lock_init(&rba->lock);
  
        if (WARN_ON(rxq->bd || rxq->rb_stts))
                return -EINVAL;
@@@ -487,15 -637,49 +487,15 @@@ static void iwl_pcie_rx_init_rxb_lists(
        INIT_LIST_HEAD(&rxq->rx_free);
        INIT_LIST_HEAD(&rxq->rx_used);
        rxq->free_count = 0;
 -      rxq->used_count = 0;
  
 -      for (i = 0; i < RX_QUEUE_SIZE; i++)
 +      for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
                list_add(&rxq->pool[i].list, &rxq->rx_used);
  }
  
 -static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
 -{
 -      int i;
 -
 -      lockdep_assert_held(&rba->lock);
 -
 -      INIT_LIST_HEAD(&rba->rbd_allocated);
 -      INIT_LIST_HEAD(&rba->rbd_empty);
 -
 -      for (i = 0; i < RX_POOL_SIZE; i++)
 -              list_add(&rba->pool[i].list, &rba->rbd_empty);
 -}
 -
 -static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
 -{
 -      struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 -      struct iwl_rb_allocator *rba = &trans_pcie->rba;
 -      int i;
 -
 -      lockdep_assert_held(&rba->lock);
 -
 -      for (i = 0; i < RX_POOL_SIZE; i++) {
 -              if (!rba->pool[i].page)
 -                      continue;
 -              dma_unmap_page(trans->dev, rba->pool[i].page_dma,
 -                             PAGE_SIZE << trans_pcie->rx_page_order,
 -                             DMA_FROM_DEVICE);
 -              __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
 -              rba->pool[i].page = NULL;
 -      }
 -}
 -
  int iwl_pcie_rx_init(struct iwl_trans *trans)
  {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
 -      struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int i, err;
  
        if (!rxq->bd) {
                if (err)
                        return err;
        }
 -      if (!rba->alloc_wq)
 -              rba->alloc_wq = alloc_workqueue("rb_allocator",
 -                                              WQ_HIGHPRI | WQ_UNBOUND, 1);
 -      INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
 -
 -      spin_lock(&rba->lock);
 -      atomic_set(&rba->req_pending, 0);
 -      atomic_set(&rba->req_ready, 0);
 -      /* free all first - we might be reconfigured for a different size */
 -      iwl_pcie_rx_free_rba(trans);
 -      iwl_pcie_rx_init_rba(rba);
 -      spin_unlock(&rba->lock);
  
        spin_lock(&rxq->lock);
  
 +      INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
 +
        /* free all first - we might be reconfigured for a different size */
        iwl_pcie_rxq_free_rbs(trans);
        iwl_pcie_rx_init_rxb_lists(rxq);
        memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
        spin_unlock(&rxq->lock);
  
 -      iwl_pcie_rx_replenish(trans);
 +      iwl_pcie_rx_replenish(trans, GFP_KERNEL);
  
        iwl_pcie_rx_hw_init(trans, rxq);
  
@@@ -537,6 -731,7 +537,6 @@@ void iwl_pcie_rx_free(struct iwl_trans 
  {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
 -      struct iwl_rb_allocator *rba = &trans_pcie->rba;
  
        /*if rxq->bd is NULL, it means that nothing has been allocated,
         * exit now */
                return;
        }
  
 -      cancel_work_sync(&rba->rx_alloc);
 -      if (rba->alloc_wq) {
 -              destroy_workqueue(rba->alloc_wq);
 -              rba->alloc_wq = NULL;
 -      }
 -
 -      spin_lock(&rba->lock);
 -      iwl_pcie_rx_free_rba(trans);
 -      spin_unlock(&rba->lock);
 +      cancel_work_sync(&trans_pcie->rx_replenish);
  
        spin_lock(&rxq->lock);
        iwl_pcie_rxq_free_rbs(trans);
        rxq->rb_stts = NULL;
  }
  
 -/*
 - * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 - *
 - * Called when a RBD can be reused. The RBD is transferred to the allocator.
 - * When there are 2 empty RBDs - a request for allocation is posted
 - */
 -static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
 -                                struct iwl_rx_mem_buffer *rxb,
 -                                struct iwl_rxq *rxq)
 -{
 -      struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 -      struct iwl_rb_allocator *rba = &trans_pcie->rba;
 -
 -      /* Count the used RBDs */
 -      rxq->used_count++;
 -
 -      /* Move the RBD to the used list, will be moved to allocator in batches
 -       * before claiming or posting a request*/
 -      list_add_tail(&rxb->list, &rxq->rx_used);
 -
 -      /* If we have RX_POST_REQ_ALLOC new released rx buffers -
 -       * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
 -       * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
 -       * after but we still need to post another request.
 -       */
 -      if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
 -              /* Move the 2 RBDs to the allocator ownership.
 -               Allocator has another 6 from pool for the request completion*/
 -              spin_lock(&rba->lock);
 -              list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
 -              spin_unlock(&rba->lock);
 -
 -              atomic_inc(&rba->req_pending);
 -              queue_work(rba->alloc_wq, &rba->rx_alloc);
 -      }
 -}
 -
  static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                                struct iwl_rx_mem_buffer *rxb)
  {
  
        while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
                struct iwl_rx_packet *pkt;
-               struct iwl_device_cmd *cmd;
                u16 sequence;
                bool reclaim;
-               int index, cmd_index, err, len;
+               int index, cmd_index, len;
                struct iwl_rx_cmd_buffer rxcb = {
                        ._offset = offset,
                        ._rx_page_order = trans_pcie->rx_page_order,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
  
-               if (reclaim)
-                       cmd = txq->entries[cmd_index].cmd;
-               else
-                       cmd = NULL;
-               err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
+               iwl_op_mode_rx(trans->op_mode, &rxcb);
  
                if (reclaim) {
                        kzfree(txq->entries[cmd_index].free_buf);
                         * iwl_trans_send_cmd()
                         * as we reclaim the driver command queue */
                        if (!rxcb._page_stolen)
-                               iwl_pcie_hcmd_complete(trans, &rxcb, err);
+                               iwl_pcie_hcmd_complete(trans, &rxcb);
                        else
                                IWL_WARN(trans, "Claim null rxb?\n");
                }
                         */
                        __free_pages(rxb->page, trans_pcie->rx_page_order);
                        rxb->page = NULL;
 -                      iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
 +                      list_add_tail(&rxb->list, &rxq->rx_used);
                } else {
                        list_add_tail(&rxb->list, &rxq->rx_free);
                        rxq->free_count++;
                }
        } else
 -              iwl_pcie_rx_reuse_rbd(trans, rxb, rxq);
 +              list_add_tail(&rxb->list, &rxq->rx_used);
  }
  
  /*
@@@ -704,10 -938,7 +698,10 @@@ static void iwl_pcie_rx_handle(struct i
  {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rxq *rxq = &trans_pcie->rxq;
 -      u32 r, i, j;
 +      u32 r, i;
 +      u8 fill_rx = 0;
 +      u32 count = 8;
 +      int total_empty;
  
  restart:
        spin_lock(&rxq->lock);
        if (i == r)
                IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
  
  +      /* calculate total frames that need to be restocked after handling RX */
 +      total_empty = r - rxq->write_actual;
 +      if (total_empty < 0)
 +              total_empty += RX_QUEUE_SIZE;
 +
 +      if (total_empty > (RX_QUEUE_SIZE / 2))
 +              fill_rx = 1;
 +
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
  
                iwl_pcie_rx_handle_rb(trans, rxb);
  
                i = (i + 1) & RX_QUEUE_MASK;
 -
 -              /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
 -               * try to claim the pre-allocated buffers from the allocator */
 -              if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
 -                      struct iwl_rb_allocator *rba = &trans_pcie->rba;
 -                      struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC];
 -
 -                      /* Add the remaining 6 empty RBDs for allocator use */
 -                      spin_lock(&rba->lock);
 -                      list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
 -                      spin_unlock(&rba->lock);
 -
 -                      /* If not ready - continue, will try to reclaim later.
 -                      * No need to reschedule work - allocator exits only on
 -                      * success */
 -                      if (!iwl_pcie_rx_allocator_get(trans, out)) {
 -                              /* If success - then RX_CLAIM_REQ_ALLOC
 -                               * buffers were retrieved and should be added
 -                               * to free list */
 -                              rxq->used_count -= RX_CLAIM_REQ_ALLOC;
 -                              for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
 -                                      list_add_tail(&out[j]->list,
 -                                                    &rxq->rx_free);
 -                                      rxq->free_count++;
 -                              }
 +              /* If there are a lot of unused frames,
  +               * restock the Rx queue so ucode won't assert. */
 +              if (fill_rx) {
 +                      count++;
 +                      if (count >= 8) {
 +                              rxq->read = i;
 +                              spin_unlock(&rxq->lock);
 +                              iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
 +                              count = 0;
 +                              goto restart;
                        }
                }
 -              /* handle restock for two cases:
 -              * - we just pulled buffers from the allocator
 -              * - we have 8+ unstolen pages accumulated */
 -              if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
 -                      rxq->read = i;
 -                      spin_unlock(&rxq->lock);
 -                      iwl_pcie_rxq_restock(trans);
 -                      goto restart;
 -              }
        }
  
        /* Backtrack one entry */
        rxq->read = i;
        spin_unlock(&rxq->lock);
  
 +      if (fill_rx)
 +              iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
 +      else
 +              iwl_pcie_rxq_restock(trans);
 +
        if (trans_pcie->napi.poll)
                napi_gro_flush(&trans_pcie->napi, false);
  }
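
  A worked example of the restock arithmetic restored above, assuming
  RX_QUEUE_SIZE is 256 (its value in pcie/internal.h) and treating the numbers
  as purely illustrative:

        /* closed_rb_num from the device: r = 10, rxq->write_actual = 250 */
        total_empty = 10 - 250;                 /* -240 */
        total_empty += RX_QUEUE_SIZE;           /* wraps around to 16 */
        /* 16 is not above RX_QUEUE_SIZE / 2 (128), so fill_rx stays 0 and the
         * queue is restocked once at the end via iwl_pcie_rxq_restock().
         * Had total_empty exceeded 128, fill_rx would be set and the loop
         * would call iwl_pcie_rx_replenish(trans, GFP_ATOMIC) roughly every
         * 8 handled buffers so the firmware does not run dry and assert. */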
  static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
  {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int i;
  
        /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
        if (trans->cfg->internal_wimax_coex &&
        iwl_trans_fw_error(trans);
        local_bh_enable();
  
+       for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
+               del_timer(&trans_pcie->txq[i].stuck_timer);
        clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
        wake_up(&trans_pcie->wait_command_queue);
  }
@@@ -1003,7 -1249,9 +1001,9 @@@ irqreturn_t iwl_pcie_irq_handler(int ir
  
                isr_stats->rfkill++;
  
+               mutex_lock(&trans_pcie->mutex);
                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+               mutex_unlock(&trans_pcie->mutex);
                if (hw_rfkill) {
                        set_bit(STATUS_RFKILL, &trans->status);
                        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
@@@ -1195,8 -1443,9 +1195,9 @@@ void iwl_pcie_reset_ict(struct iwl_tran
  
        val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
  
-       val |= CSR_DRAM_INT_TBL_ENABLE;
-       val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
+       val |= CSR_DRAM_INT_TBL_ENABLE |
+              CSR_DRAM_INIT_TBL_WRAP_CHECK |
+              CSR_DRAM_INIT_TBL_WRITE_POINTER;
  
        IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
  
index 6203c4ad9bba5d8ce3bb2f46f832c3bb4ebe85fc,cbc29ccc6a0062588d575f8a9e8cf47a9d403b07..0549c91ad3729fdedcd7da944d796674a6cec167
@@@ -182,7 -182,7 +182,7 @@@ static void iwl_trans_pcie_write_shr(st
  
  static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
  {
 -      if (!trans->cfg->apmg_not_supported)
 +      if (trans->cfg->apmg_not_supported)
                return;
  
        if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@@ -881,6 -881,14 +881,14 @@@ static void iwl_pcie_apply_destination(
                case PRPH_CLEARBIT:
                        iwl_clear_bits_prph(trans, addr, BIT(val));
                        break;
+               case PRPH_BLOCKBIT:
+                       if (iwl_read_prph(trans, addr) & BIT(val)) {
+                               IWL_ERR(trans,
+                                       "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+                                       val, addr);
+                               goto monitor;
+                       }
+                       break;
                default:
                        IWL_ERR(trans, "FW debug - unknown OP %d\n",
                                dest->reg_ops[i].op);
                }
        }
  
+ monitor:
        if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
                iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
                               trans_pcie->fw_mon_phys >> dest->base_shift);
@@@ -982,13 -991,25 +991,25 @@@ static int iwl_pcie_load_given_ucode_80
  static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw, bool run_in_rfkill)
  {
-       int ret;
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
+       int ret;
+       mutex_lock(&trans_pcie->mutex);
+       /* Someone called stop_device, don't try to start_fw */
+       if (trans_pcie->is_down) {
+               IWL_WARN(trans,
+                        "Can't start_fw since the HW hasn't been started\n");
+               ret = -EIO;
+               goto out;
+       }
  
        /* This may fail if AMT took ownership of the device */
        if (iwl_pcie_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
  
        iwl_enable_rfkill_int(trans);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
-       if (hw_rfkill && !run_in_rfkill)
-               return -ERFKILL;
+       if (hw_rfkill && !run_in_rfkill) {
+               ret = -ERFKILL;
+               goto out;
+       }
  
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
  
        ret = iwl_pcie_nic_init(trans);
        if (ret) {
                IWL_ERR(trans, "Unable to init nic\n");
-               return ret;
+               goto out;
        }
  
        /* make sure rfkill handshake bits are cleared */
  
        /* Load the given image to the HW */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-               return iwl_pcie_load_given_ucode_8000(trans, fw);
+               ret = iwl_pcie_load_given_ucode_8000(trans, fw);
        else
-               return iwl_pcie_load_given_ucode(trans, fw);
+               ret = iwl_pcie_load_given_ucode(trans, fw);
+ out:
+       mutex_unlock(&trans_pcie->mutex);
+       return ret;
  }
  
  static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        iwl_pcie_tx_start(trans, scd_addr);
  }
  
- static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
  {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill, was_hw_rfkill;
  
+       lockdep_assert_held(&trans_pcie->mutex);
+       if (trans_pcie->is_down)
+               return;
+       trans_pcie->is_down = true;
        was_hw_rfkill = iwl_is_rfkill_set(trans);
  
        /* tell the device to stop sending interrupts */
        iwl_pcie_prepare_card_hw(trans);
  }
  
+ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+ {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       mutex_lock(&trans_pcie->mutex);
+       _iwl_trans_pcie_stop_device(trans, low_power);
+       mutex_unlock(&trans_pcie->mutex);
+ }
  void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
  {
+       struct iwl_trans_pcie __maybe_unused *trans_pcie =
+               IWL_TRANS_GET_PCIE_TRANS(trans);
+       lockdep_assert_held(&trans_pcie->mutex);
        if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
-               iwl_trans_pcie_stop_device(trans, true);
+               _iwl_trans_pcie_stop_device(trans, true);
  }
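
  Because iwl_trans_pcie_rf_kill() now asserts that trans_pcie->mutex is held,
  callers outside the locked start/stop paths have to take the mutex around it
  themselves. A minimal sketch of the pattern, mirroring the rfkill-interrupt
  hunk earlier in this diff:

        /* e.g. from the rfkill interrupt notification path */
        mutex_lock(&trans_pcie->mutex);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
        mutex_unlock(&trans_pcie->mutex);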
  
  static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
  {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       if (trans->wowlan_d0i3) {
+               /* Enable persistence mode to avoid reset */
+               iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+                           CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+       }
        iwl_disable_interrupts(trans);
  
        /*
  
        iwl_pcie_disable_ict(trans);
  
+       synchronize_irq(trans_pcie->pci_dev->irq);
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
  
-       /*
-        * reset TX queues -- some of their registers reset during S3
-        * so if we don't reset everything here the D3 image would try
-        * to execute some invalid memory upon resume
-        */
-       iwl_trans_pcie_tx_reset(trans);
+       if (!trans->wowlan_d0i3) {
+               /*
+                * reset TX queues -- some of their registers reset during S3
+                * so if we don't reset everything here the D3 image would try
+                * to execute some invalid memory upon resume
+                */
+               iwl_trans_pcie_tx_reset(trans);
+       }
  
        iwl_pcie_set_pwr(trans, true);
  }
@@@ -1202,12 -1262,18 +1262,18 @@@ static int iwl_trans_pcie_d3_resume(str
  
        iwl_pcie_set_pwr(trans, false);
  
-       iwl_trans_pcie_tx_reset(trans);
+       if (trans->wowlan_d0i3) {
+               iwl_clear_bit(trans, CSR_GP_CNTRL,
+                             CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       } else {
+               iwl_trans_pcie_tx_reset(trans);
  
-       ret = iwl_pcie_rx_init(trans);
-       if (ret) {
-               IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
-               return ret;
+               ret = iwl_pcie_rx_init(trans);
+               if (ret) {
+                       IWL_ERR(trans,
+                               "Failed to resume the device (RX reset)\n");
+                       return ret;
+               }
        }
  
        val = iwl_read32(trans, CSR_RESET);
        return 0;
  }
  
- static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
  {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill;
        int err;
  
+       lockdep_assert_held(&trans_pcie->mutex);
        err = iwl_pcie_prepare_card_hw(trans);
        if (err) {
                IWL_ERR(trans, "Error while preparing HW: %d\n", err);
        /* From now on, the op_mode will be kept updated about RF kill state */
        iwl_enable_rfkill_int(trans);
  
+       /* Set is_down to false here so that...*/
+       trans_pcie->is_down = false;
        hw_rfkill = iwl_is_rfkill_set(trans);
        if (hw_rfkill)
                set_bit(STATUS_RFKILL, &trans->status);
        else
                clear_bit(STATUS_RFKILL, &trans->status);
+       /* ... rfkill can call stop_device and set it false if needed */
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
  
        return 0;
  }
  
+ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+ {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret;
+       mutex_lock(&trans_pcie->mutex);
+       ret = _iwl_trans_pcie_start_hw(trans, low_power);
+       mutex_unlock(&trans_pcie->mutex);
+       return ret;
+ }
  static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
  {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
  
+       mutex_lock(&trans_pcie->mutex);
        /* disable interrupts - don't enable HW RF kill interrupt */
        spin_lock(&trans_pcie->irq_lock);
        iwl_disable_interrupts(trans);
        spin_unlock(&trans_pcie->irq_lock);
  
        iwl_pcie_disable_ict(trans);
+       mutex_unlock(&trans_pcie->mutex);
+       synchronize_irq(trans_pcie->pci_dev->irq);
  }
  
  static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@@ -1326,6 -1417,7 +1417,7 @@@ static void iwl_trans_pcie_configure(st
        else
                trans_pcie->rx_page_order = get_order(4 * 1024);
  
+       trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
        trans_pcie->command_names = trans_cfg->command_names;
        trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
        trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@@ -2459,7 -2551,7 +2551,7 @@@ struct iwl_trans *iwl_trans_pcie_alloc(
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        u16 pci_cmd;
 -      int err;
 +      int ret;
  
        trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
                                &pdev->dev, cfg, &trans_ops_pcie, 0);
        spin_lock_init(&trans_pcie->irq_lock);
        spin_lock_init(&trans_pcie->reg_lock);
        spin_lock_init(&trans_pcie->ref_lock);
+       mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
  
 -      err = pci_enable_device(pdev);
 -      if (err)
 +      ret = pci_enable_device(pdev);
 +      if (ret)
                goto out_no_pci;
  
        if (!cfg->base_params->pcie_l1_allowed) {
  
        pci_set_master(pdev);
  
 -      err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
 -      if (!err)
 -              err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
 -      if (err) {
 -              err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 -              if (!err)
 -                      err = pci_set_consistent_dma_mask(pdev,
 +      ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
 +      if (!ret)
 +              ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
 +      if (ret) {
 +              ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 +              if (!ret)
 +                      ret = pci_set_consistent_dma_mask(pdev,
                                                          DMA_BIT_MASK(32));
                /* both attempts failed: */
 -              if (err) {
 +              if (ret) {
                        dev_err(&pdev->dev, "No suitable DMA available\n");
                        goto out_pci_disable_device;
                }
        }
  
 -      err = pci_request_regions(pdev, DRV_NAME);
 -      if (err) {
 +      ret = pci_request_regions(pdev, DRV_NAME);
 +      if (ret) {
                dev_err(&pdev->dev, "pci_request_regions failed\n");
                goto out_pci_disable_device;
        }
        trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
        if (!trans_pcie->hw_base) {
                dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
 -              err = -ENODEV;
 +              ret = -ENODEV;
                goto out_pci_release_regions;
        }
  
        trans_pcie->pci_dev = pdev;
        iwl_disable_interrupts(trans);
  
 -      err = pci_enable_msi(pdev);
 -      if (err) {
 -              dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
 +      ret = pci_enable_msi(pdev);
 +      if (ret) {
 +              dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
                /* enable rfkill interrupt: hw bug w/a */
                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
         */
        if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
                unsigned long flags;
 -              int ret;
  
                trans->hw_rev = (trans->hw_rev & 0xfff0) |
                                (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
  
 +              ret = iwl_pcie_prepare_card_hw(trans);
 +              if (ret) {
 +                      IWL_WARN(trans, "Exit HW not ready\n");
 +                      goto out_pci_disable_msi;
 +              }
 +
                /*
                 * in-order to recognize C step driver should read chip version
                 * id located at the AUX bus MISC address space.
        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);
  
 -      if (iwl_pcie_alloc_ict(trans))
 +      ret = iwl_pcie_alloc_ict(trans);
 +      if (ret)
                goto out_pci_disable_msi;
  
 -      err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
 +      ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
                                   iwl_pcie_irq_handler,
                                   IRQF_SHARED, DRV_NAME, trans);
 -      if (err) {
 +      if (ret) {
                IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
                goto out_free_ict;
        }
@@@ -2623,5 -2710,5 +2716,5 @@@ out_pci_disable_device
        pci_disable_device(pdev);
  out_no_pci:
        iwl_trans_free(trans);
 -      return ERR_PTR(err);
 +      return ERR_PTR(ret);
  }