3 * This file is part of wlcore
5 * Copyright (C) 2008-2010 Nokia Corporation
6 * Copyright (C) 2011-2013 Texas Instruments Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/wl12xx.h>
29 #include <linux/interrupt.h>
33 #include "wl12xx_80211.h"
44 #define WL1271_BOOT_RETRIES 3
/* Module parameters, applied to the runtime conf in wlcore_adjust_conf():
 * fwlog_param      - FW logger mode string ("continuous"/"ondemand"/
 *                    "dbgpins"/"disable"); NULL means "use conf default".
 * bug_on_recovery  - -1 means "not set on the command line"; any other
 *                    value overrides conf.recovery.bug_on_recovery.
 * no_recovery      - same convention, overrides conf.recovery.no_recovery.
 */
46 static char *fwlog_param;
47 static int bug_on_recovery = -1;
48 static int no_recovery = -1;
/* Forward declarations for static helpers defined later in this file
 * (needed by the recovery and stop paths above their definitions). */
50 static void __wl1271_op_remove_interface(struct wl1271 *wl,
51 struct ieee80211_vif *vif,
52 bool reset_tx_queues);
53 static void wlcore_op_stop_locked(struct wl1271 *wl);
54 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
/*
 * Notify the FW that a STA-role link has become authorized.
 * Only valid for STA BSS vifs (WARN_ON otherwise) and only once the
 * vif is marked associated.  The STA_STATE_SENT test-and-set bit makes
 * the peer-state command fire exactly once per association.
 * NOTE(review): source is truncated here - the early-return values and
 * the error path of wl12xx_cmd_set_peer_state() are not visible.
 */
56 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
60 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
63 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
66 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
/* Tell FW the peer (our AP, identified by the station hlid) is connected */
69 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
73 wl1271_info("Association completed.");
/*
 * cfg80211 regulatory notifier: walk the 5 GHz band and force every
 * radar-flagged channel to be passive-scan / no-IBSS, then push the
 * updated regulatory domain to the firmware.
 * NOTE(review): truncated - the loop over disabled channels presumably
 * `continue`s (line after the IEEE80211_CHAN_DISABLED check is missing).
 */
77 static void wl1271_reg_notify(struct wiphy *wiphy,
78 struct regulatory_request *request)
80 struct ieee80211_supported_band *band;
81 struct ieee80211_channel *ch;
83 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
84 struct wl1271 *wl = hw->priv;
/* Only the 5 GHz band is scanned here; DFS/radar rules apply to it */
86 band = wiphy->bands[IEEE80211_BAND_5GHZ];
87 for (i = 0; i < band->n_channels; i++) {
88 ch = &band->channels[i];
89 if (ch->flags & IEEE80211_CHAN_DISABLED)
92 if (ch->flags & IEEE80211_CHAN_RADAR)
93 ch->flags |= IEEE80211_CHAN_NO_IBSS |
94 IEEE80211_CHAN_PASSIVE_SCAN;
/* propagate the new regdomain channel configuration to the FW */
98 wlcore_regdomain_config(wl);
/*
 * Enable or disable RX streaming for a vif via the ACX interface and
 * keep the WLVIF_FLAG_RX_STREAMING_STARTED flag in sync with the result.
 * Caller must hold wl->mutex.
 */
101 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
106 /* we should hold wl->mutex */
107 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
112 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
120 * this function is being called when the rx_streaming interval
121 * has beed changed or rx_streaming should be disabled
123 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
126 int period = wl->conf.rx_streaming.interval;
128 /* don't reconfigure if rx_streaming is disabled */
129 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
132 /* reconfigure/disable according to new streaming_period */
/* keep streaming only while associated AND (always-on or soft-gemini) */
134 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
135 (wl->conf.rx_streaming.always ||
136 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
137 ret = wl1271_set_rx_streaming(wl, wlvif, true);
139 ret = wl1271_set_rx_streaming(wl, wlvif, false);
140 /* don't cancel_work_sync since we might deadlock */
141 del_timer_sync(&wlvif->rx_streaming_timer);
/*
 * Deferred work: turn RX streaming on for a vif, then arm the per-vif
 * timer that will disable it again after the configured duration of
 * inactivity.  Bails out early if streaming is already on, the STA is
 * not associated, or neither "always" nor soft-gemini requires it.
 * NOTE(review): truncated - the goto labels between the wakeup, the
 * set call and the sleep/unlock are not visible.
 */
147 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
150 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
151 rx_streaming_enable_work);
152 struct wl1271 *wl = wlvif->wl;
154 mutex_lock(&wl->mutex);
156 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
157 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
158 (!wl->conf.rx_streaming.always &&
159 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
162 if (!wl->conf.rx_streaming.interval)
/* chip may be in ELP low-power state; wake it before talking to FW */
165 ret = wl1271_ps_elp_wakeup(wl);
169 ret = wl1271_set_rx_streaming(wl, wlvif, true);
173 /* stop it after some time of inactivity */
174 mod_timer(&wlvif->rx_streaming_timer,
175 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
178 wl1271_ps_elp_sleep(wl);
180 mutex_unlock(&wl->mutex);
/*
 * Deferred work: turn RX streaming off for a vif (queued by the
 * rx_streaming timer, which cannot sleep itself).  No-op when
 * streaming is not currently started.
 */
183 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
186 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
187 rx_streaming_disable_work);
188 struct wl1271 *wl = wlvif->wl;
190 mutex_lock(&wl->mutex);
192 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
195 ret = wl1271_ps_elp_wakeup(wl);
199 ret = wl1271_set_rx_streaming(wl, wlvif, false);
204 wl1271_ps_elp_sleep(wl);
206 mutex_unlock(&wl->mutex);
/*
 * Timer callback (atomic context): cannot take wl->mutex or sleep here,
 * so defer the actual disable to the workqueue.
 */
209 static void wl1271_rx_streaming_timer(unsigned long data)
211 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
212 struct wl1271 *wl = wlvif->wl;
213 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
216 /* wl->mutex must be taken */
217 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 /* if the watchdog is not armed, don't do anything */
/* no blocks allocated in FW means no Tx in flight - nothing to watch */
220 if (wl->tx_allocated_blocks == 0)
/* restart the countdown: cancel any pending run and requeue it */
223 cancel_delayed_work(&wl->tx_watchdog_work);
224 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
225 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
/*
 * Tx watchdog: fires tx_watchdog_timeout ms after blocks were allocated
 * to the FW with no completion seen.  Benign causes (ROC in progress,
 * scan in progress, AP buffering frames for sleeping stations) just
 * rearm the timer; otherwise the FW is considered stuck and a full
 * recovery is queued.
 * NOTE(review): truncated - the `goto out`s after each rearm and the
 * out label before mutex_unlock are not visible.
 */
228 static void wl12xx_tx_watchdog_work(struct work_struct *work)
230 struct delayed_work *dwork;
233 dwork = container_of(work, struct delayed_work, work);
234 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
236 mutex_lock(&wl->mutex);
238 if (unlikely(wl->state != WLCORE_STATE_ON))
241 /* Tx went out in the meantime - everything is ok */
242 if (unlikely(wl->tx_allocated_blocks == 0))
246 * if a ROC is in progress, we might not have any Tx for a long
247 * time (e.g. pending Tx on the non-ROC channels)
249 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
250 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
251 wl->conf.tx.tx_watchdog_timeout);
252 wl12xx_rearm_tx_watchdog_locked(wl);
257 * if a scan is in progress, we might not have any Tx for a long
260 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
261 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
262 wl->conf.tx.tx_watchdog_timeout);
263 wl12xx_rearm_tx_watchdog_locked(wl);
268 * AP might cache a frame for a long time for a sleeping station,
269 * so rearm the timer if there's an AP interface with stations. If
270 * Tx is genuinely stuck we will most hopefully discover it when all
271 * stations are removed due to inactivity.
273 if (wl->active_sta_count) {
274 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
276 wl->conf.tx.tx_watchdog_timeout,
277 wl->active_sta_count);
278 wl12xx_rearm_tx_watchdog_locked(wl);
/* none of the benign explanations apply - declare the FW stuck */
282 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
283 wl->conf.tx.tx_watchdog_timeout);
284 wl12xx_queue_recovery_work(wl);
287 mutex_unlock(&wl->mutex);
/*
 * Apply the optional module parameters on top of the chip-specific
 * default configuration.  fwlog_param selects the FW logger mode;
 * bug_on_recovery/no_recovery override the recovery conf when the
 * user set them (i.e. when they are not the -1 sentinel).
 * NOTE(review): truncated - the `if (fwlog_param)` guard around the
 * strcmp chain is not visible but is implied by the NULL default.
 */
290 static void wlcore_adjust_conf(struct wl1271 *wl)
292 /* Adjust settings according to optional module parameters */
295 if (!strcmp(fwlog_param, "continuous")) {
296 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
297 } else if (!strcmp(fwlog_param, "ondemand")) {
298 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
299 } else if (!strcmp(fwlog_param, "dbgpins")) {
300 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
301 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
302 } else if (!strcmp(fwlog_param, "disable")) {
303 wl->conf.fwlog.mem_blocks = 0;
304 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
306 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
310 if (bug_on_recovery != -1)
311 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
313 if (no_recovery != -1)
314 wl->conf.recovery.no_recovery = (u8) no_recovery;
/*
 * Decide per-link host power-save policy from the FW's per-link PS
 * bitmap and the number of packets queued in FW for that link:
 * end host PS when the STA woke up or has few packets pending; start
 * host PS when the STA sleeps with many packets AND other links exist
 * (so FW memory congestion actually matters).
 */
317 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
318 struct wl12xx_vif *wlvif,
/* fw_ps: FW reports this hlid's station is in PS (bit set in bitmap) */
323 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
326 * Wake up from high level PS if the STA is asleep with too little
327 * packets in FW or if the STA is awake.
329 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
330 wl12xx_ps_link_end(wl, wlvif, hlid);
333 * Start high-level PS if the STA is asleep with enough blocks in FW.
334 * Make an exception if this is the only connected link. In this
335 * case FW-memory congestion is less of a problem.
336 * Note that a single connected STA means 3 active links, since we must
337 * account for the global and broadcast AP links. The "fw_ps" check
338 * assures us the third link is a STA connected to the AP. Otherwise
339 * the FW would not set the PSM bit.
341 else if (wl->active_link_count > 3 && fw_ps &&
342 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
343 wl12xx_ps_link_start(wl, wlvif, hlid, true);
/*
 * On each FW status read for an AP vif: cache the new per-link PS
 * bitmap (logging transitions) and re-evaluate host PS for every
 * station hlid on this vif based on its allocated-packet count.
 */
346 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
347 struct wl12xx_vif *wlvif,
348 struct wl_fw_status_2 *status)
353 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
354 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
355 wl1271_debug(DEBUG_PSM,
356 "link ps prev 0x%x cur 0x%x changed 0x%x",
357 wl->ap_fw_ps_map, cur_fw_ps_map,
358 wl->ap_fw_ps_map ^ cur_fw_ps_map);
360 wl->ap_fw_ps_map = cur_fw_ps_map;
363 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
364 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
365 wl->links[hlid].allocated_pkts);
/*
 * Read the FW status area from the chip and fold it into the driver's
 * bookkeeping: per-queue and per-link freed-packet counters, the total
 * freed/available Tx block accounting, the Tx watchdog, per-AP-vif link
 * PS state, and the host<->chipset time offset.
 * All free-counters coming from FW are deltas computed modulo their
 * width (& 0xff for u8 counters, the 0x100000000LL path for the u32
 * total) to survive wrap-around.
 * NOTE(review): truncated - the raw-read error check, the `lnk`
 * assignment, the `diff == 0` skip and several declarations (ret, i,
 * status_len, ts) are not visible in this extract.
 */
368 static int wlcore_fw_status(struct wl1271 *wl,
369 struct wl_fw_status_1 *status_1,
370 struct wl_fw_status_2 *status_2)
372 struct wl12xx_vif *wlvif;
374 u32 old_tx_blk_count = wl->tx_blocks_available;
375 int avail, freed_blocks;
379 struct wl1271_link *lnk;
381 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
382 sizeof(*status_2) + wl->fw_status_priv_len;
/* single bus transaction pulls both status structs (status_len bytes) */
384 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
389 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
390 "drv_rx_counter = %d, tx_results_counter = %d)",
392 status_1->fw_rx_counter,
393 status_1->drv_rx_counter,
394 status_1->tx_results_counter);
396 for (i = 0; i < NUM_TX_QUEUES; i++) {
397 /* prevent wrap-around in freed-packets counter */
398 wl->tx_allocated_pkts[i] -=
399 (status_2->counters.tx_released_pkts[i] -
400 wl->tx_pkts_freed[i]) & 0xff;
402 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
406 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
410 /* prevent wrap-around in freed-packets counter */
411 diff = (status_2->counters.tx_lnk_free_pkts[i] -
412 lnk->prev_freed_pkts) & 0xff;
417 lnk->allocated_pkts -= diff;
418 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
420 /* accumulate the prev_freed_pkts counter */
421 lnk->total_freed_pkts += diff;
424 /* prevent wrap-around in total blocks counter */
425 if (likely(wl->tx_blocks_freed <=
426 le32_to_cpu(status_2->total_released_blks)))
427 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
/* u32 counter wrapped: add 2^32 to get the true delta */
430 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
431 le32_to_cpu(status_2->total_released_blks);
433 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
435 wl->tx_allocated_blocks -= freed_blocks;
438 * If the FW freed some blocks:
439 * If we still have allocated blocks - re-arm the timer, Tx is
440 * not stuck. Otherwise, cancel the timer (no Tx currently).
443 if (wl->tx_allocated_blocks)
444 wl12xx_rearm_tx_watchdog_locked(wl);
446 cancel_delayed_work(&wl->tx_watchdog_work);
449 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
452 * The FW might change the total number of TX memblocks before
453 * we get a notification about blocks being released. Thus, the
454 * available blocks calculation might yield a temporary result
455 * which is lower than the actual available blocks. Keeping in
456 * mind that only blocks that were allocated can be moved from
457 * TX to RX, tx_blocks_available should never decrease here.
459 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
462 /* if more blocks are available now, tx work can be scheduled */
463 if (wl->tx_blocks_available > old_tx_blk_count)
464 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
466 /* for AP update num of allocated TX blocks per link and ps status */
467 wl12xx_for_each_wlvif_ap(wl, wlvif) {
468 wl12xx_irq_update_links_status(wl, wlvif, status_2);
471 /* update the host-chipset time offset */
/* >> 10 approximates ns -> us; offset used to translate FW timestamps */
473 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
474 (s64)le32_to_cpu(status_2->fw_localtime);
476 wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
/*
 * Drain the deferred RX and TX-status queues into mac80211.  The _ni
 * variants are used because this runs in process context (work item)
 * with bottom halves enabled.
 */
481 static void wl1271_flush_deferred_work(struct wl1271 *wl)
485 /* Pass all received frames to the network stack */
486 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
487 ieee80211_rx_ni(wl->hw, skb);
489 /* Return sent skbs to the network stack */
490 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
491 ieee80211_tx_status_ni(wl->hw, skb);
/*
 * Work item that flushes the deferred queues, looping until the RX
 * queue stays empty (new frames may be deferred while we flush).
 */
494 static void wl1271_netstack_work(struct work_struct *work)
497 container_of(work, struct wl1271, netstack_work);
500 wl1271_flush_deferred_work(wl);
501 } while (skb_queue_len(&wl->deferred_rx_queue));
504 #define WL1271_IRQ_MAX_LOOPS 256
/*
 * Main interrupt bottom-half, called with wl->mutex held: wake the
 * chip, then loop (bounded by WL1271_IRQ_MAX_LOOPS, or a single pass
 * for edge-triggered IRQ platforms) reading FW status and dispatching
 * each asserted interrupt cause - watchdogs trigger recovery, DATA
 * drives the RX/TX paths, EVENT_A/B run the event mailbox handlers.
 * Returns 0 or a negative error; on error the caller queues recovery.
 * NOTE(review): truncated - loop-exit (`done = true`), error `goto`s
 * and several early-out labels are not visible in this extract.
 */
506 static int wlcore_irq_locked(struct wl1271 *wl)
510 int loopcount = WL1271_IRQ_MAX_LOOPS;
512 unsigned int defer_count;
516 * In case edge triggered interrupt must be used, we cannot iterate
517 * more than once without introducing race conditions with the hardirq.
519 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
522 wl1271_debug(DEBUG_IRQ, "IRQ work");
524 if (unlikely(wl->state != WLCORE_STATE_ON))
527 ret = wl1271_ps_elp_wakeup(wl);
531 while (!done && loopcount--) {
533 * In order to avoid a race with the hardirq, clear the flag
534 * before acknowledging the chip. Since the mutex is held,
535 * wl1271_ps_elp_wakeup cannot be called concurrently.
537 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
538 smp_mb__after_clear_bit();
540 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
/* complete any Tx that finished "immediately" (chip-specific hook) */
544 wlcore_hw_tx_immediate_compl(wl);
546 intr = le32_to_cpu(wl->fw_status_1->intr);
547 intr &= WLCORE_ALL_INTR_MASK;
553 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
554 wl1271_error("HW watchdog interrupt received! starting recovery.");
555 wl->watchdog_recovery = true;
558 /* restarting the chip. ignore any other interrupt. */
562 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
563 wl1271_error("SW watchdog interrupt received! "
564 "starting recovery.");
565 wl->watchdog_recovery = true;
568 /* restarting the chip. ignore any other interrupt. */
572 if (likely(intr & WL1271_ACX_INTR_DATA)) {
573 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
575 ret = wlcore_rx(wl, wl->fw_status_1);
579 /* Check if any tx blocks were freed */
580 spin_lock_irqsave(&wl->wl_lock, flags);
581 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
582 wl1271_tx_total_queue_count(wl) > 0) {
583 spin_unlock_irqrestore(&wl->wl_lock, flags);
585 * In order to avoid starvation of the TX path,
586 * call the work function directly.
588 ret = wlcore_tx_work_locked(wl);
592 spin_unlock_irqrestore(&wl->wl_lock, flags);
595 /* check for tx results */
596 ret = wlcore_hw_tx_delayed_compl(wl);
600 /* Make sure the deferred queues don't get too long */
601 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
602 skb_queue_len(&wl->deferred_rx_queue);
603 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
604 wl1271_flush_deferred_work(wl);
607 if (intr & WL1271_ACX_INTR_EVENT_A) {
608 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
/* mailbox 0 = event A, mailbox 1 = event B */
609 ret = wl1271_event_handle(wl, 0);
614 if (intr & WL1271_ACX_INTR_EVENT_B) {
615 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
616 ret = wl1271_event_handle(wl, 1);
621 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
622 wl1271_debug(DEBUG_IRQ,
623 "WL1271_ACX_INTR_INIT_COMPLETE");
625 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
626 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
629 wl1271_ps_elp_sleep(wl);
/*
 * Threaded IRQ handler.  Completes a pending ELP wakeup if one is
 * waiting, defers the work while suspended (marking it pending and
 * disabling the IRQ so resume can replay it), otherwise runs the
 * locked bottom-half and queues Tx work for anything not handled
 * inline.  Returns IRQ_HANDLED (return statements truncated).
 * NOTE(review): truncated - the `if (wl->elp_compl)` guard before
 * complete() and the final return are not visible.
 */
635 static irqreturn_t wlcore_irq(int irq, void *cookie)
639 struct wl1271 *wl = cookie;
641 /* complete the ELP completion */
642 spin_lock_irqsave(&wl->wl_lock, flags);
643 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
645 complete(wl->elp_compl);
646 wl->elp_compl = NULL;
649 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
650 /* don't enqueue a work right now. mark it as pending */
651 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
652 wl1271_debug(DEBUG_IRQ, "should not enqueue work")
653 disable_irq_nosync(wl->irq);
654 pm_wakeup_event(wl->dev, 0);
655 spin_unlock_irqrestore(&wl->wl_lock, flags);
658 spin_unlock_irqrestore(&wl->wl_lock, flags);
660 /* TX might be handled here, avoid redundant work */
661 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
662 cancel_work_sync(&wl->tx_work);
664 mutex_lock(&wl->mutex);
666 ret = wlcore_irq_locked(wl);
668 wl12xx_queue_recovery_work(wl);
670 spin_lock_irqsave(&wl->wl_lock, flags);
671 /* In case TX was not handled here, queue TX work */
672 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
673 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
674 wl1271_tx_total_queue_count(wl) > 0)
675 ieee80211_queue_work(wl->hw, &wl->tx_work);
676 spin_unlock_irqrestore(&wl->wl_lock, flags);
678 mutex_unlock(&wl->mutex);
/* Accumulator for the active-interface iteration below. */
683 struct vif_counter_data {
/* vif the caller is interested in (may be NULL) */
686 struct ieee80211_vif *cur_vif;
/* set true if cur_vif was seen among the active interfaces */
687 bool cur_vif_running;
/*
 * Iterator callback for ieee80211_iterate_active_interfaces(): counts
 * interfaces (counter increment truncated from this extract) and
 * records whether the caller's vif is among the running ones.
 */
690 static void wl12xx_vif_count_iter(void *data, u8 *mac,
691 struct ieee80211_vif *vif)
693 struct vif_counter_data *counter = data;
696 if (counter->cur_vif == vif)
697 counter->cur_vif_running = true;
700 /* caller must not hold wl->mutex, as it might deadlock */
701 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
702 struct ieee80211_vif *cur_vif,
703 struct vif_counter_data *data)
/* zero the accumulator, then let mac80211 walk all active interfaces */
705 memset(data, 0, sizeof(*data));
706 data->cur_vif = cur_vif;
708 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
709 wl12xx_vif_count_iter, data);
/*
 * Select and load the right firmware image: PLT FW for production-line
 * testing, multi-role FW when more than one vif was active last time
 * (cached last_vif_count, since wl->mutex is held here), single-role
 * FW otherwise.  Skips the load entirely when the wanted type is
 * already resident.  The image is copied into vmalloc'ed wl->fw and
 * the request_firmware buffer released.
 * NOTE(review): truncated - error `goto out`s, the size%4 check body
 * and the NULL check after vmalloc are not visible.
 */
712 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
714 const struct firmware *fw;
716 enum wl12xx_fw_type fw_type;
720 fw_type = WL12XX_FW_TYPE_PLT;
721 fw_name = wl->plt_fw_name;
724 * we can't call wl12xx_get_vif_count() here because
725 * wl->mutex is taken, so use the cached last_vif_count value
727 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
728 fw_type = WL12XX_FW_TYPE_MULTI;
729 fw_name = wl->mr_fw_name;
731 fw_type = WL12XX_FW_TYPE_NORMAL;
732 fw_name = wl->sr_fw_name;
/* already have this FW type loaded - nothing to do */
736 if (wl->fw_type == fw_type)
739 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
741 ret = request_firmware(&fw, fw_name, wl->dev);
744 wl1271_error("could not get firmware %s: %d", fw_name, ret);
749 wl1271_error("firmware size is not multiple of 32 bits: %zu",
/* invalidate fw_type until the copy fully succeeds */
756 wl->fw_type = WL12XX_FW_TYPE_NONE;
757 wl->fw_len = fw->size;
758 wl->fw = vmalloc(wl->fw_len);
761 wl1271_error("could not allocate memory for the firmware");
766 memcpy(wl->fw, fw->data, wl->fw_len);
768 wl->fw_type = fw_type;
770 release_firmware(fw);
/*
 * Request a FW recovery.  Warns if recovery was not expected (the
 * INTENDED_FW_RECOVERY flag is clear).  State is moved to RESTARTING
 * first, so a second caller sees the state change and does not queue
 * recovery recursively; interrupts are disabled without sync since we
 * may be called from the IRQ path itself.
 */
775 void wl12xx_queue_recovery_work(struct wl1271 *wl)
777 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
779 /* Avoid a recursive recovery */
780 if (wl->state == WLCORE_STATE_ON) {
781 wl->state = WLCORE_STATE_RESTARTING;
782 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
783 wlcore_disable_interrupts_nosync(wl);
784 ieee80211_queue_work(wl->hw, &wl->recovery_work);
/*
 * Append one FW-log memory block to wl->fwlog.  The block is a
 * length-value list: memblock[len] is a segment length, 0 terminates.
 * The parsed length is clamped so wl->fwlog never exceeds PAGE_SIZE.
 * Returns the number of bytes copied (return truncated from extract).
 */
788 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
792 /* The FW log is a length-value list, find where the log end */
793 while (len < maxlen) {
794 if (memblock[len] == 0)
/* next record would overrun the block - stop at current len */
796 if (len + memblock[len] + 1 > maxlen)
798 len += memblock[len] + 1;
801 /* Make sure we have enough room */
802 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
804 /* Fill the FW log file, consumed by the sysfs fwlog entry */
805 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
806 wl->fwlog_size += len;
811 #define WLCORE_FW_LOG_END 0x2000000
/*
 * On a FW panic, salvage the FW logger contents: wake the chip, stop
 * the logger (unless the FW is hung from a watchdog or dbg-pins output
 * is used - known FW bug), then walk the linked list of HW memory
 * blocks starting at log_start_addr, copying each into wl->fwlog.
 * The list ends at 0 (on-demand) or at WLCORE_FW_LOG_END (continuous).
 * Finally wakes any sysfs reader blocked on fwlog_waitq.
 * NOTE(review): truncated - kmalloc NULL check, error gotos, the
 * kfree/sleep cleanup and the `do {` opener are not visible.
 */
813 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
821 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
822 (wl->conf.fwlog.mem_blocks == 0))
825 wl1271_info("Reading FW panic log");
827 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
832 * Make sure the chip is awake and the logger isn't active.
833 * Do not send a stop fwlog command if the fw is hanged or if
834 * dbgpins are used (due to some fw bug).
836 if (wl1271_ps_elp_wakeup(wl))
838 if (!wl->watchdog_recovery &&
839 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
840 wl12xx_cmd_stop_fwlog(wl);
842 /* Read the first memory block address */
843 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
847 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
/* continuous mode blocks carry an extra rx descriptor before the data */
851 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
852 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
853 end_of_log = WLCORE_FW_LOG_END;
855 offset = sizeof(addr);
859 /* Traverse the memory blocks linked list */
861 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
862 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
868 * Memory blocks are linked to one another. The first 4 bytes
869 * of each memory block hold the hardware address of the next
870 * one. The last memory block points to the first one in
871 * on demand mode and is equal to 0x2000000 in continuous mode.
873 addr = le32_to_cpup((__le32 *)block);
874 if (!wl12xx_copy_fwlog(wl, block + offset,
875 WL12XX_HW_BLOCK_SIZE - offset))
877 } while (addr && (addr != end_of_log));
879 wake_up_interruptible(&wl->fwlog_waitq);
/*
 * Log diagnostic info during recovery: FW version, the FW program
 * counter at the time of death and the raw interrupt status.  Must
 * switch to the BOOT partition to reach those registers, and restores
 * the WORK partition before returning.
 * NOTE(review): truncated - the error returns after each read are not
 * visible.
 */
885 static void wlcore_print_recovery(struct wl1271 *wl)
891 wl1271_info("Hardware recovery in progress. FW ver: %s",
892 wl->chip.fw_ver_str);
894 /* change partitions momentarily so we can read the FW pc */
895 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
899 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
903 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
907 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
908 pc, hint_sts, ++wl->recovery_count);
910 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/*
 * Recovery work item: dump the panic log and recovery diagnostics,
 * honor the bug_on_recovery / no_recovery debug knobs, then tear down
 * every vif, stop the chip and ask mac80211 to restart the hardware.
 * Tx queues are stopped across the restart to suppress spurious
 * frames.  No-op when already off or in PLT mode.
 * NOTE(review): truncated - goto labels and the unlock/relock around
 * ieee80211_restart_hw() are not visible.
 */
914 static void wl1271_recovery_work(struct work_struct *work)
917 container_of(work, struct wl1271, recovery_work);
918 struct wl12xx_vif *wlvif;
919 struct ieee80211_vif *vif;
921 mutex_lock(&wl->mutex);
923 if (wl->state == WLCORE_STATE_OFF || wl->plt)
/* unintended recovery (real FW crash): capture the FW state first */
926 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
927 wl12xx_read_fwlog_panic(wl);
928 wlcore_print_recovery(wl);
931 BUG_ON(wl->conf.recovery.bug_on_recovery &&
932 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
934 if (wl->conf.recovery.no_recovery) {
935 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
939 /* Prevent spurious TX during FW restart */
940 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
942 /* reboot the chipset */
943 while (!list_empty(&wl->wlvif_list)) {
944 wlvif = list_first_entry(&wl->wlvif_list,
945 struct wl12xx_vif, list);
946 vif = wl12xx_wlvif_to_vif(wlvif);
947 __wl1271_op_remove_interface(wl, vif, false);
950 wlcore_op_stop_locked(wl);
952 ieee80211_restart_hw(wl->hw);
955 * Its safe to enable TX now - the queues are stopped after a request
958 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
961 wl->watchdog_recovery = false;
962 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
963 mutex_unlock(&wl->mutex);
/* Kick the ELP control register to wake the FW from low-power sleep. */
966 static int wlcore_fw_wakeup(struct wl1271 *wl)
968 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
/*
 * Allocate the FW status buffers (status_1 and status_2 share one
 * allocation; status_2 starts right after the variable-length part of
 * status_1) and the Tx result interface buffer.  Frees fw_status_1 on
 * tx_res_if allocation failure.
 * NOTE(review): truncated - the -ENOMEM returns are not visible.
 */
971 static int wl1271_setup(struct wl1271 *wl)
973 wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
974 sizeof(*wl->fw_status_2) +
975 wl->fw_status_priv_len, GFP_KERNEL);
976 if (!wl->fw_status_1)
979 wl->fw_status_2 = (struct wl_fw_status_2 *)
980 (((u8 *) wl->fw_status_1) +
981 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
983 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
984 if (!wl->tx_res_if) {
985 kfree(wl->fw_status_1);
/*
 * Power-on sequence: settle delay, power on, post-power-on delay, I/O
 * reset, select the BOOT partition, then wake the ELP module.  On
 * failure of the later steps the chip is powered off again.
 * NOTE(review): truncated - the error gotos between steps are not
 * visible.
 */
992 static int wl12xx_set_power_on(struct wl1271 *wl)
996 msleep(WL1271_PRE_POWER_ON_SLEEP);
997 ret = wl1271_power_on(wl);
1000 msleep(WL1271_POWER_ON_SLEEP);
1001 wl1271_io_reset(wl);
1004 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1008 /* ELP module wake up */
1009 ret = wlcore_fw_wakeup(wl);
1017 wl1271_power_off(wl);
/*
 * Bring the chip up far enough to load firmware: power on, fix the
 * bus block-size quirk if the bus can't honor it, allocate status
 * buffers and fetch the appropriate FW image (PLT or normal).
 */
1021 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1025 ret = wl12xx_set_power_on(wl);
1030 * For wl127x based devices we could use the default block
1031 * size (512 bytes), but due to a bug in the sdio driver, we
1032 * need to set it explicitly after the chip is powered on. To
1033 * simplify the code and since the performance impact is
1034 * negligible, we use the same block size for all different
1037 * Check if the bus supports blocksize alignment and, if it
1038 * doesn't, make sure we don't have the quirk.
1040 if (!wl1271_set_block_size(wl))
1041 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1043 /* TODO: make sure the lower driver has set things up correctly */
1045 ret = wl1271_setup(wl);
1049 ret = wl12xx_fetch_firmware(wl, plt);
/*
 * Enter production-line-test (PLT) mode.  Only valid from the OFF
 * state.  Retries the boot up to WL1271_BOOT_RETRIES times (retry loop
 * partially truncated); on success records state ON and publishes the
 * HW/FW version in the wiphy; on final failure powers off and resets
 * plt_mode.  PLT_CHIP_AWAKE skips the full plt_init.
 */
1057 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1059 int retries = WL1271_BOOT_RETRIES;
1060 struct wiphy *wiphy = wl->hw->wiphy;
1062 static const char* const PLT_MODE[] = {
1071 mutex_lock(&wl->mutex);
1073 wl1271_notice("power up");
1075 if (wl->state != WLCORE_STATE_OFF) {
1076 wl1271_error("cannot go into PLT state because not "
1077 "in off state: %d", wl->state);
1082 /* Indicate to lower levels that we are now in PLT mode */
1084 wl->plt_mode = plt_mode;
1088 ret = wl12xx_chip_wakeup(wl, true);
1092 if (plt_mode != PLT_CHIP_AWAKE) {
1093 ret = wl->ops->plt_init(wl);
1098 wl->state = WLCORE_STATE_ON;
1099 wl1271_notice("firmware booted in PLT mode %s (%s)",
1101 wl->chip.fw_ver_str);
1103 /* update hw/fw version info in wiphy struct */
1104 wiphy->hw_version = wl->chip.id;
/* NOTE(review): strncpy may leave fw_version unterminated if truncated */
1105 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1106 sizeof(wiphy->fw_version));
1111 wl1271_power_off(wl);
1115 wl->plt_mode = PLT_OFF;
1117 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1118 WL1271_BOOT_RETRIES);
1120 mutex_unlock(&wl->mutex);
/*
 * Leave PLT mode: disable interrupts before touching state (so the
 * handler cannot run with stale state), validate we are actually in
 * PLT (error path truncated), flush and cancel all deferred work with
 * the mutex dropped, then power off and mark the device OFF.
 * The enable_interrupts() call only balances the disable above - it
 * may not actually re-enable if op_stop already disabled them.
 */
1125 int wl1271_plt_stop(struct wl1271 *wl)
1129 wl1271_notice("power down");
1132 * Interrupts must be disabled before setting the state to OFF.
1133 * Otherwise, the interrupt handler might be called and exit without
1134 * reading the interrupt status.
1136 wlcore_disable_interrupts(wl);
1137 mutex_lock(&wl->mutex);
1139 mutex_unlock(&wl->mutex);
1142 * This will not necessarily enable interrupts as interrupts
1143 * may have been disabled when op_stop was called. It will,
1144 * however, balance the above call to disable_interrupts().
1146 wlcore_enable_interrupts(wl);
1148 wl1271_error("cannot power down because not in PLT "
1149 "state: %d", wl->state);
1154 mutex_unlock(&wl->mutex);
/* drain deferred frames and stop all background work before power-off */
1156 wl1271_flush_deferred_work(wl);
1157 cancel_work_sync(&wl->netstack_work);
1158 cancel_work_sync(&wl->recovery_work);
1159 cancel_delayed_work_sync(&wl->elp_work);
1160 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1162 mutex_lock(&wl->mutex);
1163 wl1271_power_off(wl);
1165 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1166 wl->state = WLCORE_STATE_OFF;
1168 wl->plt_mode = PLT_OFF;
1170 mutex_unlock(&wl->mutex);
/*
 * mac80211 .tx entry point.  Maps the skb to a driver queue and a FW
 * host link id (hlid), drops frames with no vif or an invalid/stopped
 * link, queues the frame on the per-link per-AC queue under wl_lock,
 * applies the high-watermark soft flow control, and schedules tx_work
 * unless the FW is busy or TX is already being handled in the IRQ path.
 * NOTE(review): truncated - `goto out` after drops and the declarations
 * of q/mapping/hlid are not visible.
 */
1176 static void wl1271_op_tx(struct ieee80211_hw *hw,
1177 struct ieee80211_tx_control *control,
1178 struct sk_buff *skb)
1180 struct wl1271 *wl = hw->priv;
1181 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1182 struct ieee80211_vif *vif = info->control.vif;
1183 struct wl12xx_vif *wlvif = NULL;
1184 unsigned long flags;
1189 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1190 ieee80211_free_txskb(hw, skb);
1194 wlvif = wl12xx_vif_to_data(vif);
1195 mapping = skb_get_queue_mapping(skb);
1196 q = wl1271_tx_get_queue(mapping);
1198 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1200 spin_lock_irqsave(&wl->wl_lock, flags);
1203 * drop the packet if the link is invalid or the queue is stopped
1204 * for any reason but watermark. Watermark is a "soft"-stop so we
1205 * allow these packets through.
1207 if (hlid == WL12XX_INVALID_LINK_ID ||
1208 (!test_bit(hlid, wlvif->links_map)) ||
1209 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1210 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1211 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1212 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1213 ieee80211_free_txskb(hw, skb);
1217 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1219 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
/* global and per-vif counts are both tracked for flow control */
1221 wl->tx_queue_count[q]++;
1222 wlvif->tx_queue_count[q]++;
1225 * The workqueue is slow to process the tx_queue and we need stop
1226 * the queue here, otherwise the queue will get too long.
1228 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1229 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1230 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1231 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1232 wlcore_stop_queue_locked(wl, wlvif, q,
1233 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1237 * The chip specific setup must run before the first TX packet -
1238 * before that, the tx_work will not be initialized!
1241 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1242 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1243 ieee80211_queue_work(wl->hw, &wl->tx_work);
1246 spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Queue the pre-allocated dummy packet (requested by FW when it is low
 * on RX memory blocks).  Idempotent: a pending dummy packet is not
 * queued twice.  If the FW Tx path is idle the Tx work runs inline so
 * the packet goes out as soon as possible; otherwise the threaded IRQ
 * handler will schedule it.
 */
1249 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1251 unsigned long flags;
1254 /* no need to queue a new dummy packet if one is already pending */
1255 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1258 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1260 spin_lock_irqsave(&wl->wl_lock, flags);
1261 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1262 wl->tx_queue_count[q]++;
1263 spin_unlock_irqrestore(&wl->wl_lock, flags);
1265 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1266 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1267 return wlcore_tx_work_locked(wl);
1270 * If the FW TX is busy, TX work will be scheduled by the threaded
1271 * interrupt handler function
1277 * The size of the dummy packet should be at least 1400 bytes. However, in
1278 * order to minimize the number of bus transactions, aligning it to 512 bytes
1279 * boundaries could be beneficial, performance wise
1281 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
/*
 * Build the dummy null-func data frame the FW uses to reclaim RX
 * memory blocks.  Total on-bus size is TOTAL_TX_DUMMY_PACKET_SIZE;
 * room is reserved for the Tx HW descriptor, then a zeroed 3-address
 * 802.11 header (NULLFUNC, ToDS) plus zero padding fills the rest.
 * Returns the skb, or NULL on allocation failure (return truncated).
 */
1283 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1285 struct sk_buff *skb;
1286 struct ieee80211_hdr_3addr *hdr;
1287 unsigned int dummy_packet_size;
1289 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1290 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1292 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1294 wl1271_warning("Failed to allocate a dummy packet skb");
/* headroom for the Tx HW descriptor prepended by the Tx path */
1298 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1300 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1301 memset(hdr, 0, sizeof(*hdr));
1302 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1303 IEEE80211_STYPE_NULLFUNC |
1304 IEEE80211_FCTL_TODS);
1306 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1308 /* Dummy packets require the TID to be management */
1309 skb->priority = WL1271_TID_MGMT;
1311 /* Initialize all fields that might be used */
1312 skb_set_queue_mapping(skb, 0);
1313 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
/*
 * Validate that a cfg80211 WoWLAN packet pattern can be expressed as
 * FW RX-filter fields: each contiguous run of set mask bits is one
 * field (split if it crosses the Ethernet/IP header boundary), and the
 * total field count and flattened size must fit FW limits.
 * NOTE(review): truncated - the mask NULL check, the field open/close
 * bookkeeping inside the loop and the return values are not visible.
 */
1321 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1323 int num_fields = 0, in_field = 0, fields_size = 0;
1324 int i, pattern_len = 0;
1327 wl1271_warning("No mask in WoWLAN pattern");
1332 * The pattern is broken up into segments of bytes at different offsets
1333 * that need to be checked by the FW filter. Each segment is called
1334 * a field in the FW API. We verify that the total number of fields
1335 * required for this pattern won't exceed FW limits (8)
1336 * as well as the total fields buffer won't exceed the FW limit.
1337 * Note that if there's a pattern which crosses Ethernet/IP header
1338 * boundary a new field is required.
1340 for (i = 0; i < p->pattern_len; i++) {
1341 if (test_bit(i, (unsigned long *)p->mask)) {
1346 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1348 fields_size += pattern_len +
1349 RX_FILTER_FIELD_OVERHEAD;
1357 fields_size += pattern_len +
1358 RX_FILTER_FIELD_OVERHEAD;
/* account for a field still open when the pattern ends */
1365 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1369 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1370 wl1271_warning("RX Filter too complex. Too many segments");
1374 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1375 wl1271_warning("RX filter pattern is too big");
1382 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1384 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1387 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1394 for (i = 0; i < filter->num_fields; i++)
1395 kfree(filter->fields[i].pattern);
/*
 * Append one field (offset/flags/pattern) to an RX filter. A private
 * copy of @pattern is allocated; it is released later by
 * wl1271_rx_filter_free().
 */
1400 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1401 u16 offset, u8 flags,
1402 u8 *pattern, u8 len)
1404 struct wl12xx_rx_filter_field *field;
1406 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1407 wl1271_warning("Max fields per RX filter. can't alloc another");
1411 field = &filter->fields[filter->num_fields];
1413 field->pattern = kzalloc(len, GFP_KERNEL);
1414 if (!field->pattern) {
1415 wl1271_warning("Failed to allocate RX filter pattern");
/* the slot is committed only once the pattern allocation succeeded */
1419 filter->num_fields++;
/* FW expects the offset in little-endian */
1421 field->offset = cpu_to_le16(offset);
1422 field->flags = flags;
1424 memcpy(field->pattern, pattern, len);
/*
 * Number of bytes the filter's fields occupy once flattened for the
 * FW: per field, the field struct (minus the in-memory pattern
 * pointer) plus the actual pattern bytes.
 */
1429 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1431 int i, fields_size = 0;
1433 for (i = 0; i < filter->num_fields; i++)
1434 fields_size += filter->fields[i].len +
1435 sizeof(struct wl12xx_rx_filter_field) -
/*
 * Serialize the filter fields into @buf in the FW wire format: each
 * field header followed inline by its pattern bytes (the in-memory
 * pattern pointer is replaced by the pattern data itself).
 */
1441 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1445 struct wl12xx_rx_filter_field *field;
1447 for (i = 0; i < filter->num_fields; i++) {
1448 field = (struct wl12xx_rx_filter_field *)buf;
1450 field->offset = filter->fields[i].offset;
1451 field->flags = filter->fields[i].flags;
1452 field->len = filter->fields[i].len;
1454 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
/* advance past the header (minus the pointer) plus the inline pattern */
1455 buf += sizeof(struct wl12xx_rx_filter_field) -
1456 sizeof(u8 *) + field->len;
1461 * Allocates an RX filter returned through f
1462 * which needs to be freed using rx_filter_free()
/*
 * Convert a WoWLAN pattern into an RX filter: every contiguous masked
 * run becomes a field, with runs crossing the Ethernet/IP header
 * boundary split in two (IP-header fields use an offset relative to
 * that boundary). The assembled filter's action is FILTER_SIGNAL.
 */
1465 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1466 struct wl12xx_rx_filter **f)
1469 struct wl12xx_rx_filter *filter;
1473 filter = wl1271_rx_filter_alloc();
1475 wl1271_warning("Failed to alloc rx filter");
1481 while (i < p->pattern_len) {
/* skip unmasked bytes */
1482 if (!test_bit(i, (unsigned long *)p->mask)) {
/* find the end of this contiguous masked run */
1487 for (j = i; j < p->pattern_len; j++) {
1488 if (!test_bit(j, (unsigned long *)p->mask))
1491 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1492 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1496 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1498 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
/* IP-header fields are offset from the end of the Ethernet header */
1500 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1501 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1506 ret = wl1271_rx_filter_alloc_field(filter,
1509 &p->pattern[i], len);
1516 filter->action = FILTER_SIGNAL;
/* error path: release the partially built filter */
1522 wl1271_rx_filter_free(filter);
/*
 * Program FW RX filtering for WoWLAN. With no patterns (or wow->any)
 * the default filter is restored and all per-pattern filters cleared;
 * otherwise every pattern is validated first, the FW filter state is
 * reset, each pattern is translated into an RX filter and enabled, and
 * the default action becomes FILTER_DROP so only matches wake the host.
 */
1528 static int wl1271_configure_wowlan(struct wl1271 *wl,
1529 struct cfg80211_wowlan *wow)
1533 if (!wow || wow->any || !wow->n_patterns) {
1534 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1539 ret = wl1271_rx_filter_clear_all(wl);
1546 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1549 /* Validate all incoming patterns before clearing current FW state */
1550 for (i = 0; i < wow->n_patterns; i++) {
1551 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1553 wl1271_warning("Bad wowlan pattern %d", i);
/* reset FW filter state before installing the new set */
1558 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1562 ret = wl1271_rx_filter_clear_all(wl);
1566 /* Translate WoWLAN patterns into filters */
1567 for (i = 0; i < wow->n_patterns; i++) {
1568 struct cfg80211_pkt_pattern *p;
1569 struct wl12xx_rx_filter *filter = NULL;
1571 p = &wow->patterns[i];
1573 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1575 wl1271_warning("Failed to create an RX filter from "
1576 "wowlan pattern %d", i);
1580 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
/* the FW keeps its own copy; the local filter is always freed */
1582 wl1271_rx_filter_free(filter);
1587 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
/*
 * Suspend-time setup for a STA vif: program WoWLAN filters and, only
 * when they differ from the runtime values, the suspend wake-up event
 * and listen interval. No-op when not associated.
 */
1593 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1594 struct wl12xx_vif *wlvif,
1595 struct cfg80211_wowlan *wow)
1599 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1602 ret = wl1271_ps_elp_wakeup(wl);
1606 ret = wl1271_configure_wowlan(wl, wow);
/* skip the ACX when the suspend values match the runtime ones */
1610 if ((wl->conf.conn.suspend_wake_up_event ==
1611 wl->conf.conn.wake_up_event) &&
1612 (wl->conf.conn.suspend_listen_interval ==
1613 wl->conf.conn.listen_interval))
1616 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1617 wl->conf.conn.suspend_wake_up_event,
1618 wl->conf.conn.suspend_listen_interval);
1621 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1624 wl1271_ps_elp_sleep(wl);
/*
 * Suspend-time setup for an AP vif: enable beacon filtering so the
 * host is not woken for every beacon. No-op if the AP isn't started.
 */
1630 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1631 struct wl12xx_vif *wlvif)
1635 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1638 ret = wl1271_ps_elp_wakeup(wl);
1642 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1644 wl1271_ps_elp_sleep(wl);
1650 static int wl1271_configure_suspend(struct wl1271 *wl,
1651 struct wl12xx_vif *wlvif,
1652 struct cfg80211_wowlan *wow)
1654 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1655 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1656 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1657 return wl1271_configure_suspend_ap(wl, wlvif);
/*
 * Undo suspend-time configuration on resume: clear the WoWLAN filters
 * (STA), restore the runtime wake-up conditions when they were changed
 * for suspend, and disable beacon filtering (AP).
 */
1661 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1664 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1665 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1667 if ((!is_ap) && (!is_sta))
1670 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1673 ret = wl1271_ps_elp_wakeup(wl);
/* NULL wow clears the WoWLAN RX filters installed at suspend */
1678 wl1271_configure_wowlan(wl, NULL);
1680 if ((wl->conf.conn.suspend_wake_up_event ==
1681 wl->conf.conn.wake_up_event) &&
1682 (wl->conf.conn.suspend_listen_interval ==
1683 wl->conf.conn.listen_interval))
1686 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1687 wl->conf.conn.wake_up_event,
1688 wl->conf.conn.listen_interval);
1691 wl1271_error("resume: wake up conditions failed: %d",
1695 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1699 wl1271_ps_elp_sleep(wl);
/*
 * mac80211 .suspend callback. Postpones suspend if a recovery is in
 * flight, flushes TX, configures every vif for suspend, then toggles
 * interrupts around setting WL1271_FLAG_SUSPENDED so no new threaded
 * IRQ work can be queued while suspended, and flushes remaining work.
 */
1702 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1703 struct cfg80211_wowlan *wow)
1705 struct wl1271 *wl = hw->priv;
1706 struct wl12xx_vif *wlvif;
1709 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1712 /* we want to perform the recovery before suspending */
1713 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1714 wl1271_warning("postponing suspend to perform recovery");
1718 wl1271_tx_flush(wl);
1720 mutex_lock(&wl->mutex);
1721 wl->wow_enabled = true;
1722 wl12xx_for_each_wlvif(wl, wlvif) {
1723 ret = wl1271_configure_suspend(wl, wlvif, wow);
1725 mutex_unlock(&wl->mutex);
1726 wl1271_warning("couldn't prepare device to suspend");
1730 mutex_unlock(&wl->mutex);
1731 /* flush any remaining work */
1732 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1735 * disable and re-enable interrupts in order to flush
1738 wlcore_disable_interrupts(wl);
1741 * set suspended flag to avoid triggering a new threaded_irq
1742 * work. no need for spinlock as interrupts are disabled.
1744 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1746 wlcore_enable_interrupts(wl);
1747 flush_work(&wl->tx_work);
1748 flush_delayed_work(&wl->elp_work);
/*
 * mac80211 .resume callback. Re-enables IRQ-work queueing, runs any
 * postponed irq work directly — unless a recovery is pending, in which
 * case the HW must not be touched and the forgotten recovery is queued
 * instead — then restores runtime configuration on every vif.
 */
1753 static int wl1271_op_resume(struct ieee80211_hw *hw)
1755 struct wl1271 *wl = hw->priv;
1756 struct wl12xx_vif *wlvif;
1757 unsigned long flags;
1758 bool run_irq_work = false, pending_recovery;
1761 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1763 WARN_ON(!wl->wow_enabled);
1766 * re-enable irq_work enqueuing, and call irq_work directly if
1767 * there is a pending work.
1769 spin_lock_irqsave(&wl->wl_lock, flags);
1770 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1771 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1772 run_irq_work = true;
1773 spin_unlock_irqrestore(&wl->wl_lock, flags);
1775 mutex_lock(&wl->mutex);
1777 /* test the recovery flag before calling any SDIO functions */
1778 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1782 wl1271_debug(DEBUG_MAC80211,
1783 "run postponed irq_work directly");
1785 /* don't talk to the HW if recovery is pending */
1786 if (!pending_recovery) {
1787 ret = wlcore_irq_locked(wl);
1789 wl12xx_queue_recovery_work(wl);
1792 wlcore_enable_interrupts(wl);
1795 if (pending_recovery) {
1796 wl1271_warning("queuing forgotten recovery on resume");
1797 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1801 wl12xx_for_each_wlvif(wl, wlvif) {
1802 wl1271_configure_resume(wl, wlvif);
1806 wl->wow_enabled = false;
1807 mutex_unlock(&wl->mutex);
1813 static int wl1271_op_start(struct ieee80211_hw *hw)
1815 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1818 * We have to delay the booting of the hardware because
1819 * we need to know the local MAC address before downloading and
1820 * initializing the firmware. The MAC address cannot be changed
1821 * after boot, and without the proper MAC address, the firmware
1822 * will not function properly.
1824 * The MAC address is first known when the corresponding interface
1825 * is added. That is where we will initialize the hardware.
/*
 * Take the device fully down with wl->mutex held: mark the state OFF
 * first (so work functions become no-ops), quiesce interrupts and all
 * pending work — dropping the mutex while cancelling, since the work
 * items take it — reset TX state, power off, and re-initialize every
 * per-device field to its post-boot default so a later add_interface
 * starts from a clean slate.
 */
1831 static void wlcore_op_stop_locked(struct wl1271 *wl)
/* already off: just rebalance a recovery's interrupt-disable, if any */
1835 if (wl->state == WLCORE_STATE_OFF) {
1836 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1838 wlcore_enable_interrupts(wl);
1844 * this must be before the cancel_work calls below, so that the work
1845 * functions don't perform further work.
1847 wl->state = WLCORE_STATE_OFF;
1850 * Use the nosync variant to disable interrupts, so the mutex could be
1851 * held while doing so without deadlocking.
1853 wlcore_disable_interrupts_nosync(wl);
1855 mutex_unlock(&wl->mutex);
1857 wlcore_synchronize_interrupts(wl);
1858 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1859 cancel_work_sync(&wl->recovery_work);
1860 wl1271_flush_deferred_work(wl);
1861 cancel_delayed_work_sync(&wl->scan_complete_work);
1862 cancel_work_sync(&wl->netstack_work);
1863 cancel_work_sync(&wl->tx_work);
1864 cancel_delayed_work_sync(&wl->elp_work);
1865 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1867 /* let's notify MAC80211 about the remaining pending TX frames */
1868 mutex_lock(&wl->mutex);
1869 wl12xx_tx_reset(wl);
1871 wl1271_power_off(wl);
1873 * In case a recovery was scheduled, interrupts were disabled to avoid
1874 * an interrupt storm. Now that the power is down, it is safe to
1875 * re-enable interrupts to balance the disable depth
1877 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1878 wlcore_enable_interrupts(wl);
/* reset per-device state back to post-probe defaults */
1880 wl->band = IEEE80211_BAND_2GHZ;
1883 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1884 wl->channel_type = NL80211_CHAN_NO_HT;
1885 wl->tx_blocks_available = 0;
1886 wl->tx_allocated_blocks = 0;
1887 wl->tx_results_count = 0;
1888 wl->tx_packets_count = 0;
1889 wl->time_offset = 0;
1890 wl->ap_fw_ps_map = 0;
1892 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1893 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1894 memset(wl->links_map, 0, sizeof(wl->links_map));
1895 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1896 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1897 wl->active_sta_count = 0;
1898 wl->active_link_count = 0;
1900 /* The system link is always allocated */
1901 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1902 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1903 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1906 * this is performed after the cancel_work calls and the associated
1907 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1908 * get executed before all these vars have been reset.
1912 wl->tx_blocks_freed = 0;
1914 for (i = 0; i < NUM_TX_QUEUES; i++) {
1915 wl->tx_pkts_freed[i] = 0;
1916 wl->tx_allocated_pkts[i] = 0;
1919 wl1271_debugfs_reset(wl);
1921 kfree(wl->fw_status_1);
1922 wl->fw_status_1 = NULL;
1923 wl->fw_status_2 = NULL;
1924 kfree(wl->tx_res_if);
1925 wl->tx_res_if = NULL;
1926 kfree(wl->target_mem_map);
1927 wl->target_mem_map = NULL;
1930 * FW channels must be re-calibrated after recovery,
1931 * clear the last Reg-Domain channel configuration.
1933 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
/* mac80211 .stop callback: run wlcore_op_stop_locked() under wl->mutex. */
1936 static void wlcore_op_stop(struct ieee80211_hw *hw)
1938 struct wl1271 *wl = hw->priv;
1940 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1942 mutex_lock(&wl->mutex);
1944 wlcore_op_stop_locked(wl);
1946 mutex_unlock(&wl->mutex);
/*
 * Delayed work fired when a channel switch did not complete in time:
 * report failure to mac80211 and ask the FW to stop the switch.
 */
1949 static void wlcore_channel_switch_work(struct work_struct *work)
1951 struct delayed_work *dwork;
1953 struct ieee80211_vif *vif;
1954 struct wl12xx_vif *wlvif;
1957 dwork = container_of(work, struct delayed_work, work);
1958 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1961 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1963 mutex_lock(&wl->mutex);
1965 if (unlikely(wl->state != WLCORE_STATE_ON))
1968 /* check the channel switch is still ongoing */
1969 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1972 vif = wl12xx_wlvif_to_vif(wlvif);
/* false = the switch did not succeed */
1973 ieee80211_chswitch_done(vif, false);
1975 ret = wl1271_ps_elp_wakeup(wl);
1979 wl12xx_cmd_stop_channel_switch(wl, wlvif);
1981 wl1271_ps_elp_sleep(wl);
1983 mutex_unlock(&wl->mutex);
/*
 * Delayed work: once the beacon-loss grace period expires and we are
 * still associated, report a connection loss to mac80211.
 */
1986 static void wlcore_connection_loss_work(struct work_struct *work)
1988 struct delayed_work *dwork;
1990 struct ieee80211_vif *vif;
1991 struct wl12xx_vif *wlvif;
1993 dwork = container_of(work, struct delayed_work, work);
1994 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1997 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1999 mutex_lock(&wl->mutex);
2001 if (unlikely(wl->state != WLCORE_STATE_ON))
2004 /* Call mac80211 connection loss */
2005 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2008 vif = wl12xx_wlvif_to_vif(wlvif);
2009 ieee80211_connection_loss(vif);
2011 mutex_unlock(&wl->mutex);
/*
 * Delayed work: if no auth reply arrived within the pending-auth ROC
 * timeout, cancel the remain-on-channel kept for the authenticating
 * station. The time_spare check (timeout minus 50ms of scheduler
 * slack) guards against a reply that raced us on the mutex.
 */
2014 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2016 struct delayed_work *dwork;
2018 struct wl12xx_vif *wlvif;
2019 unsigned long time_spare;
2022 dwork = container_of(work, struct delayed_work, work);
2023 wlvif = container_of(dwork, struct wl12xx_vif,
2024 pending_auth_complete_work);
2027 mutex_lock(&wl->mutex);
2029 if (unlikely(wl->state != WLCORE_STATE_ON))
2033 * Make sure a second really passed since the last auth reply. Maybe
2034 * a second auth reply arrived while we were stuck on the mutex.
2035 * Check for a little less than the timeout to protect from scheduler
2038 time_spare = jiffies +
2039 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2040 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2043 ret = wl1271_ps_elp_wakeup(wl);
2047 /* cancel the ROC if active */
2048 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2050 wl1271_ps_elp_sleep(wl);
2052 mutex_unlock(&wl->mutex);
/*
 * Reserve the first free FW rate-policy slot in wl->rate_policies_map;
 * the chosen index is returned through *idx.
 */
2055 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2057 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2058 WL12XX_MAX_RATE_POLICIES);
/* all slots taken */
2059 if (policy >= WL12XX_MAX_RATE_POLICIES)
2062 __set_bit(policy, wl->rate_policies_map);
/*
 * Release a FW rate-policy slot and mark *idx invalid
 * (WL12XX_MAX_RATE_POLICIES acts as the "no policy" sentinel).
 */
2067 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2069 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2072 __clear_bit(*idx, wl->rate_policies_map);
2073 *idx = WL12XX_MAX_RATE_POLICIES;
/*
 * Reserve the first free keep-alive (KLV) template slot in
 * wl->klv_templates_map; the chosen index is returned through *idx.
 */
2076 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2078 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2079 WLCORE_MAX_KLV_TEMPLATES);
/* all slots taken */
2080 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2083 __set_bit(policy, wl->klv_templates_map);
/*
 * Release a keep-alive (KLV) template slot and mark *idx invalid
 * (WLCORE_MAX_KLV_TEMPLATES acts as the "no template" sentinel).
 */
2088 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2090 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2093 __clear_bit(*idx, wl->klv_templates_map);
2094 *idx = WLCORE_MAX_KLV_TEMPLATES;
/*
 * Map the vif's BSS type (plus, presumably, its P2P mode — the
 * conditions are not fully visible here) onto the FW role type.
 * Returns WL12XX_INVALID_ROLE_TYPE for an unrecognized bss_type.
 */
2097 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2099 switch (wlvif->bss_type) {
2100 case BSS_TYPE_AP_BSS:
2102 return WL1271_ROLE_P2P_GO;
2104 return WL1271_ROLE_AP;
2106 case BSS_TYPE_STA_BSS:
2108 return WL1271_ROLE_P2P_CL;
2110 return WL1271_ROLE_STA;
2113 return WL1271_ROLE_IBSS;
2116 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2118 return WL12XX_INVALID_ROLE_TYPE;
/*
 * Reset and initialize the per-vif driver data for a newly added vif:
 * derive bss_type from the mac80211 interface type, allocate rate
 * policies and (for STA/IBSS) a keep-alive template, seed rates, band,
 * channel and power level from the global wl state, and set up the
 * vif's work items and rx-streaming timer.
 */
2121 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2123 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2126 /* clear everything but the persistent data */
2127 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2129 switch (ieee80211_vif_type_p2p(vif)) {
2130 case NL80211_IFTYPE_P2P_CLIENT:
2133 case NL80211_IFTYPE_STATION:
2134 wlvif->bss_type = BSS_TYPE_STA_BSS;
2136 case NL80211_IFTYPE_ADHOC:
2137 wlvif->bss_type = BSS_TYPE_IBSS;
2139 case NL80211_IFTYPE_P2P_GO:
2142 case NL80211_IFTYPE_AP:
2143 wlvif->bss_type = BSS_TYPE_AP_BSS;
/* unknown iftype: marked invalid, rejected by wl12xx_get_role_type() */
2146 wlvif->bss_type = MAX_BSS_TYPE;
2150 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2151 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2152 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2154 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2155 wlvif->bss_type == BSS_TYPE_IBSS) {
2156 /* init sta/ibss data */
2157 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2158 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2159 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2160 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2161 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2162 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2163 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2164 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
/* AP role: per-role hlids plus mgmt/bcast/per-AC unicast policies */
2167 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2168 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2169 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2170 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2171 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2172 wl12xx_allocate_rate_policy(wl,
2173 &wlvif->ap.ucast_rate_idx[i]);
2174 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2176 * TODO: check if basic_rate shouldn't be
2177 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2178 * instead (the same thing for STA above).
2180 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2181 /* TODO: this seems to be used only for STA, check it */
2182 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2185 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2186 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2187 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2190 * mac80211 configures some values globally, while we treat them
2191 * per-interface. thus, on init, we have to copy them from wl
2193 wlvif->band = wl->band;
2194 wlvif->channel = wl->channel;
2195 wlvif->power_level = wl->power_level;
2196 wlvif->channel_type = wl->channel_type;
2198 INIT_WORK(&wlvif->rx_streaming_enable_work,
2199 wl1271_rx_streaming_enable_work);
2200 INIT_WORK(&wlvif->rx_streaming_disable_work,
2201 wl1271_rx_streaming_disable_work);
2202 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2203 wlcore_channel_switch_work);
2204 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2205 wlcore_connection_loss_work);
2206 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2207 wlcore_pending_auth_complete_work);
2208 INIT_LIST_HEAD(&wlvif->list);
2210 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2211 (unsigned long) wlvif);
/*
 * Boot the firmware, retrying up to WL1271_BOOT_RETRIES times: chip
 * wakeup, chip-specific boot, HW init. On success, publish hw/fw
 * version info in the wiphy, disable the 5GHz channels when 11a is
 * unsupported (per NVS), and move the driver state to ON.
 */
2215 static int wl12xx_init_fw(struct wl1271 *wl)
2217 int retries = WL1271_BOOT_RETRIES;
2218 bool booted = false;
2219 struct wiphy *wiphy = wl->hw->wiphy;
2224 ret = wl12xx_chip_wakeup(wl, false);
2228 ret = wl->ops->boot(wl);
2232 ret = wl1271_hw_init(wl);
/* failed attempt: flush pending IRQ work, power off, and retry */
2240 mutex_unlock(&wl->mutex);
2241 /* Unlocking the mutex in the middle of handling is
2242 inherently unsafe. In this case we deem it safe to do,
2243 because we need to let any possibly pending IRQ out of
2244 the system (and while we are WLCORE_STATE_OFF the IRQ
2245 work function will not do anything.) Also, any other
2246 possible concurrent operations will fail due to the
2247 current state, hence the wl1271 struct should be safe. */
2248 wlcore_disable_interrupts(wl);
2249 wl1271_flush_deferred_work(wl);
2250 cancel_work_sync(&wl->netstack_work);
2251 mutex_lock(&wl->mutex);
2253 wl1271_power_off(wl);
2257 wl1271_error("firmware boot failed despite %d retries",
2258 WL1271_BOOT_RETRIES);
2262 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2264 /* update hw/fw version info in wiphy struct */
2265 wiphy->hw_version = wl->chip.id;
2266 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2267 sizeof(wiphy->fw_version));
2270 * Now we know if 11a is supported (info from the NVS), so disable
2271 * 11a channels if not supported
2273 if (!wl->enable_11a)
2274 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2276 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2277 wl->enable_11a ? "" : "not ");
2279 wl->state = WLCORE_STATE_ON;
2284 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2286 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2290 * Check whether a fw switch (i.e. moving from one loaded
2291 * fw to another) is needed. This function is also responsible
2292 * for updating wl->last_vif_count, so it must be called before
2293 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2296 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2297 struct vif_counter_data vif_counter_data,
2300 enum wl12xx_fw_type current_fw = wl->fw_type;
2301 u8 vif_count = vif_counter_data.counter;
/* during change_interface the remove+add pair must not switch fw */
2303 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2306 /* increase the vif count if this is a new vif */
2307 if (add && !vif_counter_data.cur_vif_running)
/* record the count even when no switch happens (see header comment) */
2310 wl->last_vif_count = vif_count;
2312 /* no need for fw change if the device is OFF */
2313 if (wl->state == WLCORE_STATE_OFF)
2316 /* no need for fw change if a single fw is used */
2317 if (!wl->mr_fw_name)
/* switch when the loaded fw's role capacity no longer matches */
2320 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2322 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2329 * Enter "forced psm". Make sure the sta is in psm against the ap,
2330 * to make the fw switch a bit more disconnection-persistent.
2332 static void wl12xx_force_active_psm(struct wl1271 *wl)
2334 struct wl12xx_vif *wlvif;
/* applies to every STA vif on this device */
2336 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2337 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
/*
 * State for wlcore_hw_queue_iter(): a bitmap of hw-queue bases already
 * taken by running interfaces, plus the vif currently being added.
 */
2341 struct wlcore_hw_queue_iter_data {
2342 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2344 struct ieee80211_vif *vif;
2345 /* is the current vif among those iterated */
/*
 * ieee80211_iterate_active_interfaces_atomic() callback: record each
 * running vif's hw-queue base in the bitmap, and flag cur_running when
 * the vif being added is itself already running in mac80211.
 */
2349 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2350 struct ieee80211_vif *vif)
2352 struct wlcore_hw_queue_iter_data *iter_data = data;
2354 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2357 if (iter_data->cur_running || vif == iter_data->vif) {
2358 iter_data->cur_running = true;
/* queues are allocated in groups of NUM_TX_QUEUES per base */
2362 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
/*
 * Pick a hw-queue base for a vif: reuse the base mac80211 already
 * assigned when the vif is still running (resume/recovery), otherwise
 * claim the first free base and register the per-AC queues with
 * mac80211. AP vifs also get a CAB (content-after-beacon) queue from
 * the reserved range past the regular per-interface queues.
 */
2365 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2366 struct wl12xx_vif *wlvif)
2368 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2369 struct wlcore_hw_queue_iter_data iter_data = {};
2372 iter_data.vif = vif;
2374 /* mark all bits taken by active interfaces */
2375 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2376 IEEE80211_IFACE_ITER_RESUME_ALL,
2377 wlcore_hw_queue_iter, &iter_data);
2379 /* the current vif is already running in mac80211 (resume/recovery) */
2380 if (iter_data.cur_running) {
2381 wlvif->hw_queue_base = vif->hw_queue[0];
2382 wl1271_debug(DEBUG_MAC80211,
2383 "using pre-allocated hw queue base %d",
2384 wlvif->hw_queue_base);
2386 /* interface type might have changed type */
2387 goto adjust_cab_queue;
2390 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2391 WLCORE_NUM_MAC_ADDRESSES);
2392 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2395 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2396 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2397 wlvif->hw_queue_base);
2399 for (i = 0; i < NUM_TX_QUEUES; i++) {
2400 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2401 /* register hw queues in mac80211 */
2402 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2406 /* the last places are reserved for cab queues per interface */
2407 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2408 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2409 wlvif->hw_queue_base / NUM_TX_QUEUES;
2411 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
/*
 * mac80211 .add_interface callback: initialize per-vif data, allocate
 * a hw-queue base and FW role, boot the firmware on first use (the MAC
 * address is only known here — see wl1271_op_start), and trigger an
 * intended recovery when a single/multi-role firmware switch is
 * needed.
 */
2416 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2417 struct ieee80211_vif *vif)
2419 struct wl1271 *wl = hw->priv;
2420 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2421 struct vif_counter_data vif_count;
2426 wl1271_error("Adding Interface not allowed while in PLT mode");
2430 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2431 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2433 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2434 ieee80211_vif_type_p2p(vif), vif->addr);
2436 wl12xx_get_vif_count(hw, vif, &vif_count);
2438 mutex_lock(&wl->mutex);
2439 ret = wl1271_ps_elp_wakeup(wl);
2444 * in some very corner case HW recovery scenarios its possible to
2445 * get here before __wl1271_op_remove_interface is complete, so
2446 * opt out if that is the case.
2448 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2449 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2455 ret = wl12xx_init_vif_data(wl, vif);
2460 role_type = wl12xx_get_role_type(wl, wlvif);
2461 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2466 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
/* switch single/multi-role fw via a synchronous, intended recovery */
2470 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2471 wl12xx_force_active_psm(wl);
2472 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2473 mutex_unlock(&wl->mutex);
2474 wl1271_recovery_work(&wl->recovery_work);
2479 * TODO: after the nvs issue will be solved, move this block
2480 * to start(), and make sure here the driver is ON.
2482 if (wl->state == WLCORE_STATE_OFF) {
2484 * we still need this in order to configure the fw
2485 * while uploading the nvs
2487 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2489 ret = wl12xx_init_fw(wl);
2494 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2495 role_type, &wlvif->role_id);
2499 ret = wl1271_init_vif_specific(wl, vif);
2503 list_add(&wlvif->list, &wl->wlvif_list);
2504 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2506 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2511 wl1271_ps_elp_sleep(wl);
2513 mutex_unlock(&wl->mutex);
/*
 * Tear a vif down with wl->mutex held: abort any scan, sched-scan or
 * ROC it owns, disable its FW roles (skipped during recovery, when the
 * FW is gone anyway), reset its TX state, free its hlids, rate
 * policies and AP keys, unlink it, and cancel its work items — the
 * mutex is dropped around the cancel_*_sync calls since those works
 * take it.
 */
2518 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2519 struct ieee80211_vif *vif,
2520 bool reset_tx_queues)
2522 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2524 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2526 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2528 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2531 /* because of hardware recovery, we may get here twice */
2532 if (wl->state == WLCORE_STATE_OFF)
2535 wl1271_info("down");
2537 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2538 wl->scan_wlvif == wlvif) {
2540 * Rearm the tx watchdog just before idling scan. This
2541 * prevents just-finished scans from triggering the watchdog
2543 wl12xx_rearm_tx_watchdog_locked(wl);
2545 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2546 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2547 wl->scan_wlvif = NULL;
2548 wl->scan.req = NULL;
/* true = the scan was aborted */
2549 ieee80211_scan_completed(wl->hw, true);
2552 if (wl->sched_vif == wlvif) {
2553 ieee80211_sched_scan_stopped(wl->hw);
2554 wl->sched_vif = NULL;
2557 if (wl->roc_vif == vif) {
2559 ieee80211_remain_on_channel_expired(wl->hw);
2562 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2563 /* disable active roles */
2564 ret = wl1271_ps_elp_wakeup(wl);
2568 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2569 wlvif->bss_type == BSS_TYPE_IBSS) {
2570 if (wl12xx_dev_role_started(wlvif))
2571 wl12xx_stop_dev(wl, wlvif);
2574 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2578 wl1271_ps_elp_sleep(wl);
2581 wl12xx_tx_reset_wlvif(wl, wlvif);
2583 /* clear all hlids (except system_hlid) */
2584 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2586 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2587 wlvif->bss_type == BSS_TYPE_IBSS) {
2588 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2589 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2590 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2591 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2592 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2594 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2595 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2596 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2597 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2598 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2599 wl12xx_free_rate_policy(wl,
2600 &wlvif->ap.ucast_rate_idx[i]);
2601 wl1271_free_ap_keys(wl, wlvif);
2604 dev_kfree_skb(wlvif->probereq);
2605 wlvif->probereq = NULL;
2606 if (wl->last_wlvif == wlvif)
2607 wl->last_wlvif = NULL;
2608 list_del(&wlvif->list);
2609 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2610 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2611 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2619 * Last AP, have more stations. Configure sleep auth according to STA.
2620 * Don't do this on unintended recovery.
2622 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2623 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2626 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2627 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2628 /* Configure for power according to debugfs */
2629 if (sta_auth != WL1271_PSM_ILLEGAL)
2630 wl1271_acx_sleep_auth(wl, sta_auth);
2631 /* Configure for ELP power saving */
2633 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
/* drop the mutex: the works below take it themselves */
2637 mutex_unlock(&wl->mutex);
2639 del_timer_sync(&wlvif->rx_streaming_timer);
2640 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2641 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2642 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2643 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2644 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2646 mutex_lock(&wl->mutex);
/*
 * mac80211 .remove_interface callback: tear the vif down under
 * wl->mutex (only if it is still listed — it may already be gone when
 * a recovery raced the removal), then queue an intended recovery if
 * the new vif count requires the other (single/multi-role) firmware.
 */
2649 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2650 struct ieee80211_vif *vif)
2652 struct wl1271 *wl = hw->priv;
2653 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2654 struct wl12xx_vif *iter;
2655 struct vif_counter_data vif_count;
2657 wl12xx_get_vif_count(hw, vif, &vif_count);
2658 mutex_lock(&wl->mutex);
2660 if (wl->state == WLCORE_STATE_OFF ||
2661 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2665 * wl->vif can be null here if someone shuts down the interface
2666 * just when hardware recovery has been started.
2668 wl12xx_for_each_wlvif(wl, iter) {
2672 __wl1271_op_remove_interface(wl, vif, true);
2675 WARN_ON(iter != wlvif);
2676 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2677 wl12xx_force_active_psm(wl);
2678 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2679 wl12xx_queue_recovery_work(wl);
2682 mutex_unlock(&wl->mutex);
/*
 * mac80211 .change_interface callback, implemented as remove + re-add
 * with the new type. VIF_CHANGE_IN_PROGRESS suppresses the fw-change
 * check so the single/multi-role firmware is not switched mid-change.
 */
2685 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2686 struct ieee80211_vif *vif,
2687 enum nl80211_iftype new_type, bool p2p)
2689 struct wl1271 *wl = hw->priv;
2692 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2693 wl1271_op_remove_interface(hw, vif);
2695 vif->type = new_type;
2697 ret = wl1271_op_add_interface(hw, vif);
2699 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
/*
 * Issue the FW JOIN for the vif: role_start_ibss for ad-hoc, otherwise
 * role_start_sta — with a workaround for a wl12xx FW bug where the
 * first start_sta is non-functional (dummy start+stop, then start
 * again). Clears the recorded encryption type, since JOIN wipes keys.
 */
2703 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2706 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2709 * One of the side effects of the JOIN command is that is clears
2710 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2711 * to a WPA/WPA2 access point will therefore kill the data-path.
2712 * Currently the only valid scenario for JOIN during association
2713 * is on roaming, in which case we will also be given new keys.
2714 * Keep the below message for now, unless it starts bothering
2715 * users who really like to roam a lot :)
2717 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2718 wl1271_info("JOIN while associated.");
2720 /* clear encryption type */
2721 wlvif->encryption_type = KEY_NONE;
2724 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2726 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2728 * TODO: this is an ugly workaround for wl12xx fw
2729 * bug - we are not able to tx/rx after the first
2730 * start_sta, so make dummy start+stop calls,
2731 * and then call start_sta again.
2732 * this should be fixed in the fw.
2734 wl12xx_cmd_role_start_sta(wl, wlvif);
2735 wl12xx_cmd_role_stop_sta(wl, wlvif);
2738 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
/*
 * Extract the SSID IE from a management frame (starting at @offset into the
 * skb) and cache it in wlvif->ssid / wlvif->ssid_len.
 */
2744 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2748 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2752 wl1271_error("No SSID in IEs!");
2757 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2758 wl1271_error("SSID is too long!");
2762 wlvif->ssid_len = ssid_len;
/* ptr+2 skips the element-ID and length octets of the IE header */
2763 memcpy(wlvif->ssid, ptr+2, ssid_len);
/*
 * Derive and cache the vif's SSID from the AP probe request template
 * (STA mode only — the only source we currently support).
 */
2767 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2769 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2770 struct sk_buff *skb;
2773 /* we currently only support setting the ssid from the ap probe req */
2774 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2777 skb = ieee80211_ap_probereq_get(wl->hw, vif);
/* IEs start after the fixed probe-request header */
2781 ieoffset = offsetof(struct ieee80211_mgmt,
2782 u.probe_req.variable);
2783 wl1271_ssid_set(wlvif, skb, ieoffset);
/*
 * Post-association setup for a STA vif: cache bss params, build the
 * ps-poll and probe-request templates, enable connection monitoring and
 * keep-alive, and sync rate policies / psm state with the firmware.
 * NOTE(excerpt): intermediate source lines are elided in this view.
 */
2789 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2790 struct ieee80211_bss_conf *bss_conf,
2796 wlvif->aid = bss_conf->aid;
2797 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2798 wlvif->beacon_int = bss_conf->beacon_int;
2799 wlvif->wmm_enabled = bss_conf->qos;
2801 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2804 * with wl1271, we don't need to update the
2805 * beacon_int and dtim_period, because the firmware
2806 * updates it by itself when the first beacon is
2807 * received after a join.
2809 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2814 * Get a template for hardware connection maintenance
/* drop any previous probe-request template before building a new one */
2816 dev_kfree_skb(wlvif->probereq);
2817 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2820 ieoffset = offsetof(struct ieee80211_mgmt,
2821 u.probe_req.variable);
2822 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2824 /* enable the connection monitoring feature */
2825 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2830 * The join command disable the keep-alive mode, shut down its process,
2831 * and also clear the template config, so we need to reset it all after
2832 * the join. The acx_aid starts the keep-alive process, and the order
2833 * of the commands below is relevant.
2835 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2839 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2843 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2847 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2848 wlvif->sta.klv_template_id,
2849 ACX_KEEP_ALIVE_TPL_VALID);
2854 * The default fw psm configuration is AUTO, while mac80211 default
2855 * setting is off (ACTIVE), so sync the fw with the correct value.
2857 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2863 wl1271_tx_enabled_rates_get(wl,
2866 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
/*
 * Tear down association state (STA) or joined state (IBSS): free the
 * probe-request template, disable connection monitoring and keep-alive,
 * abort an in-flight channel switch, and invalidate the keep-alive template.
 * NOTE(excerpt): intermediate source lines are elided in this view.
 */
2874 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2877 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2879 /* make sure we are connected (sta) joined */
2881 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2884 /* make sure we are joined (ibss) */
2886 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2890 /* use defaults when not associated */
2893 /* free probe-request template */
2894 dev_kfree_skb(wlvif->probereq);
2895 wlvif->probereq = NULL;
2897 /* disable connection monitor features */
2898 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2902 /* Disable the keep-alive feature */
2903 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
/* if a channel switch was in progress, stop it and tell mac80211 it failed */
2908 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2909 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2911 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2912 ieee80211_chswitch_done(vif, false);
2913 cancel_delayed_work(&wlvif->channel_switch_work);
2916 /* invalidate keep-alive template */
2917 wl1271_acx_keep_alive_config(wl, wlvif,
2918 wlvif->sta.klv_template_id,
2919 ACX_KEEP_ALIVE_TPL_INVALID);
/* Reset the vif's basic and current rate sets to the per-band defaults. */
2924 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2926 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2927 wlvif->rate_set = wlvif->basic_rate_set;
/*
 * Track mac80211's idle notification by toggling WLVIF_FLAG_ACTIVE.
 * Stops a running sched scan when leaving idle (fw only scans while idle).
 */
2930 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2933 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/* no state change — nothing to do */
2935 if (idle == cur_idle)
2939 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
2941 /* The current firmware only supports sched_scan in idle */
2942 if (wl->sched_vif == wlvif)
2943 wl->ops->sched_scan_stop(wl, wlvif);
2945 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
/*
 * Apply per-vif config changes; currently only pushes a new tx power level
 * to the firmware when it differs from the cached value.
 */
2949 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2950 struct ieee80211_conf *conf, u32 changed)
2954 if (conf->power_level != wlvif->power_level) {
2955 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
/* cache only after the firmware accepted the new level */
2959 wlvif->power_level = conf->power_level;
/*
 * mac80211 .config handler: wakes the chip and applies the new hw config
 * to every vif under wl->mutex.
 */
2965 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2967 struct wl1271 *wl = hw->priv;
2968 struct wl12xx_vif *wlvif;
2969 struct ieee80211_conf *conf = &hw->conf;
2972 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2974 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2976 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2979 mutex_lock(&wl->mutex);
/* remember the global power level even if the hw is currently off */
2981 if (changed & IEEE80211_CONF_CHANGE_POWER)
2982 wl->power_level = conf->power_level;
2984 if (unlikely(wl->state != WLCORE_STATE_ON))
2987 ret = wl1271_ps_elp_wakeup(wl);
2991 /* configure each interface */
2992 wl12xx_for_each_wlvif(wl, wlvif) {
2993 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2999 wl1271_ps_elp_sleep(wl);
3002 mutex_unlock(&wl->mutex);
/*
 * Multicast filter snapshot built in prepare_multicast() and consumed in
 * configure_filter(). (Other members are elided in this excerpt.)
 */
3007 struct wl1271_filter_params {
/* raw list of multicast MAC addresses, bounded by the fw group table size */
3010 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
/*
 * mac80211 .prepare_multicast handler: snapshot the hw multicast list into
 * a heap-allocated wl1271_filter_params. GFP_ATOMIC because this may run
 * in atomic context. The pointer is smuggled to configure_filter() via the
 * u64 return value; configure_filter is responsible for freeing it.
 */
3013 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3014 struct netdev_hw_addr_list *mc_list)
3016 struct wl1271_filter_params *fp;
3017 struct netdev_hw_addr *ha;
3019 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3021 wl1271_error("Out of memory setting filters.");
3025 /* update multicast filtering parameters */
3026 fp->mc_list_length = 0;
/* too many groups for the fw table — disable address filtering instead */
3027 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3028 fp->enabled = false;
3031 netdev_hw_addr_list_for_each(ha, mc_list) {
3032 memcpy(fp->mc_list[fp->mc_list_length],
3033 ha->addr, ETH_ALEN);
3034 fp->mc_list_length++;
3038 return (u64)(unsigned long)fp;
/* FIF_* rx filter flags this driver can honor in configure_filter() */
3041 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3044 FIF_BCN_PRBRESP_PROMISC | \
/*
 * mac80211 .configure_filter handler: programs the fw multicast group
 * table for non-AP vifs from the snapshot built in prepare_multicast().
 * NOTE(excerpt): intermediate source lines (incl. freeing @fp) are elided.
 */
3048 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3049 unsigned int changed,
3050 unsigned int *total, u64 multicast)
/* recover the filter snapshot pointer packed into the u64 cookie */
3052 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3053 struct wl1271 *wl = hw->priv;
3054 struct wl12xx_vif *wlvif;
3058 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3059 " total %x", changed, *total);
3061 mutex_lock(&wl->mutex);
/* clamp both masks to what the hardware actually supports */
3063 *total &= WL1271_SUPPORTED_FILTERS;
3064 changed &= WL1271_SUPPORTED_FILTERS;
3066 if (unlikely(wl->state != WLCORE_STATE_ON))
3069 ret = wl1271_ps_elp_wakeup(wl);
3073 wl12xx_for_each_wlvif(wl, wlvif) {
3074 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
/* ALLMULTI: clear the table so all multicast passes */
3075 if (*total & FIF_ALLMULTI)
3076 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3080 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3083 fp->mc_list_length);
3090 * the fw doesn't provide an api to configure the filters. instead,
3091 * the filters configuration is based on the active roles / ROC
3096 wl1271_ps_elp_sleep(wl);
3099 mutex_unlock(&wl->mutex);
/*
 * Record an AP key set before the AP role is started; the keys are
 * replayed into the firmware later by wl1271_ap_init_hwenc().
 * NOTE(excerpt): intermediate source lines are elided in this view.
 */
3103 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3104 u8 id, u8 key_type, u8 key_size,
3105 const u8 *key, u8 hlid, u32 tx_seq_32,
3108 struct wl1271_ap_key *ap_key;
3111 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3113 if (key_size > MAX_KEY_SIZE)
3117 * Find next free entry in ap_keys. Also check we are not replacing
3120 for (i = 0; i < MAX_NUM_KEYS; i++) {
3121 if (wlvif->ap.recorded_keys[i] == NULL)
/* replacing a recorded key is not supported — warn and bail */
3124 if (wlvif->ap.recorded_keys[i]->id == id) {
3125 wl1271_warning("trying to record key replacement");
3130 if (i == MAX_NUM_KEYS)
3133 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3138 ap_key->key_type = key_type;
3139 ap_key->key_size = key_size;
3140 memcpy(ap_key->key, key, key_size);
3141 ap_key->hlid = hlid;
3142 ap_key->tx_seq_32 = tx_seq_32;
3143 ap_key->tx_seq_16 = tx_seq_16;
/* stash in the free slot found above */
3145 wlvif->ap.recorded_keys[i] = ap_key;
/* Free every recorded AP key and NULL the slots (safe to call repeatedly). */
3149 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3153 for (i = 0; i < MAX_NUM_KEYS; i++) {
3154 kfree(wlvif->ap.recorded_keys[i]);
3155 wlvif->ap.recorded_keys[i] = NULL;
/*
 * Replay all keys recorded before AP start into the firmware, then select
 * the default WEP key if any WEP key was installed. Frees the recorded
 * keys on exit. NOTE(excerpt): intermediate source lines are elided.
 */
3159 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3162 struct wl1271_ap_key *key;
3163 bool wep_key_added = false;
3165 for (i = 0; i < MAX_NUM_KEYS; i++) {
3167 if (wlvif->ap.recorded_keys[i] == NULL)
3170 key = wlvif->ap.recorded_keys[i];
/* keys recorded without a station link go to the broadcast link */
3172 if (hlid == WL12XX_INVALID_LINK_ID)
3173 hlid = wlvif->ap.bcast_hlid;
3175 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3176 key->id, key->key_type,
3177 key->key_size, key->key,
3178 hlid, key->tx_seq_32,
3183 if (key->key_type == KEY_WEP)
3184 wep_key_added = true;
3187 if (wep_key_added) {
3188 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3189 wlvif->ap.bcast_hlid);
3195 wl1271_free_ap_keys(wl, wlvif);
/*
 * Low-level key programming, split by role: AP keys go through
 * set_ap_key (or are recorded if the AP isn't started yet); STA keys go
 * through set_sta_key, with several firmware-quirk early-outs.
 * NOTE(excerpt): intermediate source lines are elided in this view.
 */
3199 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3200 u16 action, u8 id, u8 key_type,
3201 u8 key_size, const u8 *key, u32 tx_seq_32,
3202 u16 tx_seq_16, struct ieee80211_sta *sta)
3205 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3208 struct wl1271_station *wl_sta;
3212 wl_sta = (struct wl1271_station *)sta->drv_priv;
3213 hlid = wl_sta->hlid;
/* no station: group key — use the broadcast link */
3215 hlid = wlvif->ap.bcast_hlid;
3218 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3220 * We do not support removing keys after AP shutdown.
3221 * Pretend we do to make mac80211 happy.
3223 if (action != KEY_ADD_OR_REPLACE)
/* AP not started yet: defer the key until wl1271_ap_init_hwenc() */
3226 ret = wl1271_record_ap_key(wl, wlvif, id,
3228 key, hlid, tx_seq_32,
3231 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3232 id, key_type, key_size,
3233 key, hlid, tx_seq_32,
3241 static const u8 bcast_addr[ETH_ALEN] = {
3242 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3245 addr = sta ? sta->addr : bcast_addr;
3247 if (is_zero_ether_addr(addr)) {
3248 /* We don't support TX only encryption */
3252 /* The wl1271 does not allow to remove unicast keys - they
3253 will be cleared automatically on next CMD_JOIN. Ignore the
3254 request silently, as we don't want the mac80211 to emit
3255 an error message. */
3256 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3259 /* don't remove key if hlid was already deleted */
3260 if (action == KEY_REMOVE &&
3261 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3264 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3265 id, key_type, key_size,
3266 key, addr, tx_seq_32,
/*
 * mac80211 .set_key entry point: for ciphers whose spare-block accounting
 * may change (GEM/TKIP), stop and flush tx first so the fw and driver
 * stay in sync, then dispatch to the chip-specific set_key op.
 */
3276 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3277 struct ieee80211_vif *vif,
3278 struct ieee80211_sta *sta,
3279 struct ieee80211_key_conf *key_conf)
3281 struct wl1271 *wl = hw->priv;
3283 bool might_change_spare =
3284 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3285 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3287 if (might_change_spare) {
3289 * stop the queues and flush to ensure the next packets are
3290 * in sync with FW spare block accounting
3292 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3293 wl1271_tx_flush(wl);
3296 mutex_lock(&wl->mutex);
3298 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3300 goto out_wake_queues;
3303 ret = wl1271_ps_elp_wakeup(wl);
3305 goto out_wake_queues;
/* chip-family (wl12xx/wl18xx) specific key handling */
3307 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3309 wl1271_ps_elp_sleep(wl);
/* re-enable the queues stopped for spare-block sync above */
3312 if (might_change_spare)
3313 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3315 mutex_unlock(&wl->mutex);
/*
 * Shared (exported) set_key implementation: resolves the hlid and tx
 * sequence counters, maps the mac80211 cipher to a fw key type, then adds
 * or removes the key. Also rebuilds the ARP response template when the
 * unicast/common encryption type changes on a STA vif.
 * NOTE(excerpt): intermediate source lines are elided in this view.
 */
3320 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3321 struct ieee80211_vif *vif,
3322 struct ieee80211_sta *sta,
3323 struct ieee80211_key_conf *key_conf)
3325 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3332 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3334 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3335 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3336 key_conf->cipher, key_conf->keyidx,
3337 key_conf->keylen, key_conf->flags);
3338 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
/* pick the link id: per-station, AP broadcast, or the STA link */
3340 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3342 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3343 hlid = wl_sta->hlid;
3345 hlid = wlvif->ap.bcast_hlid;
3348 hlid = wlvif->sta.hlid;
/* seed replay counters from the link's freed-packet count */
3350 if (hlid != WL12XX_INVALID_LINK_ID) {
3351 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3352 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3353 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3356 switch (key_conf->cipher) {
3357 case WLAN_CIPHER_SUITE_WEP40:
3358 case WLAN_CIPHER_SUITE_WEP104:
3361 key_conf->hw_key_idx = key_conf->keyidx;
3363 case WLAN_CIPHER_SUITE_TKIP:
3364 key_type = KEY_TKIP;
3365 key_conf->hw_key_idx = key_conf->keyidx;
3367 case WLAN_CIPHER_SUITE_CCMP:
/* fw inserts the IV — ask mac80211 to leave space for it */
3369 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3371 case WL1271_CIPHER_SUITE_GEM:
3375 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3382 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3383 key_conf->keyidx, key_type,
3384 key_conf->keylen, key_conf->key,
3385 tx_seq_32, tx_seq_16, sta);
3387 wl1271_error("Could not add or replace key");
3392 * reconfiguring arp response if the unicast (or common)
3393 * encryption key type was changed
3395 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3396 (sta || key_type == KEY_WEP) &&
3397 wlvif->encryption_type != key_type) {
3398 wlvif->encryption_type = key_type;
3399 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3401 wl1271_warning("build arp rsp failed: %d", ret);
3408 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3409 key_conf->keyidx, key_type,
3410 key_conf->keylen, key_conf->key,
3413 wl1271_error("Could not remove key");
3419 wl1271_error("Unsupported key cmd 0x%x", cmd);
3425 EXPORT_SYMBOL_GPL(wlcore_set_key);
/*
 * mac80211 .set_default_unicast_key handler: cache the index and, for WEP,
 * push the new default key to the firmware.
 */
3427 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3428 struct ieee80211_vif *vif,
3431 struct wl1271 *wl = hw->priv;
3432 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3435 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3438 mutex_lock(&wl->mutex);
3440 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3445 ret = wl1271_ps_elp_wakeup(wl);
3449 wlvif->default_key = key_idx;
3451 /* the default WEP key needs to be configured at least once */
3452 if (wlvif->encryption_type == KEY_WEP) {
3453 ret = wl12xx_cmd_set_default_wep_key(wl,
3461 wl1271_ps_elp_sleep(wl);
3464 mutex_unlock(&wl->mutex);
/*
 * Push the regulatory-domain channel configuration to the firmware, for
 * chips that require it (WLCORE_QUIRK_REGDOMAIN_CONF). On failure the
 * device state is unknown, so a recovery is queued.
 */
3467 void wlcore_regdomain_config(struct wl1271 *wl)
3471 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3474 mutex_lock(&wl->mutex);
3476 if (unlikely(wl->state != WLCORE_STATE_ON))
3479 ret = wl1271_ps_elp_wakeup(wl);
3483 ret = wlcore_cmd_regdomain_config_locked(wl);
3485 wl12xx_queue_recovery_work(wl);
3489 wl1271_ps_elp_sleep(wl);
3491 mutex_unlock(&wl->mutex);
/*
 * mac80211 .hw_scan handler: refuses to scan while any role is in ROC,
 * otherwise hands the request to wlcore_scan().
 */
3494 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3495 struct ieee80211_vif *vif,
3496 struct cfg80211_scan_request *req)
3498 struct wl1271 *wl = hw->priv;
3503 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
/* use the first requested SSID (fw limitation — confirm) */
3506 ssid = req->ssids[0].ssid;
3507 len = req->ssids[0].ssid_len;
3510 mutex_lock(&wl->mutex);
3512 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3514 * We cannot return -EBUSY here because cfg80211 will expect
3515 * a call to ieee80211_scan_completed if we do - in this case
3516 * there won't be any call.
3522 ret = wl1271_ps_elp_wakeup(wl);
3526 /* fail if there is any role in ROC */
3527 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3528 /* don't allow scanning right now */
3533 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3535 wl1271_ps_elp_sleep(wl);
3537 mutex_unlock(&wl->mutex);
/*
 * mac80211 .cancel_hw_scan handler: stop an in-progress scan, reset scan
 * state and report completion (aborted=true) to mac80211.
 */
3542 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3543 struct ieee80211_vif *vif)
3545 struct wl1271 *wl = hw->priv;
3546 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3549 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3551 mutex_lock(&wl->mutex);
3553 if (unlikely(wl->state != WLCORE_STATE_ON))
/* no scan running — nothing to cancel */
3556 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3559 ret = wl1271_ps_elp_wakeup(wl);
3563 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3564 ret = wl->ops->scan_stop(wl, wlvif);
3570 * Rearm the tx watchdog just before idling scan. This
3571 * prevents just-finished scans from triggering the watchdog
3573 wl12xx_rearm_tx_watchdog_locked(wl);
3575 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3576 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3577 wl->scan_wlvif = NULL;
3578 wl->scan.req = NULL;
/* tell mac80211 the scan finished (aborted) */
3579 ieee80211_scan_completed(wl->hw, true);
3582 wl1271_ps_elp_sleep(wl);
3584 mutex_unlock(&wl->mutex);
/* must be outside wl->mutex: the work itself takes the mutex */
3586 cancel_delayed_work_sync(&wl->scan_complete_work);
/*
 * mac80211 .sched_scan_start handler: delegate to the chip-specific op
 * and remember which vif owns the scheduled scan.
 */
3589 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3590 struct ieee80211_vif *vif,
3591 struct cfg80211_sched_scan_request *req,
3592 struct ieee80211_sched_scan_ies *ies)
3594 struct wl1271 *wl = hw->priv;
3595 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3598 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3600 mutex_lock(&wl->mutex);
3602 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3607 ret = wl1271_ps_elp_wakeup(wl);
3611 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
/* record the owning vif so idle/bssid changes can stop the scan */
3615 wl->sched_vif = wlvif;
3618 wl1271_ps_elp_sleep(wl);
3620 mutex_unlock(&wl->mutex);
/*
 * mac80211 .sched_scan_stop handler: wake the chip and delegate to the
 * chip-specific stop op.
 */
3624 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3625 struct ieee80211_vif *vif)
3627 struct wl1271 *wl = hw->priv;
3628 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3631 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3633 mutex_lock(&wl->mutex);
3635 if (unlikely(wl->state != WLCORE_STATE_ON))
3638 ret = wl1271_ps_elp_wakeup(wl);
3642 wl->ops->sched_scan_stop(wl, wlvif);
3644 wl1271_ps_elp_sleep(wl);
3646 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_frag_threshold handler: push the new fragmentation
 * threshold to the firmware (global, not per-vif).
 */
3649 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3651 struct wl1271 *wl = hw->priv;
3654 mutex_lock(&wl->mutex);
3656 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3661 ret = wl1271_ps_elp_wakeup(wl);
3665 ret = wl1271_acx_frag_threshold(wl, value);
3667 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3669 wl1271_ps_elp_sleep(wl);
3672 mutex_unlock(&wl->mutex);
/*
 * mac80211 .set_rts_threshold handler: the RTS threshold is per-vif in
 * this firmware, so apply it to every active vif.
 */
3677 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3679 struct wl1271 *wl = hw->priv;
3680 struct wl12xx_vif *wlvif;
3683 mutex_lock(&wl->mutex);
3685 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3690 ret = wl1271_ps_elp_wakeup(wl);
3694 wl12xx_for_each_wlvif(wl, wlvif) {
3695 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3697 wl1271_warning("set rts threshold failed: %d", ret);
3699 wl1271_ps_elp_sleep(wl);
3702 mutex_unlock(&wl->mutex);
/*
 * Remove the first information element with id @eid from the frame in
 * @skb (IEs start at @ieoffset), compacting the buffer in place.
 */
3707 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3710 const u8 *next, *end = skb->data + skb->len;
3711 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3712 skb->len - ieoffset);
/* shift the remainder of the frame over the removed IE */
3717 memmove(ie, next, end - next);
3718 skb_trim(skb, skb->len - len);
/*
 * Remove the first vendor-specific IE matching @oui/@oui_type from the
 * frame in @skb, compacting the buffer in place (vendor-IE variant of
 * wl12xx_remove_ie()).
 */
3721 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3722 unsigned int oui, u8 oui_type,
3726 const u8 *next, *end = skb->data + skb->len;
3727 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3728 skb->data + ieoffset,
3729 skb->len - ieoffset);
/* shift the remainder of the frame over the removed IE */
3734 memmove(ie, next, end - next);
3735 skb_trim(skb, skb->len - len);
/*
 * Install the AP probe-response template from the skb mac80211 provides;
 * marks AP_PROBE_RESP_SET so beacon changes won't overwrite it.
 */
3738 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3739 struct ieee80211_vif *vif)
3741 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3742 struct sk_buff *skb;
3745 skb = ieee80211_proberesp_get(wl->hw, vif);
3749 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3750 CMD_TEMPL_AP_PROBE_RESPONSE,
3759 wl1271_debug(DEBUG_AP, "probe response updated");
/* remember that usermode set an explicit probe response */
3760 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
/*
 * Legacy fallback: build an AP probe-response template from beacon data,
 * splicing in the SSID from bss_conf (used for hidden-SSID beacons where
 * the beacon's SSID IE is empty). NOTE(excerpt): lines elided in view.
 */
3766 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3767 struct ieee80211_vif *vif,
3769 size_t probe_rsp_len,
3772 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3773 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3774 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3775 int ssid_ie_offset, ie_offset, templ_len;
3778 /* no need to change probe response if the SSID is set correctly */
3779 if (wlvif->ssid_len > 0)
3780 return wl1271_cmd_template_set(wl, wlvif->role_id,
3781 CMD_TEMPL_AP_PROBE_RESPONSE,
/* worst case: original frame plus the substituted SSID must still fit */
3786 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3787 wl1271_error("probe_rsp template too big");
3791 /* start searching from IE offset */
3792 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3794 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3795 probe_rsp_len - ie_offset);
3797 wl1271_error("No SSID in beacon!");
3801 ssid_ie_offset = ptr - probe_rsp_data;
/* advance past the original SSID IE (header + payload) */
3802 ptr += (ptr[1] + 2);
3804 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3806 /* insert SSID from bss_conf */
3807 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3808 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3809 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3810 bss_conf->ssid, bss_conf->ssid_len);
3811 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
/* append everything after the original SSID IE */
3813 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3814 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3815 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3817 return wl1271_cmd_template_set(wl, wlvif->role_id,
3818 CMD_TEMPL_AP_PROBE_RESPONSE,
/*
 * Apply ERP-related bss changes: slot time, preamble and CTS protection.
 */
3824 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3825 struct ieee80211_vif *vif,
3826 struct ieee80211_bss_conf *bss_conf,
3829 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3832 if (changed & BSS_CHANGED_ERP_SLOT) {
3833 if (bss_conf->use_short_slot)
3834 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3836 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3838 wl1271_warning("Set slot time failed %d", ret);
3843 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3844 if (bss_conf->use_short_preamble)
3845 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3847 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3850 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3851 if (bss_conf->use_cts_prot)
3852 ret = wl1271_acx_cts_protect(wl, wlvif,
3855 ret = wl1271_acx_cts_protect(wl, wlvif,
3856 CTSPROTECT_DISABLE);
3858 wl1271_warning("Set ctsprotect failed %d", ret);
/*
 * Install the beacon template and, unless usermode already supplied an
 * explicit probe response, derive a probe-response template from the
 * beacon (strip TIM and P2P IEs, rewrite the frame control).
 * NOTE(excerpt): intermediate source lines are elided in this view.
 */
3867 static int wlcore_set_beacon_template(struct wl1271 *wl,
3868 struct ieee80211_vif *vif,
3871 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3872 struct ieee80211_hdr *hdr;
3875 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
3876 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3884 wl1271_debug(DEBUG_MASTER, "beacon updated");
3886 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3888 dev_kfree_skb(beacon);
3891 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3892 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3894 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3899 dev_kfree_skb(beacon);
/* WMM is on iff the beacon carries the Microsoft WMM vendor IE */
3903 wlvif->wmm_enabled =
3904 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3905 WLAN_OUI_TYPE_MICROSOFT_WMM,
3906 beacon->data + ieoffset,
3907 beacon->len - ieoffset);
3910 * In case we already have a probe-resp beacon set explicitly
3911 * by usermode, don't use the beacon data.
3913 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3916 /* remove TIM ie from probe response */
3917 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3920 * remove p2p ie from probe response.
3921 * the fw responds to probe requests that don't include
3922 * the p2p ie. probe requests with p2p ie will be passed,
3923 * and will be responded by the supplicant (the spec
3924 * forbids including the p2p ie when responding to probe
3925 * requests that didn't include it).
3927 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3928 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
/* repurpose the beacon frame as a probe response */
3930 hdr = (struct ieee80211_hdr *) beacon->data;
3931 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3932 IEEE80211_STYPE_PROBE_RESP);
3934 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3939 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3940 CMD_TEMPL_PROBE_RESPONSE,
3945 dev_kfree_skb(beacon);
/*
 * Handle beacon-related bss changes: beacon interval, AP probe response
 * template and the beacon template itself.
 */
3953 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3954 struct ieee80211_vif *vif,
3955 struct ieee80211_bss_conf *bss_conf,
3958 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3959 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3962 if (changed & BSS_CHANGED_BEACON_INT) {
3963 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3964 bss_conf->beacon_int);
3966 wlvif->beacon_int = bss_conf->beacon_int;
3969 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3970 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3972 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3975 if (changed & BSS_CHANGED_BEACON) {
3976 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3983 wl1271_error("beacon info change failed: %d", ret);
/* AP mode changes */
/*
 * Handle bss_info changes for an AP vif: basic rates (and the templates
 * that depend on them), beacon updates, AP start/stop, ERP and HT info.
 * NOTE(excerpt): intermediate source lines are elided in this view.
 */
3988 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3989 struct ieee80211_vif *vif,
3990 struct ieee80211_bss_conf *bss_conf,
3993 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3996 if (changed & BSS_CHANGED_BASIC_RATES) {
3997 u32 rates = bss_conf->basic_rates;
3999 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4001 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4002 wlvif->basic_rate_set);
4004 ret = wl1271_init_ap_rates(wl, wlvif);
4006 wl1271_error("AP rate policy change failed %d", ret);
/* rate change invalidates the templates — rebuild them all */
4010 ret = wl1271_ap_init_templates(wl, vif);
4014 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4018 ret = wlcore_set_beacon_template(wl, vif, true);
4023 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4027 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4028 if (bss_conf->enable_beacon) {
4029 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4030 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
/* replay keys recorded before the AP role started */
4034 ret = wl1271_ap_init_hwenc(wl, wlvif);
4038 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4039 wl1271_debug(DEBUG_AP, "started AP");
4042 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4044 * AP might be in ROC in case we have just
4045 * sent auth reply. handle it.
4047 if (test_bit(wlvif->role_id, wl->roc_map))
4048 wl12xx_croc(wl, wlvif->role_id);
4050 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4054 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4055 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4057 wl1271_debug(DEBUG_AP, "stopped AP");
4062 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4066 /* Handle HT information change */
4067 if ((changed & BSS_CHANGED_HT) &&
4068 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4069 ret = wl1271_acx_set_ht_information(wl, wlvif,
4070 bss_conf->ht_operation_mode);
4072 wl1271_warning("Set ht information failed %d", ret);
/*
 * Apply a new BSSID on a STA vif: refresh rate sets, stop a running
 * sched scan, rebuild null-data templates and cache the SSID.
 * NOTE(excerpt): intermediate source lines are elided in this view.
 */
4081 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4082 struct ieee80211_bss_conf *bss_conf,
4088 wl1271_debug(DEBUG_MAC80211,
4089 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4090 bss_conf->bssid, bss_conf->aid,
4091 bss_conf->beacon_int,
4092 bss_conf->basic_rates, sta_rate_set);
4094 wlvif->beacon_int = bss_conf->beacon_int;
4095 rates = bss_conf->basic_rates;
4096 wlvif->basic_rate_set =
4097 wl1271_tx_enabled_rates_get(wl, rates,
4100 wl1271_tx_min_rate_get(wl,
4101 wlvif->basic_rate_set);
4105 wl1271_tx_enabled_rates_get(wl,
4109 /* we only support sched_scan while not connected */
4110 if (wl->sched_vif == wlvif)
4111 wl->ops->sched_scan_stop(wl, wlvif);
4113 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4117 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4121 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4125 wlcore_set_ssid(wl, wlvif);
4127 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
/*
 * Clear the BSSID: revert to per-band default rates and stop the STA
 * role if it was in use.
 */
4132 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4136 /* revert back to minimum rates for the current band */
4137 wl1271_set_band_rate(wl, wlvif);
4138 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4140 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4144 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4145 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4146 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4151 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4154 /* STA/IBSS mode changes */
4155 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4156 struct ieee80211_vif *vif,
4157 struct ieee80211_bss_conf *bss_conf,
4160 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4161 bool do_join = false;
4162 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4163 bool ibss_joined = false;
4164 u32 sta_rate_set = 0;
4166 struct ieee80211_sta *sta;
4167 bool sta_exists = false;
4168 struct ieee80211_sta_ht_cap sta_ht_cap;
4171 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4177 if (changed & BSS_CHANGED_IBSS) {
4178 if (bss_conf->ibss_joined) {
4179 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4182 wlcore_unset_assoc(wl, wlvif);
4183 wl12xx_cmd_role_stop_sta(wl, wlvif);
4187 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4190 /* Need to update the SSID (for filtering etc) */
4191 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4194 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4195 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4196 bss_conf->enable_beacon ? "enabled" : "disabled");
4201 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4202 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4204 if (changed & BSS_CHANGED_CQM) {
4205 bool enable = false;
4206 if (bss_conf->cqm_rssi_thold)
4208 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4209 bss_conf->cqm_rssi_thold,
4210 bss_conf->cqm_rssi_hyst);
4213 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4216 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4217 BSS_CHANGED_ASSOC)) {
4219 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4221 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4223 /* save the supp_rates of the ap */
4224 sta_rate_set = sta->supp_rates[wlvif->band];
4225 if (sta->ht_cap.ht_supported)
4227 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4228 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4229 sta_ht_cap = sta->ht_cap;
4236 if (changed & BSS_CHANGED_BSSID) {
4237 if (!is_zero_ether_addr(bss_conf->bssid)) {
4238 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4243 /* Need to update the BSSID (for filtering etc) */
4246 ret = wlcore_clear_bssid(wl, wlvif);
4252 if (changed & BSS_CHANGED_IBSS) {
4253 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4254 bss_conf->ibss_joined);
4256 if (bss_conf->ibss_joined) {
4257 u32 rates = bss_conf->basic_rates;
4258 wlvif->basic_rate_set =
4259 wl1271_tx_enabled_rates_get(wl, rates,
4262 wl1271_tx_min_rate_get(wl,
4263 wlvif->basic_rate_set);
4265 /* by default, use 11b + OFDM rates */
4266 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4267 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4273 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4278 ret = wlcore_join(wl, wlvif);
4280 wl1271_warning("cmd join failed %d", ret);
4285 if (changed & BSS_CHANGED_ASSOC) {
4286 if (bss_conf->assoc) {
4287 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4292 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4293 wl12xx_set_authorized(wl, wlvif);
4295 wlcore_unset_assoc(wl, wlvif);
4299 if (changed & BSS_CHANGED_PS) {
4300 if ((bss_conf->ps) &&
4301 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4302 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4306 if (wl->conf.conn.forced_ps) {
4307 ps_mode = STATION_POWER_SAVE_MODE;
4308 ps_mode_str = "forced";
4310 ps_mode = STATION_AUTO_PS_MODE;
4311 ps_mode_str = "auto";
4314 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4316 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4318 wl1271_warning("enter %s ps failed %d",
4320 } else if (!bss_conf->ps &&
4321 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4322 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4324 ret = wl1271_ps_set_mode(wl, wlvif,
4325 STATION_ACTIVE_MODE);
4327 wl1271_warning("exit auto ps failed %d", ret);
4331 /* Handle new association with HT. Do this after join. */
4334 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4336 ret = wlcore_hw_set_peer_cap(wl,
4342 wl1271_warning("Set ht cap failed %d", ret);
4348 ret = wl1271_acx_set_ht_information(wl, wlvif,
4349 bss_conf->ht_operation_mode);
4351 wl1271_warning("Set ht information failed %d",
4358 /* Handle arp filtering. Done after join. */
4359 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4360 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4361 __be32 addr = bss_conf->arp_addr_list[0];
4362 wlvif->sta.qos = bss_conf->qos;
4363 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4365 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4366 wlvif->ip_addr = addr;
4368 * The template should have been configured only upon
4369 * association. however, it seems that the correct ip
4370 * isn't being set (when sending), so we have to
4371 * reconfigure the template upon every ip change.
4373 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4375 wl1271_warning("build arp rsp failed: %d", ret);
4379 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4380 (ACX_ARP_FILTER_ARP_FILTERING |
4381 ACX_ARP_FILTER_AUTO_ARP),
4385 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
/*
 * mac80211 bss_info_changed callback: dispatches to the AP- or STA-specific
 * handler under wl->mutex, with the chip woken from ELP for the duration.
 * NOTE(review): this listing is elided; error/exit paths are not all visible.
 */
4396 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4397 struct ieee80211_vif *vif,
4398 struct ieee80211_bss_conf *bss_conf,
4401 struct wl1271 *wl = hw->priv;
4402 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4403 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4406 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4407 wlvif->role_id, (int)changed);
4410 * make sure to cancel pending disconnections if our association
4413 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4414 cancel_delayed_work_sync(&wlvif->connection_loss_work);
/* on AP, flush pending frames before beaconing is turned off */
4416 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4417 !bss_conf->enable_beacon)
4418 wl1271_tx_flush(wl);
4420 mutex_lock(&wl->mutex);
/* bail out if the chip is not up or the vif is not initialized yet */
4422 if (unlikely(wl->state != WLCORE_STATE_ON))
4425 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4428 ret = wl1271_ps_elp_wakeup(wl);
/* AP vs. STA specific handling of the changed bitmap */
4433 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4435 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4437 wl1271_ps_elp_sleep(wl);
4440 mutex_unlock(&wl->mutex);
/*
 * mac80211 add_chanctx callback: only emits a debug trace of the new
 * context's channel and chandef type; no firmware programming here.
 */
4443 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4444 struct ieee80211_chanctx_conf *ctx)
4446 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4447 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4448 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 remove_chanctx callback: trace-only counterpart of
 * wlcore_op_add_chanctx().
 */
4452 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4453 struct ieee80211_chanctx_conf *ctx)
4455 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4456 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4457 cfg80211_get_chandef_type(&ctx->def));
/*
 * mac80211 change_chanctx callback: trace-only; logs the context's channel,
 * chandef type and the 'changed' flags bitmap.
 */
4460 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4461 struct ieee80211_chanctx_conf *ctx,
4464 wl1271_debug(DEBUG_MAC80211,
4465 "mac80211 change chanctx %d (type %d) changed 0x%x",
4466 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4467 cfg80211_get_chandef_type(&ctx->def), changed);
/*
 * mac80211 assign_vif_chanctx callback: record the context's band, channel
 * number and channel type on the vif, and refresh the vif's default rate
 * tables for the new band.  All vif state is updated under wl->mutex.
 */
4470 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4471 struct ieee80211_vif *vif,
4472 struct ieee80211_chanctx_conf *ctx)
4474 struct wl1271 *wl = hw->priv;
4475 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4476 int channel = ieee80211_frequency_to_channel(
4477 ctx->def.chan->center_freq);
4479 wl1271_debug(DEBUG_MAC80211,
4480 "mac80211 assign chanctx (role %d) %d (type %d)",
4481 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4483 mutex_lock(&wl->mutex);
/* cache channel parameters; used later at join/ROC time */
4485 wlvif->band = ctx->def.chan->band;
4486 wlvif->channel = channel;
4487 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4489 /* update default rates according to the band */
4490 wl1271_set_band_rate(wl, wlvif);
4492 mutex_unlock(&wl->mutex);
/*
 * mac80211 unassign_vif_chanctx callback: trace the event and flush any
 * frames still queued for transmission before the vif leaves the channel.
 */
4497 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4498 struct ieee80211_vif *vif,
4499 struct ieee80211_chanctx_conf *ctx)
4501 struct wl1271 *wl = hw->priv;
4502 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4504 wl1271_debug(DEBUG_MAC80211,
4505 "mac80211 unassign chanctx (role %d) %d (type %d)",
4507 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4508 cfg80211_get_chandef_type(&ctx->def))
4510 wl1271_tx_flush(wl);
/*
 * mac80211 conf_tx callback: program the EDCA AC parameters (cw_min/cw_max,
 * AIFS, TXOP) and the TID configuration for one hardware queue.
 * The PS scheme is chosen per queue (UAPSD trigger vs. legacy) before the
 * ACX commands are issued; the selection condition is elided in this listing.
 */
4513 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4514 struct ieee80211_vif *vif, u16 queue,
4515 const struct ieee80211_tx_queue_params *params)
4517 struct wl1271 *wl = hw->priv;
4518 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4522 mutex_lock(&wl->mutex);
4524 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4527 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4529 ps_scheme = CONF_PS_SCHEME_LEGACY;
4531 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4534 ret = wl1271_ps_elp_wakeup(wl);
4539 * the txop is confed in units of 32us by the mac80211,
/* hence the '<< 5' conversion of params->txop below */
4542 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4543 params->cw_min, params->cw_max,
4544 params->aifs, params->txop << 5);
4548 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4549 CONF_CHANNEL_TYPE_EDCF,
4550 wl1271_tx_get_queue(queue),
4551 ps_scheme, CONF_ACK_POLICY_LEGACY,
4555 wl1271_ps_elp_sleep(wl);
4558 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_tsf callback: read the current TSF from firmware via
 * ACX.  Returns ULLONG_MAX (the initial value of 'mactime') if the chip
 * is not up or the query fails.
 */
4563 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4564 struct ieee80211_vif *vif)
4567 struct wl1271 *wl = hw->priv;
4568 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4569 u64 mactime = ULLONG_MAX;
4572 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4574 mutex_lock(&wl->mutex);
4576 if (unlikely(wl->state != WLCORE_STATE_ON))
4579 ret = wl1271_ps_elp_wakeup(wl);
4583 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4588 wl1271_ps_elp_sleep(wl);
4591 mutex_unlock(&wl->mutex);
/*
 * mac80211 get_survey callback: minimal implementation that reports only
 * the current channel taken from hw->conf; no noise/airtime statistics.
 */
4595 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4596 struct survey_info *survey)
4598 struct ieee80211_conf *conf = &hw->conf;
4603 survey->channel = conf->chandef.chan;
/*
 * Allocate a host link ID (HLID) for a newly added AP-mode station and
 * register it in the vif's station map.  Fails when the AP station limit
 * or the global link limit is reached.
 */
4608 static int wl1271_allocate_sta(struct wl1271 *wl,
4609 struct wl12xx_vif *wlvif,
4610 struct ieee80211_sta *sta)
4612 struct wl1271_station *wl_sta;
4616 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4617 wl1271_warning("could not allocate HLID - too much stations");
4621 wl_sta = (struct wl1271_station *)sta->drv_priv;
4622 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4624 wl1271_warning("could not allocate HLID - too many links");
4628 /* use the previous security seq, if this is a recovery/resume */
4629 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
/* publish the new link: mark the hlid used and record the MAC */
4631 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4632 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4633 wl->active_sta_count++;
/*
 * Release an AP-mode station's HLID: clear it from the per-vif and global
 * PS maps, preserve the link's TX security sequence counter in the
 * station's drv_priv for a later recovery/resume, and free the link.
 */
4637 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4639 struct wl1271_station *wl_sta;
4640 struct ieee80211_sta *sta;
4641 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
/* nothing to do if this hlid was never allocated to a station */
4643 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4646 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4647 __clear_bit(hlid, &wl->ap_ps_map);
4648 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4651 * save the last used PN in the private part of iee80211_sta,
4652 * in case of recovery/suspend
4655 sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
4657 wl_sta = (void *)sta->drv_priv;
4658 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
4661 * increment the initial seq number on recovery to account for
4662 * transmitted packets that we haven't yet got in the FW status
4664 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
4665 wl_sta->total_freed_pkts +=
4666 WL1271_TX_SQN_POST_RECOVERY_PADDING;
4670 wl12xx_free_link(wl, wlvif, &hlid);
4671 wl->active_sta_count--;
4674 * rearm the tx watchdog when the last STA is freed - give the FW a
4675 * chance to return STA-buffered packets before complaining.
4677 if (wl->active_sta_count == 0)
4678 wl12xx_rearm_tx_watchdog_locked(wl);
/*
 * Add an AP-mode peer: allocate an HLID for the station and then issue the
 * ADD_PEER firmware command.  The HLID is freed again if the command fails.
 */
4681 static int wl12xx_sta_add(struct wl1271 *wl,
4682 struct wl12xx_vif *wlvif,
4683 struct ieee80211_sta *sta)
4685 struct wl1271_station *wl_sta;
4689 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4691 ret = wl1271_allocate_sta(wl, wlvif, sta);
4695 wl_sta = (struct wl1271_station *)sta->drv_priv;
4696 hlid = wl_sta->hlid;
4698 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
/* roll back the host-side allocation on firmware failure */
4700 wl1271_free_sta(wl, wlvif, hlid);
/*
 * Remove an AP-mode peer: tell the firmware to drop the peer, then free
 * the host-side HLID bookkeeping.  WARNs if the station's hlid is not
 * actually marked allocated in the vif's station map.
 */
4705 static int wl12xx_sta_remove(struct wl1271 *wl,
4706 struct wl12xx_vif *wlvif,
4707 struct ieee80211_sta *sta)
4709 struct wl1271_station *wl_sta;
4712 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4714 wl_sta = (struct wl1271_station *)sta->drv_priv;
4716 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4719 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4723 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
/*
 * Start a remain-on-channel for this vif's role, but only if no other role
 * is currently ROCing (at most one ROC at a time is supported here).
 */
4727 static void wlcore_roc_if_possible(struct wl1271 *wl,
4728 struct wl12xx_vif *wlvif)
/* another role already holds a ROC - bail out */
4730 if (find_first_bit(wl->roc_map,
4731 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4734 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4737 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4741 * when wl_sta is NULL, we treat this call as if coming from a
4742 * pending auth reply.
4743 * wl->mutex must be taken and the FW must be awake when the call
 * is made.
4746 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4747 struct wl1271_station *wl_sta, bool in_conn)
4750 if (WARN_ON(wl_sta && wl_sta->in_connection))
/* first in-connection entity on this vif: try to start a ROC */
4753 if (!wlvif->ap_pending_auth_reply &&
4754 !wlvif->inconn_count)
4755 wlcore_roc_if_possible(wl, wlvif);
4758 wl_sta->in_connection = true;
4759 wlvif->inconn_count++;
4761 wlvif->ap_pending_auth_reply = true;
4764 if (wl_sta && !wl_sta->in_connection)
4767 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
4770 if (WARN_ON(wl_sta && !wlvif->inconn_count))
4774 wl_sta->in_connection = false;
4775 wlvif->inconn_count--;
4777 wlvif->ap_pending_auth_reply = false;
/* last in-connection entity gone: cancel the ROC if we hold one */
4780 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
4781 test_bit(wlvif->role_id, wl->roc_map))
4782 wl12xx_croc(wl, wlvif->role_id);
/*
 * Core of the sta_state transition handling: maps mac80211 station state
 * transitions onto firmware commands, separately for AP-mode peers and the
 * STA-mode association, and maintains the connection-tracking / ROC state.
 */
4786 static int wl12xx_update_sta_state(struct wl1271 *wl,
4787 struct wl12xx_vif *wlvif,
4788 struct ieee80211_sta *sta,
4789 enum ieee80211_sta_state old_state,
4790 enum ieee80211_sta_state new_state)
4792 struct wl1271_station *wl_sta;
4793 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4794 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4797 wl_sta = (struct wl1271_station *)sta->drv_priv;
4799 /* Add station (AP mode) */
4801 old_state == IEEE80211_STA_NOTEXIST &&
4802 new_state == IEEE80211_STA_NONE) {
4803 ret = wl12xx_sta_add(wl, wlvif, sta);
4807 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4810 /* Remove station (AP mode) */
4812 old_state == IEEE80211_STA_NONE &&
4813 new_state == IEEE80211_STA_NOTEXIST) {
4815 wl12xx_sta_remove(wl, wlvif, sta);
4817 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4820 /* Authorize station (AP mode) */
4822 new_state == IEEE80211_STA_AUTHORIZED) {
4823 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
/* push the peer's HT capabilities now that it is authorized */
4827 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4832 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4835 /* Authorize station */
4837 new_state == IEEE80211_STA_AUTHORIZED) {
4838 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4839 ret = wl12xx_set_authorized(wl, wlvif);
/* De-authorize (STA mode): drop the authorized/state-sent flags */
4845 old_state == IEEE80211_STA_AUTHORIZED &&
4846 new_state == IEEE80211_STA_ASSOC) {
4847 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4848 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4851 /* clear ROCs on failure or authorization */
4853 (new_state == IEEE80211_STA_AUTHORIZED ||
4854 new_state == IEEE80211_STA_NOTEXIST)) {
4855 if (test_bit(wlvif->role_id, wl->roc_map))
4856 wl12xx_croc(wl, wlvif->role_id);
/* new connection attempt: ROC on our channel if none is active */
4860 old_state == IEEE80211_STA_NOTEXIST &&
4861 new_state == IEEE80211_STA_NONE) {
4862 if (find_first_bit(wl->roc_map,
4863 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4864 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4865 wl12xx_roc(wl, wlvif, wlvif->role_id,
4866 wlvif->band, wlvif->channel);
/*
 * mac80211 sta_state callback: thin wrapper that takes wl->mutex, wakes the
 * chip from ELP and delegates the actual transition handling to
 * wl12xx_update_sta_state().  Errors on downward transitions are not
 * propagated (see the new_state < old_state check at the end).
 */
4872 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4873 struct ieee80211_vif *vif,
4874 struct ieee80211_sta *sta,
4875 enum ieee80211_sta_state old_state,
4876 enum ieee80211_sta_state new_state)
4878 struct wl1271 *wl = hw->priv;
4879 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4882 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4883 sta->aid, old_state, new_state);
4885 mutex_lock(&wl->mutex);
4887 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4892 ret = wl1271_ps_elp_wakeup(wl);
4896 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4898 wl1271_ps_elp_sleep(wl);
4900 mutex_unlock(&wl->mutex);
/* mac80211 must not see failures when tearing a station down */
4901 if (new_state < old_state)
/*
 * mac80211 ampdu_action callback: manage RX block-ack sessions in firmware.
 * RX_START/RX_STOP program the BA receiver session via ACX and keep the
 * per-link ba_bitmap and the global session count in sync; all TX AMPDU
 * actions are no-ops here because the firmware initiates TX BA on its own.
 */
4906 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4907 struct ieee80211_vif *vif,
4908 enum ieee80211_ampdu_mlme_action action,
4909 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4912 struct wl1271 *wl = hw->priv;
4913 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4915 u8 hlid, *ba_bitmap;
4917 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4920 /* sanity check - the fields in FW are only 8bits wide */
4921 if (WARN_ON(tid > 0xFF))
4924 mutex_lock(&wl->mutex);
4926 if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* resolve the link (hlid) this BA session belongs to */
4931 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4932 hlid = wlvif->sta.hlid;
4933 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4934 struct wl1271_station *wl_sta;
4936 wl_sta = (struct wl1271_station *)sta->drv_priv;
4937 hlid = wl_sta->hlid;
4943 ba_bitmap = &wl->links[hlid].ba_bitmap;
4945 ret = wl1271_ps_elp_wakeup(wl);
4949 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4953 case IEEE80211_AMPDU_RX_START:
4954 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4959 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
4961 wl1271_error("exceeded max RX BA sessions");
/* refuse to double-start a session on the same tid */
4965 if (*ba_bitmap & BIT(tid)) {
4967 wl1271_error("cannot enable RX BA session on active "
4972 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4975 *ba_bitmap |= BIT(tid);
4976 wl->ba_rx_session_count++;
4980 case IEEE80211_AMPDU_RX_STOP:
4981 if (!(*ba_bitmap & BIT(tid))) {
4983 * this happens on reconfig - so only output a debug
4984 * message for now, and don't fail the function.
4986 wl1271_debug(DEBUG_MAC80211,
4987 "no active RX BA session on tid: %d",
4993 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4996 *ba_bitmap &= ~BIT(tid);
4997 wl->ba_rx_session_count--;
5002 * BA initiator (TX) sessions are managed by the firmware on its own;
5003 * deliberate fallthrough to a single break for all TX AMPDU actions.
5005 case IEEE80211_AMPDU_TX_START:
5006 case IEEE80211_AMPDU_TX_STOP_CONT:
5007 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5008 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5009 case IEEE80211_AMPDU_TX_OPERATIONAL:
5014 wl1271_error("Incorrect ampdu action id=%x\n", action);
5018 wl1271_ps_elp_sleep(wl);
5021 mutex_unlock(&wl->mutex);
/*
 * mac80211 set_bitrate_mask callback: cache the per-band legacy rate masks
 * on the vif and, for a not-yet-associated STA vif with the chip up, push
 * refreshed rate policies to the firmware immediately.
 */
5026 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5027 struct ieee80211_vif *vif,
5028 const struct cfg80211_bitrate_mask *mask)
5030 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5031 struct wl1271 *wl = hw->priv;
5034 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5035 mask->control[NL80211_BAND_2GHZ].legacy,
5036 mask->control[NL80211_BAND_5GHZ].legacy);
5038 mutex_lock(&wl->mutex);
/* always remember the mask; it is applied on (re)join as well */
5040 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5041 wlvif->bitrate_masks[i] =
5042 wl1271_tx_enabled_rates_get(wl,
5043 mask->control[i].legacy,
5046 if (unlikely(wl->state != WLCORE_STATE_ON))
5049 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5050 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5052 ret = wl1271_ps_elp_wakeup(wl);
5056 wl1271_set_band_rate(wl, wlvif);
5058 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5059 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5061 wl1271_ps_elp_sleep(wl);
5064 mutex_unlock(&wl->mutex);
/*
 * mac80211 channel_switch callback: ask the firmware (per STA vif) to
 * perform the CSA, and arm a delayed work that reports failure if the
 * switch has not completed roughly 5 seconds after the switch time.
 * If the chip is already off, each STA vif's switch is completed as failed.
 */
5069 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5070 struct ieee80211_channel_switch *ch_switch)
5072 struct wl1271 *wl = hw->priv;
5073 struct wl12xx_vif *wlvif;
5076 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5078 wl1271_tx_flush(wl);
5080 mutex_lock(&wl->mutex);
5082 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5083 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5084 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5085 ieee80211_chswitch_done(vif, false);
5088 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5092 ret = wl1271_ps_elp_wakeup(wl);
5096 /* TODO: change mac80211 to pass vif as param */
5097 wl12xx_for_each_wlvif_sta(wl, wlvif) {
5098 unsigned long delay_usec;
5100 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5104 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5106 /* indicate failure 5 seconds after channel switch time */
5107 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5109 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5110 usecs_to_jiffies(delay_usec) +
5111 msecs_to_jiffies(5000));
5115 wl1271_ps_elp_sleep(wl);
5118 mutex_unlock(&wl->mutex);
/*
 * mac80211 flush callback: flush all pending TX frames; the 'queues' and
 * 'drop' arguments are ignored by this driver.
 */
5121 static void wlcore_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
5123 struct wl1271 *wl = hw->priv;
5125 wl1271_tx_flush(wl);
/*
 * mac80211 remain_on_channel callback: start the device role on the
 * requested channel and schedule roc_complete_work to end the ROC after
 * 'duration' ms.  Only one ROC may be active at a time; a second request
 * while one is pending is rejected (the WARN_ON/busy path below).
 */
5128 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5129 struct ieee80211_vif *vif,
5130 struct ieee80211_channel *chan,
5132 enum ieee80211_roc_type type)
5134 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5135 struct wl1271 *wl = hw->priv;
5136 int channel, ret = 0;
5138 channel = ieee80211_frequency_to_channel(chan->center_freq);
5140 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5141 channel, wlvif->role_id);
5143 mutex_lock(&wl->mutex);
5145 if (unlikely(wl->state != WLCORE_STATE_ON))
5148 /* return EBUSY if we can't ROC right now */
5149 if (WARN_ON(wl->roc_vif ||
5150 find_first_bit(wl->roc_map,
5151 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5156 ret = wl1271_ps_elp_wakeup(wl);
5160 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
/* end the ROC automatically once the requested duration elapses */
5165 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5166 msecs_to_jiffies(duration));
5168 wl1271_ps_elp_sleep(wl);
5170 mutex_unlock(&wl->mutex);
/*
 * Tear down the current ROC, if any.  Caller holds wl->mutex and has the
 * FW awake (see wlcore_roc_completed()).  Returns without stopping the
 * device role if the ROC vif is gone or not initialized.
 */
5174 static int __wlcore_roc_completed(struct wl1271 *wl)
5176 struct wl12xx_vif *wlvif;
5179 /* already completed */
5180 if (unlikely(!wl->roc_vif))
5183 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5185 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5188 ret = wl12xx_stop_dev(wl, wlvif);
/*
 * Locked wrapper around __wlcore_roc_completed(): take wl->mutex, wake the
 * chip from ELP, finish the ROC, then let the chip sleep again.
 */
5197 static int wlcore_roc_completed(struct wl1271 *wl)
5201 wl1271_debug(DEBUG_MAC80211, "roc complete");
5203 mutex_lock(&wl->mutex);
5205 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5210 ret = wl1271_ps_elp_wakeup(wl);
5214 ret = __wlcore_roc_completed(wl);
5216 wl1271_ps_elp_sleep(wl);
5218 mutex_unlock(&wl->mutex);
/*
 * Delayed work scheduled by wlcore_op_remain_on_channel(): end the ROC and
 * notify mac80211 that the remain-on-channel period has expired.
 */
5223 static void wlcore_roc_complete_work(struct work_struct *work)
5225 struct delayed_work *dwork;
5229 dwork = container_of(work, struct delayed_work, work);
5230 wl = container_of(dwork, struct wl1271, roc_complete_work);
5232 ret = wlcore_roc_completed(wl);
5234 ieee80211_remain_on_channel_expired(wl->hw);
/*
 * mac80211 cancel_remain_on_channel callback: flush TX, cancel the pending
 * roc_complete_work (cannot flush_work - possible self-deadlock, see the
 * comment below) and complete the ROC synchronously.
 */
5237 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5239 struct wl1271 *wl = hw->priv;
5241 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5244 wl1271_tx_flush(wl);
5247 * we can't just flush_work here, because it might deadlock
5248 * (as we might get called from the same workqueue)
5250 cancel_delayed_work_sync(&wl->roc_complete_work);
5251 wlcore_roc_completed(wl);
/*
 * mac80211 sta_rc_update callback: forwarded verbatim to the chip-specific
 * handler through the wlcore hw-ops indirection.
 */
5256 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5257 struct ieee80211_vif *vif,
5258 struct ieee80211_sta *sta,
5261 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5262 struct wl1271 *wl = hw->priv;
5264 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
/*
 * mac80211 get_rssi callback: query the average RSSI from firmware via
 * ACX, under wl->mutex with the chip woken from ELP.
 */
5267 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5268 struct ieee80211_vif *vif,
5269 struct ieee80211_sta *sta,
5272 struct wl1271 *wl = hw->priv;
5273 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5276 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5278 mutex_lock(&wl->mutex);
5280 if (unlikely(wl->state != WLCORE_STATE_ON))
5283 ret = wl1271_ps_elp_wakeup(wl);
5287 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5292 wl1271_ps_elp_sleep(wl);
5295 mutex_unlock(&wl->mutex);
/*
 * mac80211 tx_frames_pending callback: true when frames are queued in the
 * host TX queues or still held by the firmware.
 */
5300 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5302 struct wl1271 *wl = hw->priv;
5305 mutex_lock(&wl->mutex);
5307 if (unlikely(wl->state != WLCORE_STATE_ON))
5310 /* packets are considered pending if in the TX queue or the FW */
5311 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5313 mutex_unlock(&wl->mutex);
5318 /* can't be const, mac80211 writes to this */
/*
 * 2.4 GHz legacy rate table (CCK + OFDM).  hw_value/hw_value_short carry
 * the driver-internal CONF_HW_BIT_RATE_* bitmask; the short-preamble flag
 * is set on the CCK rates that support it.
 */
5319 static struct ieee80211_rate wl1271_rates[] = {
5321 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5322 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5324 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5325 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5326 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5328 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5329 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5330 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5332 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5333 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5334 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5336 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5337 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5339 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5340 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5342 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5343 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5345 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5346 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5348 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5349 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5351 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5352 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5354 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5355 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5357 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5358 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5361 /* can't be const, mac80211 writes to this */
/* 2.4 GHz channel table: channels 1-14 at their standard center freqs */
5362 static struct ieee80211_channel wl1271_channels[] = {
5363 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5364 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5365 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5366 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5367 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5368 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5369 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5370 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5371 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5372 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5373 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5374 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5375 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5376 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5379 /* can't be const, mac80211 writes to this */
/* 2.4 GHz supported-band descriptor tying the channel and rate tables */
5380 static struct ieee80211_supported_band wl1271_band_2ghz = {
5381 .channels = wl1271_channels,
5382 .n_channels = ARRAY_SIZE(wl1271_channels),
5383 .bitrates = wl1271_rates,
5384 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5387 /* 5 GHz data rates for WL1273 */
/* OFDM only - no CCK rates on 5 GHz, hence no short-preamble flags */
5388 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5390 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5391 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5393 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5394 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5396 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5397 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5399 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5400 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5402 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5403 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5405 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5406 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5408 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5409 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5411 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5412 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5415 /* 5 GHz band channels for WL1273 */
5416 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5417 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5418 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5419 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5420 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5421 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5422 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5423 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5424 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5425 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5426 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5427 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5428 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5429 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5430 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5431 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5432 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5433 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5434 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5435 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5436 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5437 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5438 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5439 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5440 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5441 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5442 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5443 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5444 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5445 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5446 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5447 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
/* 5 GHz supported-band descriptor tying the channel and rate tables */
5450 static struct ieee80211_supported_band wl1271_band_5ghz = {
5451 .channels = wl1271_channels_5ghz,
5452 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5453 .bitrates = wl1271_rates_5ghz,
5454 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
/* The driver's mac80211 callback table, registered via ieee80211_register_hw() */
5457 static const struct ieee80211_ops wl1271_ops = {
5458 .start = wl1271_op_start,
5459 .stop = wlcore_op_stop,
5460 .add_interface = wl1271_op_add_interface,
5461 .remove_interface = wl1271_op_remove_interface,
5462 .change_interface = wl12xx_op_change_interface,
5464 .suspend = wl1271_op_suspend,
5465 .resume = wl1271_op_resume,
5467 .config = wl1271_op_config,
5468 .prepare_multicast = wl1271_op_prepare_multicast,
5469 .configure_filter = wl1271_op_configure_filter,
5471 .set_key = wlcore_op_set_key,
5472 .hw_scan = wl1271_op_hw_scan,
5473 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5474 .sched_scan_start = wl1271_op_sched_scan_start,
5475 .sched_scan_stop = wl1271_op_sched_scan_stop,
5476 .bss_info_changed = wl1271_op_bss_info_changed,
5477 .set_frag_threshold = wl1271_op_set_frag_threshold,
5478 .set_rts_threshold = wl1271_op_set_rts_threshold,
5479 .conf_tx = wl1271_op_conf_tx,
5480 .get_tsf = wl1271_op_get_tsf,
5481 .get_survey = wl1271_op_get_survey,
5482 .sta_state = wl12xx_op_sta_state,
5483 .ampdu_action = wl1271_op_ampdu_action,
5484 .tx_frames_pending = wl1271_tx_frames_pending,
5485 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5486 .set_default_unicast_key = wl1271_op_set_default_key_idx,
5487 .channel_switch = wl12xx_op_channel_switch,
5488 .flush = wlcore_op_flush,
5489 .remain_on_channel = wlcore_op_remain_on_channel,
5490 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5491 .add_chanctx = wlcore_op_add_chanctx,
5492 .remove_chanctx = wlcore_op_remove_chanctx,
5493 .change_chanctx = wlcore_op_change_chanctx,
5494 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5495 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5496 .sta_rc_update = wlcore_op_sta_rc_update,
5497 .get_rssi = wlcore_op_get_rssi,
5498 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
/*
 * Translate a raw HW RX rate code into a mac80211 rate index using the
 * per-band lookup table; out-of-range or unsupported codes are logged.
 */
5502 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5508 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5509 wl1271_error("Illegal RX rate from HW: %d", rate);
5513 idx = wl->band_rate_to_idx[band][rate];
5514 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5515 wl1271_error("Unsupported RX rate from HW: %d", rate);
/*
 * Build the wiphy MAC address list from a 24-bit OUI and a 24-bit NIC base.
 * Each address is assembled byte-by-byte into wl->addresses[]; if the chip
 * exposes fewer addresses than WLCORE_NUM_MAC_ADDRESSES, the last slot is
 * synthesized from address 0 with the locally-administered bit set.
 */
5522 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5526 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5529 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5530 wl1271_warning("NIC part of the MAC address wraps around!");
5532 for (i = 0; i < wl->num_mac_addr; i++) {
5533 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5534 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5535 wl->addresses[i].addr[2] = (u8) oui;
5536 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5537 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5538 wl->addresses[i].addr[5] = (u8) nic;
5542 /* we may be one address short at the most */
5543 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5546 * turn on the LAA bit in the first address and use it as
5549 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5550 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5551 memcpy(&wl->addresses[idx], &wl->addresses[0],
5552 sizeof(wl->addresses[0]));
/* locally administered bit in the first octet's copy (addr[2] of OUI) */
5554 wl->addresses[idx].addr[2] |= BIT(1);
5557 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5558 wl->hw->wiphy->addresses = wl->addresses;
/*
 * Power the chip on just long enough to read its identity: chip ID,
 * PG (production generation) version and, if the chip ops provide it,
 * the fused MAC address.  The chip is powered off again before returning.
 */
5561 static int wl12xx_get_hw_info(struct wl1271 *wl)
5565 ret = wl12xx_set_power_on(wl);
5569 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
/* start from a clean fuse-derived address; get_mac may fill these in */
5573 wl->fuse_oui_addr = 0;
5574 wl->fuse_nic_addr = 0;
5576 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5580 if (wl->ops->get_mac)
5581 ret = wl->ops->get_mac(wl);
5584 wl1271_power_off(wl);
/*
 * Register the device with mac80211 (idempotent: returns early when
 * already registered).  The base MAC address is taken from the NVS when
 * present there, otherwise derived from the fuse values read earlier.
 */
5588 static int wl1271_register_hw(struct wl1271 *wl)
5591 u32 oui_addr = 0, nic_addr = 0;
5593 if (wl->mac80211_registered)
5596 if (wl->nvs_len >= 12) {
5597 /* NOTE: The wl->nvs->nvs element must be first, in
5598 * order to simplify the casting, we assume it is at
5599 * the beginning of the wl->nvs structure.
5601 u8 *nvs_ptr = (u8 *)wl->nvs;
/* NVS stores the address bytes scattered; reassemble OUI and NIC */
5604 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5606 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5609 /* if the MAC address is zeroed in the NVS derive from fuse */
5610 if (oui_addr == 0 && nic_addr == 0) {
5611 oui_addr = wl->fuse_oui_addr;
5612 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5613 nic_addr = wl->fuse_nic_addr + 1;
5616 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5618 ret = ieee80211_register_hw(wl->hw);
5620 wl1271_error("unable to register mac80211 hw: %d", ret);
5624 wl->mac80211_registered = true;
5626 wl1271_debugfs_init(wl);
5628 wl1271_notice("loaded");
/*
 * wl1271_unregister_hw - tear down the mac80211 registration
 *
 * Stops any PLT (production-line test) mode first, then unregisters from
 * mac80211 and clears the registered flag so wl1271_register_hw() could run
 * again.  NOTE(review): a plt-mode guard around wl1271_plt_stop() appears
 * elided in this excerpt.
 */
5634 static void wl1271_unregister_hw(struct wl1271 *wl)
5637 wl1271_plt_stop(wl);
5639 ieee80211_unregister_hw(wl->hw);
5640 wl->mac80211_registered = false;
/*
 * Per-type concurrent interface limits advertised to cfg80211:
 * one STA entry plus one entry covering AP/P2P-GO/P2P-client roles.
 * (The .max counts for each limit are elided in this excerpt.)
 */
5644 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5647 .types = BIT(NL80211_IFTYPE_STATION),
5651 .types = BIT(NL80211_IFTYPE_AP) |
5652 BIT(NL80211_IFTYPE_P2P_GO) |
5653 BIT(NL80211_IFTYPE_P2P_CLIENT),
/*
 * Allowed interface combinations.  Non-const on purpose:
 * num_different_channels is patched per-device in wl1271_init_ieee80211()
 * from wl->num_channels before being handed to cfg80211.
 */
5657 static struct ieee80211_iface_combination
5658 wlcore_iface_combinations[] = {
5660 .max_interfaces = 3,
5661 .limits = wlcore_iface_limits,
5662 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
/*
 * wl1271_init_ieee80211 - populate the ieee80211_hw/wiphy capabilities
 *
 * One-time setup of everything mac80211/cfg80211 needs to know about this
 * device before ieee80211_register_hw(): cipher suites, HW flags, interface
 * modes, scan limits, band/channel tables (device-local copies), queue
 * layout and interface combinations.
 */
5666 static int wl1271_init_ieee80211(struct wl1271 *wl)
/* GEM is a TI-proprietary cipher; the rest are standard WEP/TKIP/CCMP. */
5669 static const u32 cipher_suites[] = {
5670 WLAN_CIPHER_SUITE_WEP40,
5671 WLAN_CIPHER_SUITE_WEP104,
5672 WLAN_CIPHER_SUITE_TKIP,
5673 WLAN_CIPHER_SUITE_CCMP,
5674 WL1271_CIPHER_SUITE_GEM,
5677 /* The tx descriptor buffer */
5678 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
/* Some chips need extra headroom so the TKIP header can be inserted. */
5680 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5681 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5684 /* FIXME: find a proper value */
5685 wl->hw->channel_change_time = 10000;
5686 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
/* Capabilities implemented by this driver/firmware combination. */
5688 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5689 IEEE80211_HW_SUPPORTS_PS |
5690 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5691 IEEE80211_HW_SUPPORTS_UAPSD |
5692 IEEE80211_HW_HAS_RATE_CONTROL |
5693 IEEE80211_HW_CONNECTION_MONITOR |
5694 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5695 IEEE80211_HW_SPECTRUM_MGMT |
5696 IEEE80211_HW_AP_LINK_PS |
5697 IEEE80211_HW_AMPDU_AGGREGATION |
5698 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5699 IEEE80211_HW_QUEUE_CONTROL;
5701 wl->hw->wiphy->cipher_suites = cipher_suites;
5702 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5704 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5705 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5706 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
/* One SSID per normal scan; scheduled scan handles up to 16. */
5707 wl->hw->wiphy->max_scan_ssids = 1;
5708 wl->hw->wiphy->max_sched_scan_ssids = 16;
5709 wl->hw->wiphy->max_match_sets = 16;
5711 * Maximum length of elements in scanning probe request templates
5712 * should be the maximum length possible for a template, without
5713 * the IEEE80211 header of the template
5715 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5716 sizeof(struct ieee80211_header);
5718 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5719 sizeof(struct ieee80211_header);
/* milliseconds; upper bound for remain-on-channel requests. */
5721 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5723 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5724 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
5725 WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
5727 /* make sure all our channels fit in the scanned_ch bitmask */
5728 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5729 ARRAY_SIZE(wl1271_channels_5ghz) >
5730 WL1271_MAX_CHANNELS);
5732 * clear channel flags from the previous usage
5733 * and restore max_power & max_antenna_gain values.
5735 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5736 wl1271_band_2ghz.channels[i].flags = 0;
5737 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5738 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5741 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5742 wl1271_band_5ghz.channels[i].flags = 0;
5743 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5744 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5748 * We keep local copies of the band structs because we need to
5749 * modify them on a per-device basis.
5751 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5752 sizeof(wl1271_band_2ghz));
/* Overlay the chip-specific HT capabilities onto the copied band structs. */
5753 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5754 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5755 sizeof(*wl->ht_cap));
5756 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5757 sizeof(wl1271_band_5ghz));
5758 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5759 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5760 sizeof(*wl->ht_cap));
5762 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5763 &wl->bands[IEEE80211_BAND_2GHZ];
5764 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5765 &wl->bands[IEEE80211_BAND_5GHZ];
5768 * allow 4 queues per mac address we support +
5769 * 1 cab queue per mac + one global offchannel Tx queue
5771 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5773 /* the last queue is the offchannel queue */
5774 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5775 wl->hw->max_rates = 1;
5777 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5779 /* the FW answers probe-requests in AP-mode */
5780 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5781 wl->hw->wiphy->probe_resp_offload =
5782 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5783 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5784 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5786 /* allowed interface combinations */
/* Patch the static combination table with this device's channel count. */
5787 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5788 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5789 wl->hw->wiphy->n_iface_combinations =
5790 ARRAY_SIZE(wlcore_iface_combinations);
5792 SET_IEEE80211_DEV(wl->hw, wl->dev);
/* mac80211 allocates these per-sta/per-vif private areas for us. */
5794 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5795 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5797 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
/*
 * wlcore_alloc_hw - allocate and initialize a wl1271 device instance
 * @priv_size: size of the chip-family private area (wl->priv)
 * @aggr_buf_size: size of the TX/RX aggregation buffer (page-order alloc)
 *
 * Allocates the ieee80211_hw plus all driver-owned buffers (aggregation
 * buffer, dummy packet, FW log page, mailbox, 32-bit bounce buffer) and
 * initializes queues, work items, locks and default state.  On failure the
 * error labels (elided in this excerpt) unwind in reverse order and
 * ERR_PTR(ret) is returned; on success the new hw is returned.
 * Ownership: caller must release via wlcore_free_hw().
 */
5802 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5805 struct ieee80211_hw *hw;
5810 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5812 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5814 wl1271_error("could not alloc ieee80211_hw");
5820 memset(wl, 0, sizeof(*wl));
/* Per-chip-family private area, sized by the caller. */
5822 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5824 wl1271_error("could not alloc wl priv");
5826 goto err_priv_alloc;
5829 INIT_LIST_HEAD(&wl->wlvif_list);
/* One skb queue per (link, AC) pair. */
5833 for (i = 0; i < NUM_TX_QUEUES; i++)
5834 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5835 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5837 skb_queue_head_init(&wl->deferred_rx_queue);
5838 skb_queue_head_init(&wl->deferred_tx_queue);
5840 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5841 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5842 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5843 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5844 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5845 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5846 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
/* Freezable so in-flight work is quiesced across system suspend. */
5848 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5849 if (!wl->freezable_wq) {
/* Default runtime state before firmware boot. */
5856 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5857 wl->band = IEEE80211_BAND_2GHZ;
5858 wl->channel_type = NL80211_CHAN_NO_HT;
5860 wl->sg_enabled = true;
5861 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5862 wl->recovery_count = 0;
5865 wl->ap_fw_ps_map = 0;
5867 wl->platform_quirks = 0;
5868 wl->system_hlid = WL12XX_SYSTEM_HLID;
5869 wl->active_sta_count = 0;
5870 wl->active_link_count = 0;
5872 init_waitqueue_head(&wl->fwlog_waitq);
5874 /* The system link is always allocated */
5875 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5877 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5878 for (i = 0; i < wl->num_tx_desc; i++)
5879 wl->tx_frames[i] = NULL;
5881 spin_lock_init(&wl->wl_lock);
5883 wl->state = WLCORE_STATE_OFF;
5884 wl->fw_type = WL12XX_FW_TYPE_NONE;
5885 mutex_init(&wl->mutex);
5886 mutex_init(&wl->flush_mutex);
5887 init_completion(&wl->nvs_loading_complete);
/* Aggregation buffer must be physically contiguous -> page allocator. */
5889 order = get_order(aggr_buf_size);
5890 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5891 if (!wl->aggr_buf) {
5895 wl->aggr_buf_size = aggr_buf_size;
5897 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5898 if (!wl->dummy_packet) {
5903 /* Allocate one page for the FW log */
5904 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5907 goto err_dummy_packet;
5910 wl->mbox_size = mbox_size;
/* GFP_DMA: mailbox is read by the device via DMA-capable memory. */
5911 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5917 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5918 if (!wl->buffer_32) {
/* Error unwinding: free in reverse order of allocation. */
5929 free_page((unsigned long)wl->fwlog);
5932 dev_kfree_skb(wl->dummy_packet);
5935 free_pages((unsigned long)wl->aggr_buf, order);
5938 destroy_workqueue(wl->freezable_wq);
5941 wl1271_debugfs_exit(wl);
5945 ieee80211_free_hw(hw);
5949 return ERR_PTR(ret);
/*
 * wlcore_free_hw - release everything allocated by wlcore_alloc_hw()
 *
 * Counterpart of wlcore_alloc_hw(); also wakes any blocked fwlog readers
 * (fwlog_size = -1 signals EOF/teardown) before tearing buffers down.
 */
5953 int wlcore_free_hw(struct wl1271 *wl)
5955 /* Unblock any fwlog readers */
5956 mutex_lock(&wl->mutex);
5957 wl->fwlog_size = -1;
5958 wake_up_interruptible_all(&wl->fwlog_waitq);
5959 mutex_unlock(&wl->mutex);
5961 wlcore_sysfs_free(wl);
5963 kfree(wl->buffer_32);
5965 free_page((unsigned long)wl->fwlog);
5966 dev_kfree_skb(wl->dummy_packet);
5967 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5969 wl1271_debugfs_exit(wl);
5973 wl->fw_type = WL12XX_FW_TYPE_NONE;
/* fw_status_1 / tx_res_if are allocated on firmware boot, freed here. */
5977 kfree(wl->fw_status_1);
5978 kfree(wl->tx_res_if);
5979 destroy_workqueue(wl->freezable_wq);
/* Must come last: wl itself lives inside the ieee80211_hw allocation. */
5982 ieee80211_free_hw(wl->hw);
/*
 * Wake-on-WLAN capabilities advertised to cfg80211; installed on the wiphy
 * in wlcore_nvs_cb() only when the platform keeps power in suspend.
 */
5989 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
5990 .flags = WIPHY_WOWLAN_ANY,
5991 .n_patterns = WL1271_MAX_RX_FILTERS,
5992 .pattern_min_len = 1,
5993 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
/*
 * wlcore_hardirq - top-half for edge-triggered IRQ platforms
 *
 * Does no work in hard-IRQ context; simply defers to the threaded handler
 * (wlcore_irq).  Installed only when WL12XX_PLATFORM_QUIRK_EDGE_IRQ is set.
 */
5997 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
5999 return IRQ_WAKE_THREAD;
/*
 * wlcore_nvs_cb - async firmware-loader callback; completes device probe
 * @fw: NVS calibration file, or NULL if it could not be loaded
 * @context: the struct wl1271 registered by wlcore_probe()
 *
 * Copies the NVS data, runs chip setup, installs the interrupt handler,
 * reads HW info, and registers with mac80211/sysfs.  Always releases @fw
 * and completes nvs_loading_complete so wlcore_remove() can proceed.
 * NOTE(review): error checks and goto labels between steps are elided in
 * this excerpt.
 */
6002 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6004 struct wl1271 *wl = context;
6005 struct platform_device *pdev = wl->pdev;
6006 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6007 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6008 unsigned long irqflags;
6010 irq_handler_t hardirq_fn = NULL;
/* Keep our own copy of the NVS; @fw is released before we return. */
6013 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6015 wl1271_error("Could not allocate nvs data");
6018 wl->nvs_len = fw->size;
/* Missing NVS is non-fatal: debug only; MAC will come from fuse. */
6020 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6026 ret = wl->ops->setup(wl);
6030 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6032 /* adjust some runtime configuration parameters */
6033 wlcore_adjust_conf(wl);
6035 wl->irq = platform_get_irq(pdev, 0);
6036 wl->platform_quirks = pdata->platform_quirks;
6037 wl->if_ops = pdev_data->if_ops;
/* Edge-triggered platforms need a hard-IRQ top half to kick the thread;
 * level-triggered ones can use IRQF_ONESHOT with no top half. */
6039 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ) {
6040 irqflags = IRQF_TRIGGER_RISING;
6041 hardirq_fn = wlcore_hardirq;
6043 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6046 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6047 irqflags, pdev->name, wl);
6049 wl1271_error("request_irq() failed: %d", ret);
/* Wake-from-suspend support; WoWLAN only if power stays on in suspend. */
6054 ret = enable_irq_wake(wl->irq);
6056 wl->irq_wake_enabled = true;
6057 device_init_wakeup(wl->dev, 1);
6058 if (pdata->pwr_in_suspend)
6059 wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
/* Keep the IRQ masked until the interface is brought up. */
6062 disable_irq(wl->irq);
6064 ret = wl12xx_get_hw_info(wl);
6066 wl1271_error("couldn't get hw info");
6070 ret = wl->ops->identify_chip(wl);
6074 ret = wl1271_init_ieee80211(wl);
6078 ret = wl1271_register_hw(wl);
6082 ret = wlcore_sysfs_init(wl);
6086 wl->initialized = true;
/* Error unwinding (labels elided): undo registration and the IRQ. */
6090 wl1271_unregister_hw(wl);
6093 free_irq(wl->irq, wl);
6099 release_firmware(fw);
/* Signal wlcore_remove() that async probing has finished either way. */
6100 complete_all(&wl->nvs_loading_complete);
/*
 * wlcore_probe - common probe entry called by the chip-family drivers
 *
 * Validates that the lower driver filled in its ops/partition table, then
 * kicks off asynchronous NVS loading; the rest of probing continues in
 * wlcore_nvs_cb().  On request failure, nvs_loading_complete is completed
 * so wlcore_remove() never blocks forever.
 */
6103 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6107 if (!wl->ops || !wl->ptable)
6110 wl->dev = &pdev->dev;
6112 platform_set_drvdata(pdev, wl);
/* FW_ACTION_HOTPLUG: allow udev/firmware helpers to supply the NVS. */
6114 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6115 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6118 wl1271_error("request_firmware_nowait failed: %d", ret);
6119 complete_all(&wl->nvs_loading_complete);
6124 EXPORT_SYMBOL_GPL(wlcore_probe);
/*
 * wlcore_remove - common platform-device removal path
 *
 * Waits for the async NVS callback to finish (it may still be running),
 * then undoes what wlcore_nvs_cb() set up: wake-IRQ, mac80211 registration
 * and the interrupt handler.  A probe that never initialized bails early.
 */
6126 int wlcore_remove(struct platform_device *pdev)
6128 struct wl1271 *wl = platform_get_drvdata(pdev);
/* Serialize against wlcore_nvs_cb(); completed on both success and error. */
6130 wait_for_completion(&wl->nvs_loading_complete);
6131 if (!wl->initialized)
6134 if (wl->irq_wake_enabled) {
6135 device_init_wakeup(wl->dev, 0);
6136 disable_irq_wake(wl->irq);
6138 wl1271_unregister_hw(wl);
6139 free_irq(wl->irq, wl);
6144 EXPORT_SYMBOL_GPL(wlcore_remove);
/* Module parameters and metadata.  debug_level is exported so the
 * chip-family modules (wl12xx/wl18xx) share one debug mask. */
6146 u32 wl12xx_debug_level = DEBUG_NONE;
6147 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6148 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6149 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
/* fwlog/bug_on_recovery/no_recovery are defined near the top of the file;
 * the -1 defaults mean "not set on the command line". */
6151 module_param_named(fwlog, fwlog_param, charp, 0);
6152 MODULE_PARM_DESC(fwlog,
6153 "FW logger options: continuous, ondemand, dbgpins or disable");
6155 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6156 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6158 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6159 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6161 MODULE_LICENSE("GPL");
6162 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6163 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6164 MODULE_FIRMWARE(WL12XX_NVS_NAME);