2 * This file is part of wl1271
4 * Copyright (C) 2009 Nokia Corporation
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
24 #include <linux/kernel.h>
25 #include <linux/module.h>
28 #include "wl1271_io.h"
29 #include "wl1271_reg.h"
30 #include "wl1271_ps.h"
31 #include "wl1271_tx.h"
/*
 * Claim a free slot in the wl->tx_frames[] descriptor table for @skb.
 * Linearly scans all ACX_TX_DESCRIPTORS entries and stores the skb in
 * the first NULL slot.
 *
 * NOTE(review): the return statements are not visible in this excerpt —
 * presumably the claimed slot index on success and a negative value when
 * the table is full (wl1271_tx_allocate checks the result); confirm
 * against the full source.
 */
33 static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
36 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
37 if (wl->tx_frames[i] == NULL) {
/* Reserve the slot by storing the skb; released on allocation failure
 * (see wl1271_tx_allocate) or on TX completion. */
38 wl->tx_frames[i] = skb;
/*
 * Reserve firmware resources for one outgoing frame: a TX id (slot in
 * wl->tx_frames[]) and enough hardware memory blocks, then push the
 * hardware descriptor header onto the front of @skb.
 *
 * @extra: additional header room required (e.g. security header space).
 *
 * NOTE(review): error-path returns (e.g. -EBUSY when no blocks are
 * available) are not visible in this excerpt — confirm against the full
 * source.
 */
46 static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
48 struct wl1271_tx_hw_descr *desc;
/* Total on-air buffer = payload + HW descriptor + extra header room. */
49 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
53 /* allocate free identifier for the packet */
54 id = wl1271_tx_id(wl, skb);
58 /* approximate the number of blocks required for this packet
/* Round total_len up to whole TX_HW_BLOCK_SIZE blocks, then add the
 * fixed spare-block margin required by the firmware. */
60 total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
61 total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
62 if (total_blocks <= wl->tx_blocks_available) {
/* Prepend descriptor + extra space; skb_push grows the headroom by
 * (total_len - skb->len) bytes. */
63 desc = (struct wl1271_tx_hw_descr *)skb_push(
64 skb, total_len - skb->len);
66 desc->extra_mem_blocks = TX_HW_BLOCK_SPARE;
67 desc->total_mem_blocks = total_blocks;
/* Account the blocks as consumed; given back on TX completion. */
70 wl->tx_blocks_available -= total_blocks;
74 wl1271_debug(DEBUG_TX,
75 "tx_allocate: size: %d, blocks: %d, id: %d",
76 total_len, total_blocks, id);
/* NOTE(review): this line appears to belong to the failure path —
 * releasing the reserved id when no memory blocks were available;
 * the surrounding else-branch is not visible in this excerpt. */
78 wl->tx_frames[id] = NULL;
/*
 * Fill in the hardware TX descriptor that wl1271_tx_allocate() pushed
 * onto the front of @skb: relocate the 802.11 header to open security
 * header space, and set lifetime, TID, AID, aligned length and TX
 * attributes.
 *
 * @extra:   bytes of security header space to open up after the 802.11
 *           header (e.g. WL1271_TKIP_IV_SPACE for TKIP).
 * @control: mac80211 TX info used to pick the rate policy.
 */
85 static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
86 u32 extra, struct ieee80211_tx_info *control)
89 struct wl1271_tx_hw_descr *desc;
/* The descriptor sits at the head of the skb (pushed by tx_allocate). */
94 desc = (struct wl1271_tx_hw_descr *) skb->data;
96 /* relocate space for security header */
/* The frame currently starts @extra bytes past the descriptor; read the
 * frame-control field there, then move just the 802.11 header forward so
 * @extra bytes of room open up between header and payload. */
98 void *framestart = skb->data + sizeof(*desc);
99 u16 fc = *(u16 *)(framestart + extra);
100 int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
101 memmove(framestart, framestart + extra, hdrlen);
104 /* configure packet life time */
/* ns >> 10 — approximately microseconds; offset against the firmware
 * clock via wl->time_offset. */
106 hosttime = (timespec_to_ns(&ts) >> 10);
107 desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
108 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
110 /* configure the tx attributes */
111 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
/* Map the mac80211 queue to an AC, and the AC to a firmware TID. */
114 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
115 desc->tid = wl1271_tx_ac_to_tid(ac);
117 desc->aid = TX_HW_DEFAULT_AID;
120 /* align the length (and store in terms of words) */
/* WL1271_TX_ALIGN rounds skb->len up to a 4-byte boundary; the
 * descriptor carries the length in 32-bit words (hence >> 2). */
121 pad = WL1271_TX_ALIGN(skb->len);
122 desc->length = cpu_to_le16(pad >> 2);
124 /* calculate number of padding bytes */
125 pad = pad - skb->len;
126 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
128 /* if the packets are destined for AP (have a STA entry) send them
129 with AP rate policies, otherwise use default basic rates */
130 if (control->control.sta)
131 tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
133 desc->tx_attr = cpu_to_le16(tx_attr);
135 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
/*
 * DMA the fully-prepared frame (descriptor + 802.11 frame) to the chip
 * and bump the TX packet counter that will later be written to the
 * firmware's write-access register.
 *
 * NOTE(review): return statements are not visible in this excerpt.
 */
139 static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
140 struct ieee80211_tx_info *control)
143 struct wl1271_tx_hw_descr *desc;
146 /* FIXME: This is a workaround for getting non-aligned packets.
147 This happens at least with EAPOL packets from the user space.
148 Our DMA requires packets to be aligned on a 4-byte boundary.
/* Low two address bits set => data pointer is not 4-byte aligned. */
150 if (unlikely((long)skb->data & 0x03)) {
/* Bytes needed to move the data pointer up to the next 4-byte
 * boundary (0..3). */
151 int offset = (4 - (long)skb->data) & 0x03;
152 wl1271_debug(DEBUG_TX, "skb offset %d", offset);
154 /* check whether the current skb can be used */
/* Only safe to shift the payload in place if the skb is private to
 * us and has tailroom to absorb the shift. */
155 if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
156 unsigned char *src = skb->data;
158 /* align the buffer on a 4-byte boundary */
159 skb_reserve(skb, offset);
160 memmove(skb->data, src, skb->len);
/* NOTE(review): this appears to be the else-branch — a cloned or
 * tailroom-less skb is not handled yet (see FIXME above). */
162 wl1271_info("No handler, fixme!");
/* Pad the write length up to a 4-byte multiple for the block write. */
167 len = WL1271_TX_ALIGN(skb->len);
169 /* perform a fixed address block write with the packet */
170 wl1271_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
172 /* write packet new counter into the write access register */
/* Counter is flushed to WL1271_HOST_WR_ACCESS in wl1271_tx_work(). */
173 wl->tx_packets_count++;
175 desc = (struct wl1271_tx_hw_descr *) skb->data;
176 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
177 desc->id, skb, len, desc->length);
182 /* caller must hold wl->mutex */
/*
 * Transmit a single frame: reserve TKIP header space if needed, sync the
 * firmware's default WEP key index with the frame's key, then allocate
 * firmware resources, fill the HW descriptor and DMA the packet out.
 *
 * Returns the result of the failing step, or the value of
 * wl1271_tx_send_packet() (error-return lines between the steps are not
 * visible in this excerpt).
 */
183 static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
185 struct ieee80211_tx_info *info;
193 info = IEEE80211_SKB_CB(skb);
/* TKIP needs extra headroom for the IV between header and payload. */
195 if (info->control.hw_key &&
196 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
197 extra = WL1271_TKIP_IV_SPACE;
199 if (info->control.hw_key) {
200 idx = info->control.hw_key->hw_key_idx;
202 /* FIXME: do we have to do this if we're not using WEP? */
/* Tell the firmware which key slot to use when it differs from the
 * last one programmed; cache it to avoid redundant commands. */
203 if (unlikely(wl->default_key != idx)) {
204 ret = wl1271_cmd_set_default_wep_key(wl, idx);
207 wl->default_key = idx;
/* Three-step TX pipeline: allocate -> fill header -> send. */
211 ret = wl1271_tx_allocate(wl, skb, extra);
215 ret = wl1271_tx_fill_hdr(wl, skb, extra, info);
219 ret = wl1271_tx_send_packet(wl, skb, info);
/*
 * Translate a mac80211 rate-set bitmap (@rate_set, one bit per bitrate
 * index of the current band) into the firmware's rate representation by
 * OR-ing together the hw_value of every enabled bitrate.
 *
 * NOTE(review): the per-bit test of @rate_set inside the loop is not
 * visible in this excerpt — presumably each hw_value is OR-ed in only
 * when the corresponding bit is set; confirm against the full source.
 */
226 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
228 struct ieee80211_supported_band *band;
229 u32 enabled_rates = 0;
/* Operate on the band the interface is currently tuned to. */
232 band = wl->hw->wiphy->bands[wl->band];
233 for (bit = 0; bit < band->n_bitrates; bit++) {
235 enabled_rates |= band->bitrates[bit].hw_value;
239 return enabled_rates;
/*
 * Deferred TX worker: drains wl->tx_queue under wl->mutex, re-applying
 * rate policies first if the AP's supported rates changed, and finally
 * kicks the firmware with the new packet count.
 *
 * On -EBUSY from wl1271_tx_frame() (firmware buffers full) the frame is
 * requeued at the head and draining stops; the queue is resumed from
 * wl1271_tx_complete().
 */
242 void wl1271_tx_work(struct work_struct *work)
244 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
246 bool woken_up = false;
248 u32 prev_tx_packets_count;
251 /* check if the rates supported by the AP have changed */
252 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
/* Snapshot sta_rate_set under the spinlock; applied below under the
 * mutex once the device state has been checked. */
255 spin_lock_irqsave(&wl->wl_lock, flags);
256 sta_rates = wl->sta_rate_set;
257 spin_unlock_irqrestore(&wl->wl_lock, flags);
260 mutex_lock(&wl->mutex);
262 if (unlikely(wl->state == WL1271_STATE_OFF))
/* Remember the counter so we only ring the doorbell if something was
 * actually written this round. */
265 prev_tx_packets_count = wl->tx_packets_count;
267 /* if rates have changed, re-configure the rate policy */
268 if (unlikely(sta_rates)) {
269 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
270 wl1271_acx_rate_policies(wl);
/* Drain the queue; wake the chip from ELP before the first frame. */
273 while ((skb = skb_dequeue(&wl->tx_queue))) {
275 ret = wl1271_ps_elp_wakeup(wl, false);
281 ret = wl1271_tx_frame(wl, skb);
283 /* firmware buffer is full, lets stop transmitting. */
284 skb_queue_head(&wl->tx_queue, skb);
286 } else if (ret < 0) {
293 /* interrupt the firmware with the new packets */
294 if (prev_tx_packets_count != wl->tx_packets_count)
295 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
/* Allow the chip back into ELP now that the burst is done. */
299 wl1271_ps_elp_sleep(wl);
301 mutex_unlock(&wl->mutex);
/*
 * Process one firmware TX result: translate it into mac80211 TX status,
 * update driver statistics and the security sequence counter, strip the
 * private descriptor (and TKIP header space) from the skb, hand the skb
 * back to mac80211 and free the descriptor-table slot.
 */
304 static void wl1271_tx_complete_packet(struct wl1271 *wl,
305 struct wl1271_tx_hw_res_descr *result)
307 struct ieee80211_tx_info *info;
313 /* check for id legality */
/* Guard against corrupt results: id out of range or slot already freed. */
314 if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
315 wl1271_warning("TX result illegal id: %d", id);
319 skb = wl->tx_frames[id];
320 info = IEEE80211_SKB_CB(skb);
322 /* update the TX status info */
323 if (result->status == TX_SUCCESS) {
/* Report ACK only for frames that expected one. */
324 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
325 info->flags |= IEEE80211_TX_STAT_ACK;
326 rate = wl1271_rate_to_idx(wl, result->rate_class_index);
327 retries = result->ack_failures;
328 } else if (result->status == TX_RETRY_EXCEEDED) {
329 wl->stats.excessive_retries++;
330 retries = result->ack_failures;
333 info->status.rates[0].idx = rate;
334 info->status.rates[0].count = retries;
335 info->status.rates[0].flags = 0;
/* ACK signal strength is not reported by the firmware. */
336 info->status.ack_signal = -1;
338 wl->stats.retry_count += result->ack_failures;
340 /* update security sequence number */
/* Accumulate the delta of the firmware's 8-bit LSB counter so the
 * driver-side sequence number keeps monotonically increasing. */
341 wl->tx_security_seq += (result->lsb_security_sequence_number -
342 wl->tx_security_last_seq);
343 wl->tx_security_last_seq = result->lsb_security_sequence_number;
345 /* remove private header from packet */
346 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
348 /* remove TKIP header space if present */
/* Undo the header relocation done in wl1271_tx_fill_hdr(): shift the
 * 802.11 header back over the IV space, then trim it off. */
349 if (info->control.hw_key &&
350 info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
351 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
352 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
353 skb_pull(skb, WL1271_TKIP_IV_SPACE);
356 wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
358 result->id, skb, result->ack_failures,
359 result->rate_class_index, result->status);
361 /* return the packet to the stack */
362 ieee80211_tx_status(wl->hw, skb);
363 wl->tx_frames[result->id] = NULL;
367 /* Called upon reception of a TX complete interrupt */
/*
 * Read the firmware's TX result ring, ack it by writing back the host
 * counter, process each pending result, and restart the mac80211 queues
 * once enough room has opened below the low watermark.
 */
368 void wl1271_tx_complete(struct wl1271 *wl)
370 struct wl1271_acx_mem_map *memmap =
371 (struct wl1271_acx_mem_map *)wl->target_mem_map;
372 u32 count, fw_counter;
375 /* read the tx results from the chipset */
376 wl1271_read(wl, le32_to_cpu(memmap->tx_result),
377 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
378 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
380 /* write host counter to chipset (to ack) */
381 wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
382 offsetof(struct wl1271_tx_hw_res_if,
383 tx_result_host_counter), fw_counter);
/* Number of new results since the last time we processed the ring. */
385 count = fw_counter - wl->tx_results_count;
386 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
388 /* verify that the result buffer is not getting overrun */
389 if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
390 wl1271_warning("TX result overflow from chipset: %d", count);
392 /* process the results */
393 for (i = 0; i < count; i++) {
394 struct wl1271_tx_hw_res_descr *result;
/* The ring is a power-of-two buffer; mask gives the ring offset. */
395 u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;
397 /* process the packet */
398 result = &(wl->tx_res_if->tx_results_queue[offset]);
399 wl1271_tx_complete_packet(wl, result);
401 wl->tx_results_count++;
/* If we had stopped mac80211's queues and the backlog has drained
 * below the watermark, wake them and re-kick the TX worker. */
404 if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
405 skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
408 /* firmware buffer has space, restart queues */
409 wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
410 spin_lock_irqsave(&wl->wl_lock, flags);
411 ieee80211_wake_queues(wl->hw);
412 clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
413 spin_unlock_irqrestore(&wl->wl_lock, flags);
414 ieee80211_queue_work(wl->hw, &wl->tx_work);
418 /* caller must hold wl->mutex */
/*
 * Drop all TX state: flush every queued skb and every in-flight frame in
 * the descriptor table back to mac80211 (without real status info), and
 * zero the in-flight frame counter.  Used on shutdown/recovery.
 */
419 void wl1271_tx_reset(struct wl1271 *wl)
425 /* control->flags = 0; FIXME */
/* First drain frames never handed to the firmware... */
427 while ((skb = skb_dequeue(&wl->tx_queue))) {
428 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
429 ieee80211_tx_status(wl->hw, skb);
/* ...then release frames still waiting for a TX result. */
432 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
433 if (wl->tx_frames[i] != NULL) {
434 skb = wl->tx_frames[i];
435 wl->tx_frames[i] = NULL;
436 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
437 ieee80211_tx_status(wl->hw, skb);
439 wl->tx_frames_cnt = 0;
/* Maximum time to wait for TX to drain, in microseconds (500 ms). */
442 #define WL1271_TX_FLUSH_TIMEOUT 500000
444 /* caller must *NOT* hold wl->mutex */
/*
 * Busy-poll (briefly taking wl->mutex each iteration) until both the
 * software queue and the in-flight descriptor table are empty, or the
 * flush timeout expires.  Logs a warning on timeout.
 *
 * NOTE(review): no sleep/delay between polls is visible in this excerpt
 * — presumably there is one in the full source; confirm.
 */
445 void wl1271_tx_flush(struct wl1271 *wl)
447 unsigned long timeout;
448 timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
450 while (!time_after(jiffies, timeout)) {
451 mutex_lock(&wl->mutex);
452 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
/* Done when nothing is queued and nothing awaits a TX result. */
454 if ((wl->tx_frames_cnt == 0) &&
455 skb_queue_empty(&wl->tx_queue)) {
456 mutex_unlock(&wl->mutex);
459 mutex_unlock(&wl->mutex);
463 wl1271_warning("Unable to flush all TX buffers, timed out.");