/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes descriptor size, because the
	 * hardware directly receives the frame into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary,
	 * this means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for IV/EIV
	 * and 8 bytes for ICV data as tailroom.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we now have a frame with the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev, skb->data,
						  skb->len, DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

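/*
 * Illustrative sketch (not part of the driver): how the allocation size
 * above is composed. The data_size/desc_size values are hypothetical;
 * real ones come from the per-driver data_queue_desc.
 */
#if 0
static unsigned int example_rxskb_size(bool hw_crypto)
{
	unsigned int frame_size = 2432 + 32;	/* data_size + desc_size */
	unsigned int head_size = 4;		/* 4-byte alignment slack */
	unsigned int tail_size = 0;

	if (hw_crypto) {
		head_size += 8;			/* IV/EIV headroom */
		tail_size += 8;			/* ICV tailroom */
	}

	return frame_size + head_size + tail_size;
}
#endif
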
void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb);

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}

void rt2x00queue_align_frame(struct sk_buff *skb)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, 0);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int frame_length = skb->len;
	unsigned int align = ALIGN_SIZE(skb, header_length);

	if (!align)
		return;

	skb_push(skb, align);
	memmove(skb->data, skb->data + align, frame_length);
	skb_trim(skb, frame_length);
}

void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int payload_length = skb->len - header_length;
	unsigned int header_align = ALIGN_SIZE(skb, 0);
	unsigned int payload_align = ALIGN_SIZE(skb, header_length);
	unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;

	/*
	 * Adjust the header alignment if the payload needs to be moved more
	 * than the header.
	 */
	if (payload_align > header_align)
		header_align += 4;

	/* There is nothing to do if no alignment is needed */
	if (!header_align)
		return;

	/* Reserve the amount of space needed in front of the frame */
	skb_push(skb, header_align);

	/* Move the frame header */
	memmove(skb->data, skb->data + header_align, header_length);

	/* Move the payload, if present and if required */
	if (payload_length && payload_align)
		memmove(skb->data + header_length + l2pad,
			skb->data + header_length + l2pad + payload_align,
			payload_length);

	/* Trim the skb to the correct size */
	skb_trim(skb, header_length + l2pad + payload_length);
}

void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
	unsigned int l2pad = L2PAD_SIZE(header_length);

	if (!l2pad)
		return;

	memmove(skb->data + l2pad, skb->data, header_length);
	skb_pull(skb, l2pad);
}

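/*
 * Worked example (illustrative, not part of the driver), assuming the
 * ALIGN_SIZE()/L2PAD_SIZE() definitions from rt2x00queue.h:
 */
#if 0
	/* 26-byte QoS data header: pad the payload to a 4-byte boundary */
	unsigned int header_length = 26;
	unsigned int l2pad = -header_length & 3;	/* L2PAD_SIZE(26) == 2 */
	/*
	 * TX: [hdr 26][pad 2][payload] -- on RX, rt2x00queue_remove_l2pad()
	 * shifts the header forward by 2 and skb_pull()s the pad away again.
	 */
#endif
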
static void rt2x00queue_create_tx_descriptor_seq(struct queue_entry *entry,
						 struct txentry_desc *txdesc)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
	unsigned long irqflags;

	if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) ||
	    unlikely(!tx_info->control.vif))
		return;

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	spin_lock_irqsave(&intf->seqlock, irqflags);

	if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
		intf->seqno += 0x10;
	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
	hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

	spin_unlock_irqrestore(&intf->seqlock, irqflags);

	__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}

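/*
 * Illustrative note (not part of the driver): the IEEE 802.11 Sequence
 * Control field packs the fragment number in bits 0-3 and the sequence
 * number in bits 4-15, which is why the counter above advances in steps
 * of 0x10 and why the IEEE80211_SCTL_FRAG bits of the old field are
 * preserved.
 */
#if 0
	u16 seqno = 5 << 4;	/* sequence number 5, fragment 0 */
	seqno += 0x10;		/* -> sequence number 6, fragment untouched */
#endif
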
static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry,
						  struct txentry_desc *txdesc,
						  const struct rt2x00_rate *hwrate)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;

	/* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */
	data_length = entry->skb->len + 4;
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb);

	/*
	 * PLCP setup.
	 * Length calculation depends on OFDM/CCK rate.
	 */
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension.
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}

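/*
 * Worked example (illustrative), assuming the rt2x00queue.h macros
 * GET_DURATION(__size, __rate) == ((__size) * 8 * 10) / (__rate) and
 * GET_DURATION_RES() as the matching remainder, with bitrate expressed
 * in 100 kbit/s units:
 *
 *   data_length = 1504 bytes at 11 Mbit/s (bitrate == 110)
 *   duration = (1504 * 8 * 10) / 110 = 1093 us, residual = 90
 *   residual != 0 -> duration becomes 1094 us; residual > 30, so the
 *   CCK Length Extension bit (service 0x80) stays clear.
 */
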
static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue.
	 */
	txdesc->qid = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/*
	 * Header and frame information.
	 */
	txdesc->length = entry->skb->len;
	txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	/*
	 * Check if this is a RTS/CTS frame.
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending.
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Check if more frames (!= fragments) are pending.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES)
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame, except for a frame that has
	 * been injected through a monitor interface. The latter is
	 * needed for testing a monitor interface.
	 */
	if ((ieee80211_is_beacon(hdr->frame_control) ||
	     ieee80211_is_probe_resp(hdr->frame_control)) &&
	    !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or this fragment came after RTS/CTS.
	 */
	if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
	    !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else {
		txdesc->ifs = IFS_SIFS;
	}

	/*
	 * Determine rate modulation.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->rate_mode = RATE_MODE_CCK;
	if (hwrate->flags & DEV_RATE_OFDM)
		txdesc->rate_mode = RATE_MODE_OFDM;

	/*
	 * Apply TX descriptor handling by components.
	 */
	rt2x00crypto_create_tx_descriptor(entry, txdesc);
	rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate);
	rt2x00queue_create_tx_descriptor_seq(entry, txdesc);
	rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate);
}

static int rt2x00queue_write_tx_data(struct queue_entry *entry,
				     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/*
	 * This should not happen, we already checked the entry
	 * was ours. When the hardware disagrees there has been
	 * a queue corruption!
	 */
	if (unlikely(rt2x00dev->ops->lib->get_entry_state &&
		     rt2x00dev->ops->lib->get_entry_state(entry))) {
		ERROR(rt2x00dev,
		      "Corrupt queue %d, accessing entry which is not ours.\n"
		      "Please file bug report to %s.\n",
		      entry->queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Add the requested extra tx headroom in front of the skb.
	 */
	skb_push(entry->skb, rt2x00dev->ops->extra_tx_headroom);
	memset(entry->skb->data, 0, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Call the driver's write_tx_data function, if it exists.
	 */
	if (rt2x00dev->ops->lib->write_tx_data)
		rt2x00dev->ops->lib->write_tx_data(entry, txdesc);

	/*
	 * Map the skb to DMA.
	 */
	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
		rt2x00queue_map_txskb(rt2x00dev, entry->skb);

	return 0;
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;

	queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
}

static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
				      struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	/*
	 * Check if we need to kick the queue, there are however a few rules:
	 *	1) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	2) Rule 1 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(queue);
}

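/*
 * Illustrative scenario (not part of the driver): for a three-fragment
 * burst, fragments 1 and 2 carry ENTRY_TXD_BURST so the queue is not
 * kicked yet; the last fragment clears the flag and triggers
 * kick_tx_queue(). If the queue ran past its threshold earlier, rule 2
 * kicks it immediately regardless of the burst flag.
 */
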
int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
			       bool local)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

	/*
	 * All information is retrieved from the skb->cb array,
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	if (local)
		skbdesc->flags |= SKBDESC_NOT_MAC80211;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(DRIVER_REQUIRE_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, &txdesc);
		else
			rt2x00crypto_tx_remove_iv(skb, &txdesc);
	}

	/*
	 * When DMA allocation is required we should guarantee to the
	 * driver that the DMA is aligned to a 4-byte boundary.
	 * However some drivers require L2 padding to pad the payload
	 * rather than the header. This could be a requirement for
	 * PCI and USB devices, while header alignment only is valid
	 * for PCI devices.
	 */
	if (test_bit(DRIVER_REQUIRE_L2PAD, &queue->rt2x00dev->flags))
		rt2x00queue_insert_l2pad(entry->skb, txdesc.header_length);
	else if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_align_frame(entry->skb);

	/*
	 * It could be possible that the queue was corrupted and this
	 * call failed. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);
	rt2x00queue_kick_tx_queue(entry, &txdesc);

	return 0;
}

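/*
 * Usage sketch (illustrative): the mac80211 tx handler in rt2x00mac.c
 * looks up the data_queue for the skb's queue mapping and hands the
 * frame over roughly like this; error handling is simplified here.
 */
#if 0
	struct data_queue *queue =
	    rt2x00queue_get_queue(rt2x00dev, skb_get_queue_mapping(skb));

	if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
		goto exit_fail;	/* frame could not be queued, drop it */
#endif
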
int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif,
			      const bool enable_beacon)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	mutex_lock(&intf->beacon_skb_mutex);

	/*
	 * Clean up the beacon skb.
	 */
	rt2x00queue_free_skb(rt2x00dev, intf->beacon->skb);
	intf->beacon->skb = NULL;

	if (!enable_beacon) {
		rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue);
		mutex_unlock(&intf->beacon_skb_mutex);
		return 0;
	}

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb) {
		mutex_unlock(&intf->beacon_skb_mutex);
		return -ENOMEM;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Fill in the skb descriptor.
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = intf->beacon;

	/*
	 * Send the beacon to the hardware and enable beacon generation.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);

	mutex_unlock(&intf->beacon_skb_mutex);

	return 0;
}

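/*
 * Usage sketch (illustrative): callers such as the bss_info_changed
 * handler toggle beaconing per interface; the parameter names here are
 * assumptions, not the exact call site.
 */
#if 0
	/* Start beaconing when the interface enables it, stop otherwise. */
	rt2x00queue_update_beacon(rt2x00dev, vif, bss_conf->enable_beacon);
#endif
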
void rt2x00queue_for_each_entry(struct data_queue *queue,
				enum queue_index start,
				enum queue_index end,
				void (*fn)(struct queue_entry *entry))
{
	unsigned long irqflags;
	unsigned int index_start;
	unsigned int index_end;
	unsigned int i;

	if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index range (%d - %d)\n",
		      start, end);
		return;
	}

	/*
	 * Only protect the range we are going to loop over,
	 * if during our loop an extra entry is set to pending
	 * it should not be kicked during this run, since it
	 * is part of another TX operation.
	 */
	spin_lock_irqsave(&queue->lock, irqflags);
	index_start = queue->index[start];
	index_end = queue->index[end];
	spin_unlock_irqrestore(&queue->lock, irqflags);

	/*
	 * Start from the TX done pointer, this guarantees that we will
	 * send out all frames in the correct order.
	 */
	if (index_start < index_end) {
		for (i = index_start; i < index_end; i++)
			fn(&queue->entries[i]);
	} else {
		for (i = index_start; i < queue->limit; i++)
			fn(&queue->entries[i]);

		for (i = 0; i < index_end; i++)
			fn(&queue->entries[i]);
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry);

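/*
 * Worked example (illustrative): with queue->limit == 32,
 * index_start == 28 and index_end == 3, the ring has wrapped, so the
 * second branch above visits entries 28..31 and then 0..2 -- the same
 * logical range in submission order.
 */
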
struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	if (queue == QID_RX)
		return rt2x00dev->rx;

	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	queue->last_action[index] = jiffies;

	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

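/*
 * Illustrative note (not part of the driver): Q_INDEX advances when a
 * frame is submitted (queue->length grows), Q_INDEX_DONE advances when
 * the device reports completion (length shrinks, the lifetime counter
 * grows), so "length" is always the number of frames in flight.
 */
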
static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;

	for (i = 0; i < Q_INDEX_MAX; i++) {
		queue->index[i] = 0;
		queue->last_action[i] = jiffies;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	txall_queue_for_each(rt2x00dev, queue)
		rt2x00dev->ops->lib->kill_tx_queue(queue);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
			if (queue->qid == QID_RX)
				rt2x00queue_index_inc(queue, Q_INDEX);
		}
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kcalloc(queue->limit, entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

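/*
 * Illustrative layout note (not part of the driver): kcalloc() above
 * reserves limit * (sizeof(entry) + priv_size) bytes, used as one block:
 *
 *   [entry 0][entry 1]...[entry N-1][priv 0][priv 1]...[priv N-1]
 *
 * so QUEUE_ENTRY_PRIV_OFFSET() points entry i's priv_data just past the
 * entry array, at base + N * sizeof(entry) + i * priv_size.
 */
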
static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}

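/*
 * Illustrative layout note (not part of the driver): for a device with
 * 4 TX queues and an ATIM queue, the single kcalloc'd array holds
 *
 *   queue[0]	RX
 *   queue[1-4]	TX (QID_AC_BE..QID_AC_VO)
 *   queue[5]	beacon
 *   queue[6]	ATIM
 *
 * which is exactly what the rx/tx/bcn pointers above index into.
 */
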
void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}