/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/********************************************\
Queue Control Unit, DFS Control Unit Functions
\********************************************/
29 * Get properties for a transmit queue
31 int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
32 struct ath5k_txq_info *queue_info)
34 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
39 * Set properties for a transmit queue
41 int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
42 const struct ath5k_txq_info *queue_info)
44 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
46 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
49 memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
51 /*XXX: Is this supported on 5210 ?*/
52 if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
53 ((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
54 (queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
55 queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
56 ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
62 * Initialize a transmit queue
64 int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
65 struct ath5k_txq_info *queue_info)
73 /*5210 only has 2 queues*/
74 if (ah->ah_version == AR5K_AR5210) {
76 case AR5K_TX_QUEUE_DATA:
77 queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
79 case AR5K_TX_QUEUE_BEACON:
80 case AR5K_TX_QUEUE_CAB:
81 queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
88 case AR5K_TX_QUEUE_DATA:
89 for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
90 ah->ah_txq[queue].tqi_type !=
91 AR5K_TX_QUEUE_INACTIVE; queue++) {
93 if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
97 case AR5K_TX_QUEUE_UAPSD:
98 queue = AR5K_TX_QUEUE_ID_UAPSD;
100 case AR5K_TX_QUEUE_BEACON:
101 queue = AR5K_TX_QUEUE_ID_BEACON;
103 case AR5K_TX_QUEUE_CAB:
104 queue = AR5K_TX_QUEUE_ID_CAB;
106 case AR5K_TX_QUEUE_XR_DATA:
107 if (ah->ah_version != AR5K_AR5212)
109 "XR data queues only supported in"
111 queue = AR5K_TX_QUEUE_ID_XR_DATA;
119 * Setup internal queue structure
121 memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
122 ah->ah_txq[queue].tqi_type = queue_type;
124 if (queue_info != NULL) {
125 queue_info->tqi_type = queue_type;
126 ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
132 * We use ah_txq_status to hold a temp value for
133 * the Secondary interrupt mask registers on 5211+
134 * check out ath5k_hw_reset_tx_queue
136 AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
142 * Get number of pending frames
143 * for a specific queue [5211+]
145 u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
148 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
150 /* Return if queue is declared inactive */
151 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
154 /* XXX: How about AR5K_CFG_TXCNT ? */
155 if (ah->ah_version == AR5K_AR5210)
158 pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
159 pending &= AR5K_QCU_STS_FRMPENDCNT;
161 /* It's possible to have no frames pending even if TXE
162 * is set. To indicate that q has not stopped return
164 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
171 * Set a transmit queue inactive
173 void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
175 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
178 /* This queue will be skipped in further operations */
179 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
181 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
185 * Set DFS properties for a transmit queue on DCU
187 int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
189 u32 cw_min, cw_max, retry_lg, retry_sh;
190 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
192 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
194 tq = &ah->ah_txq[queue];
196 if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
199 if (ah->ah_version == AR5K_AR5210) {
200 /* Only handle data queues, others will be ignored */
201 if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
205 ath5k_hw_reg_write(ah, ah->ah_turbo ?
206 AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
208 /* Set ACK_CTS timeout */
209 ath5k_hw_reg_write(ah, ah->ah_turbo ?
210 AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
211 AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
212 /* Set Transmit Latency */
213 ath5k_hw_reg_write(ah, ah->ah_turbo ?
214 AR5K_INIT_TRANSMIT_LATENCY_TURBO :
215 AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
219 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
220 (ah->ah_aifs + tq->tqi_aifs) *
221 AR5K_INIT_SLOT_TIME_TURBO) <<
222 AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
225 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
226 (ah->ah_aifs + tq->tqi_aifs) *
227 AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
228 AR5K_INIT_SIFS, AR5K_IFS0);
232 ath5k_hw_reg_write(ah, ah->ah_turbo ?
233 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
234 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
235 /* Set AR5K_PHY_SETTLING */
236 ath5k_hw_reg_write(ah, ah->ah_turbo ?
237 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
239 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
242 /* Set Frame Control Register */
243 ath5k_hw_reg_write(ah, ah->ah_turbo ?
244 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
245 AR5K_PHY_TURBO_SHORT | 0x2020) :
246 (AR5K_PHY_FRAME_CTL_INI | 0x1020),
247 AR5K_PHY_FRAME_CTL_5210);
251 * Calculate cwmin/max by channel mode
253 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
254 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
255 ah->ah_aifs = AR5K_TUNE_AIFS;
256 /*XR is only supported on 5212*/
257 if (IS_CHAN_XR(ah->ah_current_channel) &&
258 ah->ah_version == AR5K_AR5212) {
259 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
260 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
261 ah->ah_aifs = AR5K_TUNE_AIFS_XR;
262 /*B mode is not supported on 5210*/
263 } else if (IS_CHAN_B(ah->ah_current_channel) &&
264 ah->ah_version != AR5K_AR5210) {
265 cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
266 cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
267 ah->ah_aifs = AR5K_TUNE_AIFS_11B;
271 while (cw_min < ah->ah_cw_min)
272 cw_min = (cw_min << 1) | 1;
274 cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
275 ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
276 cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
277 ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
280 * Calculate and set retry limits
282 if (ah->ah_software_retry) {
283 /* XXX Need to test this */
284 retry_lg = ah->ah_limit_tx_retries;
285 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
286 AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
288 retry_lg = AR5K_INIT_LG_RETRY;
289 retry_sh = AR5K_INIT_SH_RETRY;
292 /*No QCU/DCU [5210]*/
293 if (ah->ah_version == AR5K_AR5210) {
294 ath5k_hw_reg_write(ah,
295 (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
296 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
297 AR5K_NODCU_RETRY_LMT_SLG_RETRY)
298 | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
299 AR5K_NODCU_RETRY_LMT_SSH_RETRY)
300 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
301 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
302 AR5K_NODCU_RETRY_LMT);
305 ath5k_hw_reg_write(ah,
306 AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
307 AR5K_DCU_RETRY_LMT_SLG_RETRY) |
308 AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
309 AR5K_DCU_RETRY_LMT_SSH_RETRY) |
310 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
311 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
312 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
314 /*===Rest is also for QCU/DCU only [5211+]===*/
317 * Set initial content window (cw_min/cw_max)
318 * and arbitrated interframe space (aifs)...
320 ath5k_hw_reg_write(ah,
321 AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
322 AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
323 AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
324 AR5K_DCU_LCL_IFS_AIFS),
325 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
330 /* Enable DCU early termination for this queue */
331 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
332 AR5K_QCU_MISC_DCU_EARLY);
334 /* Enable DCU to wait for next fragment from QCU */
335 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
336 AR5K_DCU_MISC_FRAG_WAIT);
338 /* On Maui and Spirit use the global seqnum on DCU */
339 if (ah->ah_mac_version < AR5K_SREV_AR5211)
340 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
341 AR5K_DCU_MISC_SEQNUM_CTL);
343 if (tq->tqi_cbr_period) {
344 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
345 AR5K_QCU_CBRCFG_INTVAL) |
346 AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
347 AR5K_QCU_CBRCFG_ORN_THRES),
348 AR5K_QUEUE_CBRCFG(queue));
349 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
350 AR5K_QCU_MISC_FRSHED_CBR);
351 if (tq->tqi_cbr_overflow_limit)
352 AR5K_REG_ENABLE_BITS(ah,
353 AR5K_QUEUE_MISC(queue),
354 AR5K_QCU_MISC_CBR_THRES_ENABLE);
357 if (tq->tqi_ready_time &&
358 (tq->tqi_type != AR5K_TX_QUEUE_CAB))
359 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
360 AR5K_QCU_RDYTIMECFG_INTVAL) |
361 AR5K_QCU_RDYTIMECFG_ENABLE,
362 AR5K_QUEUE_RDYTIMECFG(queue));
364 if (tq->tqi_burst_time) {
365 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
366 AR5K_DCU_CHAN_TIME_DUR) |
367 AR5K_DCU_CHAN_TIME_ENABLE,
368 AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
371 & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
372 AR5K_REG_ENABLE_BITS(ah,
373 AR5K_QUEUE_MISC(queue),
374 AR5K_QCU_MISC_RDY_VEOL_POLICY);
377 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
378 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
379 AR5K_QUEUE_DFS_MISC(queue));
381 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
382 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
383 AR5K_QUEUE_DFS_MISC(queue));
386 * Set registers by queue type
388 switch (tq->tqi_type) {
389 case AR5K_TX_QUEUE_BEACON:
390 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
391 AR5K_QCU_MISC_FRSHED_DBA_GT |
392 AR5K_QCU_MISC_CBREXP_BCN_DIS |
393 AR5K_QCU_MISC_BCN_ENABLE);
395 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
396 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
397 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
398 AR5K_DCU_MISC_ARBLOCK_IGNORE |
399 AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
400 AR5K_DCU_MISC_BCN_ENABLE);
403 case AR5K_TX_QUEUE_CAB:
404 /* XXX: use BCN_SENT_GT, if we can figure out how */
405 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
406 AR5K_QCU_MISC_FRSHED_DBA_GT |
407 AR5K_QCU_MISC_CBREXP_DIS |
408 AR5K_QCU_MISC_CBREXP_BCN_DIS);
410 ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
411 (AR5K_TUNE_SW_BEACON_RESP -
412 AR5K_TUNE_DMA_BEACON_RESP) -
413 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
414 AR5K_QCU_RDYTIMECFG_ENABLE,
415 AR5K_QUEUE_RDYTIMECFG(queue));
417 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
418 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
419 AR5K_DCU_MISC_ARBLOCK_CTL_S));
422 case AR5K_TX_QUEUE_UAPSD:
423 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
424 AR5K_QCU_MISC_CBREXP_DIS);
427 case AR5K_TX_QUEUE_DATA:
432 /* TODO: Handle frame compression */
435 * Enable interrupts for this tx queue
436 * in the secondary interrupt mask registers
438 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
439 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
441 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
442 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
444 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
445 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
447 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
448 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
450 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
451 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
453 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
454 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
456 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
457 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
459 if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
460 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
462 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
463 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
465 /* Update secondary interrupt mask registers */
467 /* Filter out inactive queues */
468 ah->ah_txq_imr_txok &= ah->ah_txq_status;
469 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
470 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
471 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
472 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
473 ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
474 ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
475 ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
476 ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
478 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
479 AR5K_SIMR0_QCU_TXOK) |
480 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
481 AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
482 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
483 AR5K_SIMR1_QCU_TXERR) |
484 AR5K_REG_SM(ah->ah_txq_imr_txeol,
485 AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
486 /* Update simr2 but don't overwrite rest simr2 settings */
487 AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
488 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
489 AR5K_REG_SM(ah->ah_txq_imr_txurn,
490 AR5K_SIMR2_QCU_TXURN));
491 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
492 AR5K_SIMR3_QCBRORN) |
493 AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
494 AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
495 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
496 AR5K_SIMR4_QTRIG), AR5K_SIMR4);
497 /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
498 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
499 AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
500 /* No queue has TXNOFRM enabled, disable the interrupt
501 * by setting AR5K_TXNOFRM to zero */
502 if (ah->ah_txq_imr_nofrm == 0)
503 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
505 /* Set QCU mask for this DCU to save power */
506 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
513 * Set slot time on DCU
515 int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
517 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
519 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
522 if (ah->ah_version == AR5K_AR5210)
523 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
525 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);