/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
19 static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
20 struct ath9k_tx_queue_info *qi)
22 ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
23 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
24 ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
26 ah->txurn_interrupt_mask);
28 REG_WRITE(ah, AR_IMR_S0,
29 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
30 | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
31 REG_WRITE(ah, AR_IMR_S1,
32 SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
33 | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
34 REG_RMW_FIELD(ah, AR_IMR_S2,
35 AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
38 u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
40 return REG_READ(ah, AR_QTXDP(q));
43 void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
45 REG_WRITE(ah, AR_QTXDP(q), txdp);
48 void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
50 ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
51 "Enable TXE on queue: %u\n", q);
52 REG_WRITE(ah, AR_Q_TXE, 1 << q);
55 u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
59 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
62 if (REG_READ(ah, AR_Q_TXE) & (1 << q))
69 bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
71 u32 txcfg, curLevel, newLevel;
74 if (ah->tx_trig_level >= MAX_TX_FIFO_THRESHOLD)
77 omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);
79 txcfg = REG_READ(ah, AR_TXCFG);
80 curLevel = MS(txcfg, AR_FTRIG);
83 if (curLevel < MAX_TX_FIFO_THRESHOLD)
85 } else if (curLevel > MIN_TX_FIFO_THRESHOLD)
87 if (newLevel != curLevel)
88 REG_WRITE(ah, AR_TXCFG,
89 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
91 ath9k_hw_set_interrupts(ah, omask);
93 ah->tx_trig_level = newLevel;
95 return newLevel != curLevel;
98 bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
100 #define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */
101 #define ATH9K_TIME_QUANTUM 100 /* usec */
102 struct ath_common *common = ath9k_hw_common(ah);
103 struct ath9k_hw_capabilities *pCap = &ah->caps;
104 struct ath9k_tx_queue_info *qi;
106 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
108 if (q >= pCap->total_queues) {
109 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
110 "invalid queue: %u\n", q);
115 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
116 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
117 "inactive queue: %u\n", q);
121 REG_WRITE(ah, AR_Q_TXD, 1 << q);
123 for (wait = wait_time; wait != 0; wait--) {
124 if (ath9k_hw_numtxpending(ah, q) == 0)
126 udelay(ATH9K_TIME_QUANTUM);
129 if (ath9k_hw_numtxpending(ah, q)) {
130 ath_print(common, ATH_DBG_QUEUE,
131 "%s: Num of pending TX Frames %d on Q %d\n",
132 __func__, ath9k_hw_numtxpending(ah, q), q);
134 for (j = 0; j < 2; j++) {
135 tsfLow = REG_READ(ah, AR_TSF_L32);
136 REG_WRITE(ah, AR_QUIET2,
137 SM(10, AR_QUIET2_QUIET_DUR));
138 REG_WRITE(ah, AR_QUIET_PERIOD, 100);
139 REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
140 REG_SET_BIT(ah, AR_TIMER_MODE,
143 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
146 ath_print(common, ATH_DBG_QUEUE,
147 "TSF has moved while trying to set "
148 "quiet time TSF: 0x%08x\n", tsfLow);
151 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
154 REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);
157 while (ath9k_hw_numtxpending(ah, q)) {
159 ath_print(common, ATH_DBG_QUEUE,
160 "Failed to stop TX DMA in 100 "
161 "msec after killing last frame\n");
164 udelay(ATH9K_TIME_QUANTUM);
167 REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
170 REG_WRITE(ah, AR_Q_TXD, 0);
173 #undef ATH9K_TX_STOP_DMA_TIMEOUT
174 #undef ATH9K_TIME_QUANTUM
177 void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
178 u32 segLen, bool firstSeg,
179 bool lastSeg, const struct ath_desc *ds0)
181 struct ar5416_desc *ads = AR5416DESC(ds);
184 ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
185 } else if (lastSeg) {
187 ads->ds_ctl1 = segLen;
188 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
189 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
192 ads->ds_ctl1 = segLen | AR_TxMore;
196 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
197 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
198 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
199 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
200 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
203 void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
205 struct ar5416_desc *ads = AR5416DESC(ds);
207 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
208 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
209 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
210 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
211 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
214 int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
216 struct ar5416_desc *ads = AR5416DESC(ds);
218 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
221 ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
222 ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
223 ds->ds_txstat.ts_status = 0;
224 ds->ds_txstat.ts_flags = 0;
226 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
227 ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
228 if (ads->ds_txstatus1 & AR_Filtered)
229 ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
230 if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
231 ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
232 ath9k_hw_updatetxtriglevel(ah, true);
234 if (ads->ds_txstatus9 & AR_TxOpExceeded)
235 ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
236 if (ads->ds_txstatus1 & AR_TxTimerExpired)
237 ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
239 if (ads->ds_txstatus1 & AR_DescCfgErr)
240 ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
241 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
242 ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
243 ath9k_hw_updatetxtriglevel(ah, true);
245 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
246 ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
247 ath9k_hw_updatetxtriglevel(ah, true);
249 if (ads->ds_txstatus0 & AR_TxBaStatus) {
250 ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
251 ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
252 ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
255 ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
256 switch (ds->ds_txstat.ts_rateindex) {
258 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
261 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
264 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
267 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
271 ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
272 ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
273 ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
274 ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
275 ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
276 ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
277 ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
278 ds->ds_txstat.evm0 = ads->AR_TxEVM0;
279 ds->ds_txstat.evm1 = ads->AR_TxEVM1;
280 ds->ds_txstat.evm2 = ads->AR_TxEVM2;
281 ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
282 ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
283 ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
284 ds->ds_txstat.ts_antenna = 0;
289 void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
290 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
291 u32 keyIx, enum ath9k_key_type keyType, u32 flags)
293 struct ar5416_desc *ads = AR5416DESC(ds);
295 txPower += ah->txpower_indexoffset;
299 ads->ds_ctl0 = (pktLen & AR_FrameLen)
300 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
301 | SM(txPower, AR_XmitPower)
302 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
303 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
304 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
305 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
308 (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
309 | SM(type, AR_FrameType)
310 | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
311 | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
312 | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
314 ads->ds_ctl6 = SM(keyType, AR_EncrType);
316 if (AR_SREV_9285(ah)) {
324 void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
325 struct ath_desc *lastds,
326 u32 durUpdateEn, u32 rtsctsRate,
328 struct ath9k_11n_rate_series series[],
329 u32 nseries, u32 flags)
331 struct ar5416_desc *ads = AR5416DESC(ds);
332 struct ar5416_desc *last_ads = AR5416DESC(lastds);
335 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
336 ds_ctl0 = ads->ds_ctl0;
338 if (flags & ATH9K_TXDESC_RTSENA) {
339 ds_ctl0 &= ~AR_CTSEnable;
340 ds_ctl0 |= AR_RTSEnable;
342 ds_ctl0 &= ~AR_RTSEnable;
343 ds_ctl0 |= AR_CTSEnable;
346 ads->ds_ctl0 = ds_ctl0;
349 (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
352 ads->ds_ctl2 = set11nTries(series, 0)
353 | set11nTries(series, 1)
354 | set11nTries(series, 2)
355 | set11nTries(series, 3)
356 | (durUpdateEn ? AR_DurUpdateEna : 0)
357 | SM(0, AR_BurstDur);
359 ads->ds_ctl3 = set11nRate(series, 0)
360 | set11nRate(series, 1)
361 | set11nRate(series, 2)
362 | set11nRate(series, 3);
364 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
365 | set11nPktDurRTSCTS(series, 1);
367 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
368 | set11nPktDurRTSCTS(series, 3);
370 ads->ds_ctl7 = set11nRateFlags(series, 0)
371 | set11nRateFlags(series, 1)
372 | set11nRateFlags(series, 2)
373 | set11nRateFlags(series, 3)
374 | SM(rtsctsRate, AR_RTSCTSRate);
375 last_ads->ds_ctl2 = ads->ds_ctl2;
376 last_ads->ds_ctl3 = ads->ds_ctl3;
379 void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
382 struct ar5416_desc *ads = AR5416DESC(ds);
384 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
385 ads->ds_ctl6 &= ~AR_AggrLen;
386 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
389 void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
392 struct ar5416_desc *ads = AR5416DESC(ds);
395 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
398 ctl6 &= ~AR_PadDelim;
399 ctl6 |= SM(numDelims, AR_PadDelim);
403 void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
405 struct ar5416_desc *ads = AR5416DESC(ds);
407 ads->ds_ctl1 |= AR_IsAggr;
408 ads->ds_ctl1 &= ~AR_MoreAggr;
409 ads->ds_ctl6 &= ~AR_PadDelim;
412 void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
414 struct ar5416_desc *ads = AR5416DESC(ds);
416 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
419 void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
422 struct ar5416_desc *ads = AR5416DESC(ds);
424 ads->ds_ctl2 &= ~AR_BurstDur;
425 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
428 void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
431 struct ar5416_desc *ads = AR5416DESC(ds);
434 ads->ds_ctl0 |= AR_VirtMoreFrag;
436 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
439 void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
441 *txqs &= ah->intr_txqs;
442 ah->intr_txqs &= ~(*txqs);
445 bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
446 const struct ath9k_tx_queue_info *qinfo)
449 struct ath_common *common = ath9k_hw_common(ah);
450 struct ath9k_hw_capabilities *pCap = &ah->caps;
451 struct ath9k_tx_queue_info *qi;
453 if (q >= pCap->total_queues) {
454 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
455 "invalid queue: %u\n", q);
460 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
461 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
462 "inactive queue: %u\n", q);
466 ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
468 qi->tqi_ver = qinfo->tqi_ver;
469 qi->tqi_subtype = qinfo->tqi_subtype;
470 qi->tqi_qflags = qinfo->tqi_qflags;
471 qi->tqi_priority = qinfo->tqi_priority;
472 if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
473 qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
475 qi->tqi_aifs = INIT_AIFS;
476 if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
477 cw = min(qinfo->tqi_cwmin, 1024U);
479 while (qi->tqi_cwmin < cw)
480 qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
482 qi->tqi_cwmin = qinfo->tqi_cwmin;
483 if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
484 cw = min(qinfo->tqi_cwmax, 1024U);
486 while (qi->tqi_cwmax < cw)
487 qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
489 qi->tqi_cwmax = INIT_CWMAX;
491 if (qinfo->tqi_shretry != 0)
492 qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
494 qi->tqi_shretry = INIT_SH_RETRY;
495 if (qinfo->tqi_lgretry != 0)
496 qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
498 qi->tqi_lgretry = INIT_LG_RETRY;
499 qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
500 qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
501 qi->tqi_burstTime = qinfo->tqi_burstTime;
502 qi->tqi_readyTime = qinfo->tqi_readyTime;
504 switch (qinfo->tqi_subtype) {
506 if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
507 qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
516 bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
517 struct ath9k_tx_queue_info *qinfo)
519 struct ath_common *common = ath9k_hw_common(ah);
520 struct ath9k_hw_capabilities *pCap = &ah->caps;
521 struct ath9k_tx_queue_info *qi;
523 if (q >= pCap->total_queues) {
524 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
525 "invalid queue: %u\n", q);
530 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
531 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
532 "inactive queue: %u\n", q);
536 qinfo->tqi_qflags = qi->tqi_qflags;
537 qinfo->tqi_ver = qi->tqi_ver;
538 qinfo->tqi_subtype = qi->tqi_subtype;
539 qinfo->tqi_qflags = qi->tqi_qflags;
540 qinfo->tqi_priority = qi->tqi_priority;
541 qinfo->tqi_aifs = qi->tqi_aifs;
542 qinfo->tqi_cwmin = qi->tqi_cwmin;
543 qinfo->tqi_cwmax = qi->tqi_cwmax;
544 qinfo->tqi_shretry = qi->tqi_shretry;
545 qinfo->tqi_lgretry = qi->tqi_lgretry;
546 qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
547 qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
548 qinfo->tqi_burstTime = qi->tqi_burstTime;
549 qinfo->tqi_readyTime = qi->tqi_readyTime;
554 int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
555 const struct ath9k_tx_queue_info *qinfo)
557 struct ath_common *common = ath9k_hw_common(ah);
558 struct ath9k_tx_queue_info *qi;
559 struct ath9k_hw_capabilities *pCap = &ah->caps;
563 case ATH9K_TX_QUEUE_BEACON:
564 q = pCap->total_queues - 1;
566 case ATH9K_TX_QUEUE_CAB:
567 q = pCap->total_queues - 2;
569 case ATH9K_TX_QUEUE_PSPOLL:
572 case ATH9K_TX_QUEUE_UAPSD:
573 q = pCap->total_queues - 3;
575 case ATH9K_TX_QUEUE_DATA:
576 for (q = 0; q < pCap->total_queues; q++)
577 if (ah->txq[q].tqi_type ==
578 ATH9K_TX_QUEUE_INACTIVE)
580 if (q == pCap->total_queues) {
581 ath_print(common, ATH_DBG_FATAL,
582 "No available TX queue\n");
587 ath_print(common, ATH_DBG_FATAL,
588 "Invalid TX queue type: %u\n", type);
592 ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
595 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
596 ath_print(common, ATH_DBG_FATAL,
597 "TX queue: %u already active\n", q);
600 memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
604 TXQ_FLAG_TXOKINT_ENABLE
605 | TXQ_FLAG_TXERRINT_ENABLE
606 | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
607 qi->tqi_aifs = INIT_AIFS;
608 qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
609 qi->tqi_cwmax = INIT_CWMAX;
610 qi->tqi_shretry = INIT_SH_RETRY;
611 qi->tqi_lgretry = INIT_LG_RETRY;
612 qi->tqi_physCompBuf = 0;
614 qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
615 (void) ath9k_hw_set_txq_props(ah, q, qinfo);
621 bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
623 struct ath9k_hw_capabilities *pCap = &ah->caps;
624 struct ath_common *common = ath9k_hw_common(ah);
625 struct ath9k_tx_queue_info *qi;
627 if (q >= pCap->total_queues) {
628 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
629 "invalid queue: %u\n", q);
633 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
634 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
635 "inactive queue: %u\n", q);
639 ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
641 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
642 ah->txok_interrupt_mask &= ~(1 << q);
643 ah->txerr_interrupt_mask &= ~(1 << q);
644 ah->txdesc_interrupt_mask &= ~(1 << q);
645 ah->txeol_interrupt_mask &= ~(1 << q);
646 ah->txurn_interrupt_mask &= ~(1 << q);
647 ath9k_hw_set_txq_interrupts(ah, qi);
652 bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
654 struct ath9k_hw_capabilities *pCap = &ah->caps;
655 struct ath_common *common = ath9k_hw_common(ah);
656 struct ath9k_channel *chan = ah->curchan;
657 struct ath9k_tx_queue_info *qi;
658 u32 cwMin, chanCwMin, value;
660 if (q >= pCap->total_queues) {
661 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
662 "invalid queue: %u\n", q);
667 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
668 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
669 "inactive queue: %u\n", q);
673 ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
675 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
676 if (chan && IS_CHAN_B(chan))
677 chanCwMin = INIT_CWMIN_11B;
679 chanCwMin = INIT_CWMIN;
681 for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
683 cwMin = qi->tqi_cwmin;
685 REG_WRITE(ah, AR_DLCL_IFS(q),
686 SM(cwMin, AR_D_LCL_IFS_CWMIN) |
687 SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
688 SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
690 REG_WRITE(ah, AR_DRETRY_LIMIT(q),
691 SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
692 SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
693 SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));
695 REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
696 REG_WRITE(ah, AR_DMISC(q),
697 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
699 if (qi->tqi_cbrPeriod) {
700 REG_WRITE(ah, AR_QCBRCFG(q),
701 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
702 SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
703 REG_WRITE(ah, AR_QMISC(q),
704 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
705 (qi->tqi_cbrOverflowLimit ?
706 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
708 if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
709 REG_WRITE(ah, AR_QRDYTIMECFG(q),
710 SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
714 REG_WRITE(ah, AR_DCHNTIME(q),
715 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
716 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
718 if (qi->tqi_burstTime
719 && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
720 REG_WRITE(ah, AR_QMISC(q),
721 REG_READ(ah, AR_QMISC(q)) |
722 AR_Q_MISC_RDYTIME_EXP_POLICY);
726 if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
727 REG_WRITE(ah, AR_DMISC(q),
728 REG_READ(ah, AR_DMISC(q)) |
729 AR_D_MISC_POST_FR_BKOFF_DIS);
731 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
732 REG_WRITE(ah, AR_DMISC(q),
733 REG_READ(ah, AR_DMISC(q)) |
734 AR_D_MISC_FRAG_BKOFF_EN);
736 switch (qi->tqi_type) {
737 case ATH9K_TX_QUEUE_BEACON:
738 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
739 | AR_Q_MISC_FSP_DBA_GATED
740 | AR_Q_MISC_BEACON_USE
741 | AR_Q_MISC_CBR_INCR_DIS1);
743 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
744 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
745 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
746 | AR_D_MISC_BEACON_USE
747 | AR_D_MISC_POST_FR_BKOFF_DIS);
749 case ATH9K_TX_QUEUE_CAB:
750 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
751 | AR_Q_MISC_FSP_DBA_GATED
752 | AR_Q_MISC_CBR_INCR_DIS1
753 | AR_Q_MISC_CBR_INCR_DIS0);
754 value = (qi->tqi_readyTime -
755 (ah->config.sw_beacon_response_time -
756 ah->config.dma_beacon_response_time) -
757 ah->config.additional_swba_backoff) * 1024;
758 REG_WRITE(ah, AR_QRDYTIMECFG(q),
759 value | AR_Q_RDYTIMECFG_EN);
760 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
761 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
762 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
764 case ATH9K_TX_QUEUE_PSPOLL:
765 REG_WRITE(ah, AR_QMISC(q),
766 REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
768 case ATH9K_TX_QUEUE_UAPSD:
769 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
770 AR_D_MISC_POST_FR_BKOFF_DIS);
776 if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
777 REG_WRITE(ah, AR_DMISC(q),
778 REG_READ(ah, AR_DMISC(q)) |
779 SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
780 AR_D_MISC_ARB_LOCKOUT_CNTRL) |
781 AR_D_MISC_POST_FR_BKOFF_DIS);
784 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
785 ah->txok_interrupt_mask |= 1 << q;
787 ah->txok_interrupt_mask &= ~(1 << q);
788 if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
789 ah->txerr_interrupt_mask |= 1 << q;
791 ah->txerr_interrupt_mask &= ~(1 << q);
792 if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
793 ah->txdesc_interrupt_mask |= 1 << q;
795 ah->txdesc_interrupt_mask &= ~(1 << q);
796 if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
797 ah->txeol_interrupt_mask |= 1 << q;
799 ah->txeol_interrupt_mask &= ~(1 << q);
800 if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
801 ah->txurn_interrupt_mask |= 1 << q;
803 ah->txurn_interrupt_mask &= ~(1 << q);
804 ath9k_hw_set_txq_interrupts(ah, qi);
809 int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
810 u32 pa, struct ath_desc *nds, u64 tsf)
812 struct ar5416_desc ads;
813 struct ar5416_desc *adsp = AR5416DESC(ds);
816 if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
819 ads.u.rx = adsp->u.rx;
821 ds->ds_rxstat.rs_status = 0;
822 ds->ds_rxstat.rs_flags = 0;
824 ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
825 ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
827 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
828 ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
829 ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
830 ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
831 ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
832 ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
833 ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
834 ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
836 ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
837 ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
839 ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
841 ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
843 ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
845 ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
847 ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
850 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
851 ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
853 ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
855 ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
856 ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
858 ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
859 ds->ds_rxstat.rs_moreaggr =
860 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
861 ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
862 ds->ds_rxstat.rs_flags =
863 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
864 ds->ds_rxstat.rs_flags |=
865 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
867 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
868 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
869 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
870 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
871 if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
872 ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
874 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
875 if (ads.ds_rxstatus8 & AR_CRCErr)
876 ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
877 else if (ads.ds_rxstatus8 & AR_PHYErr) {
878 ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
879 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
880 ds->ds_rxstat.rs_phyerr = phyerr;
881 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
882 ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
883 else if (ads.ds_rxstatus8 & AR_MichaelErr)
884 ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
890 void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
893 struct ar5416_desc *ads = AR5416DESC(ds);
894 struct ath9k_hw_capabilities *pCap = &ah->caps;
896 ads->ds_ctl1 = size & AR_BufLen;
897 if (flags & ATH9K_RXDESC_INTREQ)
898 ads->ds_ctl1 |= AR_RxIntrReq;
900 ads->ds_rxstatus8 &= ~AR_RxDone;
901 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
902 memset(&(ads->u), 0, sizeof(ads->u));
905 bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
910 REG_SET_BIT(ah, AR_DIAG_SW,
911 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
913 if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
914 0, AH_WAIT_TIMEOUT)) {
915 REG_CLR_BIT(ah, AR_DIAG_SW,
919 reg = REG_READ(ah, AR_OBS_BUS_1);
920 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
921 "RX failed to go idle in 10 ms RXSM=0x%x\n",
927 REG_CLR_BIT(ah, AR_DIAG_SW,
928 (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
934 void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
936 REG_WRITE(ah, AR_RXDP, rxdp);
939 void ath9k_hw_rxena(struct ath_hw *ah)
941 REG_WRITE(ah, AR_CR, AR_CR_RXE);
944 void ath9k_hw_startpcureceive(struct ath_hw *ah)
946 ath9k_enable_mib_counters(ah);
950 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
953 void ath9k_hw_stoppcurecv(struct ath_hw *ah)
955 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
957 ath9k_hw_disable_mib_counters(ah);
960 bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
962 #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
963 #define AH_RX_TIME_QUANTUM 100 /* usec */
964 struct ath_common *common = ath9k_hw_common(ah);
967 REG_WRITE(ah, AR_CR, AR_CR_RXD);
969 /* Wait for rx enable bit to go low */
970 for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
971 if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
973 udelay(AH_TIME_QUANTUM);
977 ath_print(common, ATH_DBG_FATAL,
978 "DMA failed to stop in %d ms "
979 "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
980 AH_RX_STOP_DMA_TIMEOUT / 1000,
982 REG_READ(ah, AR_DIAG_SW));
988 #undef AH_RX_TIME_QUANTUM
989 #undef AH_RX_STOP_DMA_TIMEOUT