2 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 * Purpose: driver entry points for init, open, close, tx and rx.
29 * vt6655_probe - module initial (insmod) driver entry
30 * vt6655_remove - module remove entry
31 * vt6655_init_info - device structure resource allocation function
32 * device_free_info - device structure resource free function
33 * device_get_pci_info - get allocated pci io/mem resource
34 * device_print_info - print out resource
35 * device_rx_srv - rx service function
36 * device_alloc_rx_buf - rx buffer pre-allocated function
37 * device_free_tx_buf - free tx buffer function
38 * device_init_rd0_ring- initial rd dma0 ring
39 * device_init_rd1_ring- initial rd dma1 ring
40 * device_init_td0_ring- initial tx dma0 ring buffer
41 * device_init_td1_ring- initial tx dma1 ring buffer
42 * device_init_registers- initial MAC & BBP & RF internal registers.
43 * device_init_rings- initial tx/rx ring buffer
44 * device_free_rings- free all allocated ring buffer
45 * device_tx_srv- tx interrupt service function
51 #include <linux/file.h>
61 #include <linux/delay.h>
62 #include <linux/kthread.h>
63 #include <linux/slab.h>
65 /*--------------------- Static Definitions -------------------------*/
67 * Define module options
69 MODULE_AUTHOR("VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>");
70 MODULE_LICENSE("GPL");
71 MODULE_DESCRIPTION("VIA Networking Solomon-A/B/G Wireless LAN Adapter Driver");
73 #define DEVICE_PARAM(N, D)
75 #define RX_DESC_MIN0 16
76 #define RX_DESC_MAX0 128
77 #define RX_DESC_DEF0 32
78 DEVICE_PARAM(RxDescriptors0, "Number of receive descriptors0");
80 #define RX_DESC_MIN1 16
81 #define RX_DESC_MAX1 128
82 #define RX_DESC_DEF1 32
83 DEVICE_PARAM(RxDescriptors1, "Number of receive descriptors1");
85 #define TX_DESC_MIN0 16
86 #define TX_DESC_MAX0 128
87 #define TX_DESC_DEF0 32
88 DEVICE_PARAM(TxDescriptors0, "Number of transmit descriptors0");
90 #define TX_DESC_MIN1 16
91 #define TX_DESC_MAX1 128
92 #define TX_DESC_DEF1 64
93 DEVICE_PARAM(TxDescriptors1, "Number of transmit descriptors1");
95 #define INT_WORKS_DEF 20
96 #define INT_WORKS_MIN 10
97 #define INT_WORKS_MAX 64
99 DEVICE_PARAM(int_works, "Number of packets per interrupt services");
101 #define RTS_THRESH_DEF 2347
103 #define FRAG_THRESH_DEF 2346
105 #define SHORT_RETRY_MIN 0
106 #define SHORT_RETRY_MAX 31
107 #define SHORT_RETRY_DEF 8
109 DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits");
111 #define LONG_RETRY_MIN 0
112 #define LONG_RETRY_MAX 15
113 #define LONG_RETRY_DEF 4
115 DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
117 /* BasebandType[] baseband type selected
118 0: indicate 802.11a type
119 1: indicate 802.11b type
120 2: indicate 802.11g type
122 #define BBP_TYPE_MIN 0
123 #define BBP_TYPE_MAX 2
124 #define BBP_TYPE_DEF 2
126 DEVICE_PARAM(BasebandType, "baseband type");
129 * Static vars definitions
/* PCI IDs this driver binds to: the VIA VT6655 wireless chip (0x3253). */
131 static const struct pci_device_id vt6655_pci_id_table[] = {
132 { PCI_VDEVICE(VIA, 0x3253) },
136 /*--------------------- Static Functions --------------------------*/
138 static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
139 static void vt6655_init_info(struct pci_dev *pcid,
140 struct vnt_private **ppDevice);
141 static void device_free_info(struct vnt_private *pDevice);
142 static bool device_get_pci_info(struct vnt_private *, struct pci_dev *pcid);
143 static void device_print_info(struct vnt_private *pDevice);
145 static void device_init_rd0_ring(struct vnt_private *pDevice);
146 static void device_init_rd1_ring(struct vnt_private *pDevice);
147 static void device_init_td0_ring(struct vnt_private *pDevice);
148 static void device_init_td1_ring(struct vnt_private *pDevice);
150 static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx);
151 static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx);
152 static bool device_alloc_rx_buf(struct vnt_private *, struct vnt_rx_desc *);
153 static void device_init_registers(struct vnt_private *pDevice);
154 static void device_free_tx_buf(struct vnt_private *, struct vnt_tx_desc *);
155 static void device_free_td0_ring(struct vnt_private *pDevice);
156 static void device_free_td1_ring(struct vnt_private *pDevice);
157 static void device_free_rd0_ring(struct vnt_private *pDevice);
158 static void device_free_rd1_ring(struct vnt_private *pDevice);
159 static void device_free_rings(struct vnt_private *pDevice);
161 /*--------------------- Export Variables --------------------------*/
163 /*--------------------- Export Functions --------------------------*/
/*
 * vt6655_remove - PCI removal / module-unload callback.
 * All teardown (unregister hw, unmap registers, release regions,
 * free the ieee80211 hw) is delegated to device_free_info().
 */
165 static void vt6655_remove(struct pci_dev *pcid)
167 struct vnt_private *pDevice = pci_get_drvdata(pcid);
171 device_free_info(pDevice);
174 static void device_get_options(struct vnt_private *pDevice)
176 POPTIONS pOpts = &(pDevice->sOpts);
178 pOpts->nRxDescs0 = RX_DESC_DEF0;
179 pOpts->nRxDescs1 = RX_DESC_DEF1;
180 pOpts->nTxDescs[0] = TX_DESC_DEF0;
181 pOpts->nTxDescs[1] = TX_DESC_DEF1;
182 pOpts->int_works = INT_WORKS_DEF;
184 pOpts->short_retry = SHORT_RETRY_DEF;
185 pOpts->long_retry = LONG_RETRY_DEF;
186 pOpts->bbp_type = BBP_TYPE_DEF;
/*
 * device_set_options - copy the option block into the live device
 * fields and log the resulting configuration.  The retry limits and
 * baseband type come from sOpts (filled by device_get_options());
 * the remaining fields are fixed defaults.
 */
190 device_set_options(struct vnt_private *pDevice)
192 pDevice->byShortRetryLimit = pDevice->sOpts.short_retry;
193 pDevice->byLongRetryLimit = pDevice->sOpts.long_retry;
194 pDevice->byBBType = pDevice->sOpts.bbp_type;
/* packet type tracks the baseband type directly */
195 pDevice->byPacketType = pDevice->byBBType;
196 pDevice->byAutoFBCtrl = AUTO_FB_0;
197 pDevice->bUpdateBBVGA = true;
198 pDevice->byPreambleType = 0;
200 pr_debug(" byShortRetryLimit= %d\n", (int)pDevice->byShortRetryLimit);
201 pr_debug(" byLongRetryLimit= %d\n", (int)pDevice->byLongRetryLimit);
202 pr_debug(" byPreambleType= %d\n", (int)pDevice->byPreambleType);
203 pr_debug(" byShortPreamble= %d\n", (int)pDevice->byShortPreamble);
204 pr_debug(" byBBType= %d\n", (int)pDevice->byBBType);
208 * Initialisation of MAC & BBP registers
/*
 * device_init_registers - one-time MAC / baseband / RF bring-up.
 *
 * Resets the hardware, reads the whole EEPROM image, derives the
 * antenna configuration and per-channel power tables from it, programs
 * the MAC control registers (retry limits, TSF counter, timeouts),
 * initializes the baseband, evaluates the hardware radio-off strap,
 * reads the permanent MAC address, resets both DMA ring engines and
 * finally starts the adapter.  Called from vnt_start() after the rings
 * have been set up.
 */
211 static void device_init_registers(struct vnt_private *pDevice)
215 unsigned char byValue;
216 unsigned char byCCKPwrdBm = 0;
217 unsigned char byOFDMPwrdBm = 0;
/* Quiesce the MAC and reset the baseband before reprogramming. */
219 MACbShutdown(pDevice->PortOffset);
220 BBvSoftwareReset(pDevice);
222 /* Do MACbSoftwareReset in MACvInitialize */
223 MACbSoftwareReset(pDevice->PortOffset);
225 pDevice->bAES = false;
227 /* Only used in 11g type, sync with ERP IE */
228 pDevice->bProtectMode = false;
230 pDevice->bNonERPPresent = false;
231 pDevice->bBarkerPreambleMd = false;
/* Conservative rate defaults until association settles them. */
232 pDevice->wCurrentRate = RATE_1M;
233 pDevice->byTopOFDMBasicRate = RATE_24M;
234 pDevice->byTopCCKBasicRate = RATE_1M;
236 /* Target to IF pin while programming to RF chip. */
237 pDevice->byRevId = 0;
240 MACvInitialize(pDevice->PortOffset);
/* Cache the chip revision; used below for rev-specific setup. */
243 VNSvInPortB(pDevice->PortOffset + MAC_REG_LOCALID, &pDevice->byLocalID);
/* EEPROM image is read in one go under the device lock. */
245 spin_lock_irqsave(&pDevice->lock, flags);
247 SROMvReadAllContents(pDevice->PortOffset, pDevice->abyEEPROM);
249 spin_unlock_irqrestore(&pDevice->lock, flags);
251 /* Get Channel range */
252 pDevice->byMinChannel = 1;
253 pDevice->byMaxChannel = CB_MAX_CHANNEL;
/* Antenna configuration from EEPROM: inversion flag plus which of
 * the AUX/MAIN antennas are populated. */
256 byValue = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ANTENNA);
257 if (byValue & EEP_ANTINV)
258 pDevice->bTxRxAntInv = true;
260 pDevice->bTxRxAntInv = false;
262 byValue &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
263 /* if not set default is All */
265 byValue = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
/* Both antennas present: diversity setup (tx on B, rx honours the
 * inversion strap). */
267 if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
268 pDevice->byAntennaCount = 2;
269 pDevice->byTxAntennaMode = ANT_B;
270 pDevice->dwTxAntennaSel = 1;
271 pDevice->dwRxAntennaSel = 1;
273 if (pDevice->bTxRxAntInv)
274 pDevice->byRxAntennaMode = ANT_A;
276 pDevice->byRxAntennaMode = ANT_B;
/* Single-antenna setup: pick A or B from the AUX bit. */
278 pDevice->byAntennaCount = 1;
279 pDevice->dwTxAntennaSel = 0;
280 pDevice->dwRxAntennaSel = 0;
282 if (byValue & EEP_ANTENNA_AUX) {
283 pDevice->byTxAntennaMode = ANT_A;
285 if (pDevice->bTxRxAntInv)
286 pDevice->byRxAntennaMode = ANT_B;
288 pDevice->byRxAntennaMode = ANT_A;
290 pDevice->byTxAntennaMode = ANT_B;
292 if (pDevice->bTxRxAntInv)
293 pDevice->byRxAntennaMode = ANT_A;
295 pDevice->byRxAntennaMode = ANT_B;
299 /* Set initial antenna mode */
300 BBvSetTxAntennaMode(pDevice, pDevice->byTxAntennaMode);
301 BBvSetRxAntennaMode(pDevice, pDevice->byRxAntennaMode);
303 /* zonetype initial */
304 pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
306 if (!pDevice->bZoneRegExist)
307 pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
309 pr_debug("pDevice->byZoneType = %x\n", pDevice->byZoneType);
314 /* Get Desire Power Value */
315 pDevice->byCurPwr = 0xFF;
316 pDevice->byCCKPwr = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_PWR_CCK);
317 pDevice->byOFDMPwrG = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_PWR_OFDMG);
319 /* Load power Table */
/* 2.4 GHz tables are 1-based (index = channel number); a zero entry
 * in the EEPROM falls back to the global CCK/OFDM power value. */
320 for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
321 pDevice->abyCCKPwrTbl[ii + 1] =
322 SROMbyReadEmbedded(pDevice->PortOffset,
323 (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
324 if (pDevice->abyCCKPwrTbl[ii + 1] == 0)
325 pDevice->abyCCKPwrTbl[ii+1] = pDevice->byCCKPwr;
327 pDevice->abyOFDMPwrTbl[ii + 1] =
328 SROMbyReadEmbedded(pDevice->PortOffset,
329 (unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
330 if (pDevice->abyOFDMPwrTbl[ii + 1] == 0)
331 pDevice->abyOFDMPwrTbl[ii + 1] = pDevice->byOFDMPwrG;
333 pDevice->abyCCKDefaultPwr[ii + 1] = byCCKPwrdBm;
334 pDevice->abyOFDMDefaultPwr[ii + 1] = byOFDMPwrdBm;
337 /* recover 12,13 ,14channel for EUROPE by 11 channel */
338 for (ii = 11; ii < 14; ii++) {
339 pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
340 pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
343 /* Load OFDM A Power Table */
/* 5 GHz entries are appended after the 2.4 GHz block. */
344 for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
345 pDevice->abyOFDMPwrTbl[ii + CB_MAX_CHANNEL_24G + 1] =
346 SROMbyReadEmbedded(pDevice->PortOffset,
347 (unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));
349 pDevice->abyOFDMDefaultPwr[ii + CB_MAX_CHANNEL_24G + 1] =
350 SROMbyReadEmbedded(pDevice->PortOffset,
351 (unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
/* Rev-specific MSRCTL1 setup lives on register page 1. */
354 if (pDevice->byLocalID > REV_ID_VT3253_B1) {
355 MACvSelectPage1(pDevice->PortOffset);
357 VNSvOutPortB(pDevice->PortOffset + MAC_REG_MSRCTL + 1,
358 (MSRCTL1_TXPWR | MSRCTL1_CSAPAREN));
360 MACvSelectPage0(pDevice->PortOffset);
363 /* use relative tx timeout and 802.11i D4 */
364 MACvWordRegBitsOn(pDevice->PortOffset,
365 MAC_REG_CFG, (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
367 /* set performance parameter by registry */
368 MACvSetShortRetryLimit(pDevice->PortOffset, pDevice->byShortRetryLimit);
369 MACvSetLongRetryLimit(pDevice->PortOffset, pDevice->byLongRetryLimit);
371 /* reset TSF counter */
372 VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
373 /* enable TSF counter */
374 VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
376 /* initialize BBP registers */
377 BBbVT3253Init(pDevice);
379 if (pDevice->bUpdateBBVGA) {
380 pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
381 pDevice->byBBVGANew = pDevice->byBBVGACurrent;
382 BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
385 BBvSetRxAntennaMode(pDevice, pDevice->byRxAntennaMode);
386 BBvSetTxAntennaMode(pDevice, pDevice->byTxAntennaMode);
388 /* Set BB and packet type at the same time. */
389 /* Set Short Slot Time, xIFS, and RSPINF. */
390 pDevice->wCurrentRate = RATE_54M;
392 pDevice->bRadioOff = false;
394 pDevice->byRadioCtl = SROMbyReadEmbedded(pDevice->PortOffset,
396 pDevice->bHWRadioOff = false;
/* Hardware radio kill-switch: GPIO0 level XOR'ed with the EEPROM
 * inversion bit decides whether the radio starts disabled. */
398 if (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) {
400 MACvGPIOIn(pDevice->PortOffset, &pDevice->byGPIO);
402 if (((pDevice->byGPIO & GPIO0_DATA) &&
403 !(pDevice->byRadioCtl & EEP_RADIOCTL_INV)) ||
404 (!(pDevice->byGPIO & GPIO0_DATA) &&
405 (pDevice->byRadioCtl & EEP_RADIOCTL_INV)))
406 pDevice->bHWRadioOff = true;
409 if (pDevice->bHWRadioOff || pDevice->bRadioControlOff)
410 CARDbRadioPowerOff(pDevice);
412 /* get Permanent network address */
413 SROMvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
414 pr_debug("Network address = %pM\n", pDevice->abyCurrentNetAddr);
416 /* reset Rx pointer (comment was previously mislabeled Tx) */
417 CARDvSafeResetRx(pDevice);
418 /* reset Tx pointer (comment was previously mislabeled Rx) */
419 CARDvSafeResetTx(pDevice);
421 if (pDevice->byLocalID <= REV_ID_VT3253_A1)
422 MACvRegBitsOn(pDevice->PortOffset, MAC_REG_RCR, RCR_WPAERR);
/* Kick both receive DMA engines, then start the MAC. */
425 MACvReceive0(pDevice->PortOffset);
426 MACvReceive1(pDevice->PortOffset);
428 /* start the adapter */
429 MACvStart(pDevice->PortOffset);
432 static void device_print_info(struct vnt_private *pDevice)
434 dev_info(&pDevice->pcid->dev, "MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
435 pDevice->abyCurrentNetAddr, (unsigned long)pDevice->ioaddr,
436 (unsigned long)pDevice->PortOffset, pDevice->pcid->irq);
439 static void vt6655_init_info(struct pci_dev *pcid,
440 struct vnt_private **ppDevice)
442 memset(*ppDevice, 0, sizeof(**ppDevice));
444 (*ppDevice)->pcid = pcid;
445 (*ppDevice)->multicast_limit = 32;
447 spin_lock_init(&((*ppDevice)->lock));
/*
 * device_get_pci_info - cache PCI config-space identifiers and BAR
 * addresses in the private structure and enable bus mastering.
 *
 * Reads revision/subsystem IDs, records BAR0 (memory), BAR1 (I/O) and
 * BAR2 (CIS) start addresses, and stores the pci_dev pointer.
 */
450 static bool device_get_pci_info(struct vnt_private *pDevice,
451 struct pci_dev *pcid)
455 unsigned int cis_addr;
457 pci_read_config_byte(pcid, PCI_REVISION_ID, &pDevice->byRevId);
458 pci_read_config_word(pcid, PCI_SUBSYSTEM_ID, &pDevice->SubSystemID);
459 pci_read_config_word(pcid, PCI_SUBSYSTEM_VENDOR_ID, &pDevice->SubVendorID);
/* NOTE(review): the command-word read below appears unused, and the
 * cast to u16 * looks redundant — candidate for cleanup. */
460 pci_read_config_word(pcid, PCI_COMMAND, (u16 *)&(pci_cmd))
462 pci_set_master(pcid);
464 pDevice->memaddr = pci_resource_start(pcid, 0);
465 pDevice->ioaddr = pci_resource_start(pcid, 1);
467 cis_addr = pci_resource_start(pcid, 2);
469 pDevice->pcid = pcid;
/* Manual bus-master enable; presumably redundant after
 * pci_set_master() above — verify before removing. */
471 pci_read_config_byte(pcid, PCI_COMMAND, &b);
472 pci_write_config_byte(pcid, PCI_COMMAND, (b|PCI_COMMAND_MASTER));
/*
 * device_free_info - release everything tied to the private structure:
 * unregister the mac80211 hw, unmap the register window, release the
 * PCI regions and finally free the ieee80211 hw (which owns the
 * private data itself).
 */
477 static void device_free_info(struct vnt_private *pDevice)
483 ieee80211_unregister_hw(pDevice->hw);
485 if (pDevice->PortOffset)
486 iounmap(pDevice->PortOffset);
489 pci_release_regions(pDevice->pcid);
/* must come last: pDevice lives inside hw->priv */
492 ieee80211_free_hw(pDevice->hw);
/*
 * device_init_rings - allocate the DMA descriptor pool and tx buffers.
 *
 * One coherent allocation holds all four descriptor rings in order
 * RD0, RD1, TD0, TD1; a second coherent allocation holds the tx packet
 * buffers (tx0, tx1, beacon, scratch).  Virtual and bus-address slices
 * for every ring/buffer are then carved out of those two pools.
 * Returns true on success; on failure of the second allocation the
 * first pool is freed again.
 */
495 static bool device_init_rings(struct vnt_private *pDevice)
499 /*allocate all RD/TD rings a single pool*/
500 vir_pool = dma_zalloc_coherent(&pDevice->pcid->dev,
501 pDevice->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc) +
502 pDevice->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc) +
503 pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
504 pDevice->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc),
505 &pDevice->pool_dma, GFP_ATOMIC);
506 if (vir_pool == NULL) {
507 dev_err(&pDevice->pcid->dev, "allocate desc dma memory failed\n");
/* Carve the rx descriptor rings (virtual + bus addresses) out of
 * the start of the pool. */
511 pDevice->aRD0Ring = vir_pool;
512 pDevice->aRD1Ring = vir_pool +
513 pDevice->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc);
515 pDevice->rd0_pool_dma = pDevice->pool_dma;
516 pDevice->rd1_pool_dma = pDevice->rd0_pool_dma +
517 pDevice->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc);
/* Second pool: packet buffers for both tx rings plus beacon and
 * scratch space. */
519 pDevice->tx0_bufs = dma_zalloc_coherent(&pDevice->pcid->dev,
520 pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
521 pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
524 &pDevice->tx_bufs_dma0,
526 if (pDevice->tx0_bufs == NULL) {
527 dev_err(&pDevice->pcid->dev, "allocate buf dma memory failed\n");
/* roll back the descriptor pool on failure */
529 dma_free_coherent(&pDevice->pcid->dev,
530 pDevice->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc) +
531 pDevice->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc) +
532 pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
533 pDevice->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc),
534 vir_pool, pDevice->pool_dma
/* tx descriptor rings follow the rx rings in the same pool */
539 pDevice->td0_pool_dma = pDevice->rd1_pool_dma +
540 pDevice->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc);
542 pDevice->td1_pool_dma = pDevice->td0_pool_dma +
543 pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc);
545 /* vir_pool: pvoid type */
546 pDevice->apTD0Rings = vir_pool
547 + pDevice->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc)
548 + pDevice->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc);
550 pDevice->apTD1Rings = vir_pool
551 + pDevice->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc)
552 + pDevice->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc)
553 + pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc);
/* slice up the tx buffer pool the same way */
555 pDevice->tx1_bufs = pDevice->tx0_bufs +
556 pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;
558 pDevice->tx_beacon_bufs = pDevice->tx1_bufs +
559 pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ;
561 pDevice->pbyTmpBuff = pDevice->tx_beacon_bufs +
564 pDevice->tx_bufs_dma1 = pDevice->tx_bufs_dma0 +
565 pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;
567 pDevice->tx_beacon_dma = pDevice->tx_bufs_dma1 +
568 pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ;
/*
 * device_free_rings - undo device_init_rings(): free the shared
 * descriptor pool and, if it was allocated, the tx buffer pool.
 * Sizes must mirror the ones used at allocation time exactly.
 */
573 static void device_free_rings(struct vnt_private *pDevice)
575 dma_free_coherent(&pDevice->pcid->dev,
576 pDevice->sOpts.nRxDescs0 * sizeof(struct vnt_rx_desc) +
577 pDevice->sOpts.nRxDescs1 * sizeof(struct vnt_rx_desc) +
578 pDevice->sOpts.nTxDescs[0] * sizeof(struct vnt_tx_desc) +
579 pDevice->sOpts.nTxDescs[1] * sizeof(struct vnt_tx_desc)
581 pDevice->aRD0Ring, pDevice->pool_dma
/* tx0_bufs may be NULL if the second allocation failed */
584 if (pDevice->tx0_bufs)
585 dma_free_coherent(&pDevice->pcid->dev,
586 pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
587 pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
590 pDevice->tx0_bufs, pDevice->tx_bufs_dma0
594 static void device_init_rd0_ring(struct vnt_private *pDevice)
597 dma_addr_t curr = pDevice->rd0_pool_dma;
598 struct vnt_rx_desc *pDesc;
600 /* Init the RD0 ring entries */
601 for (i = 0; i < pDevice->sOpts.nRxDescs0;
602 i ++, curr += sizeof(struct vnt_rx_desc)) {
603 pDesc = &(pDevice->aRD0Ring[i]);
604 pDesc->rd_info = alloc_rd_info();
606 if (!device_alloc_rx_buf(pDevice, pDesc))
607 dev_err(&pDevice->pcid->dev, "can not alloc rx bufs\n");
609 pDesc->next = &(pDevice->aRD0Ring[(i+1) % pDevice->sOpts.nRxDescs0]);
610 pDesc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
614 pDevice->aRD0Ring[i-1].next_desc = cpu_to_le32(pDevice->rd0_pool_dma);
615 pDevice->pCurrRD[0] = &(pDevice->aRD0Ring[0]);
618 static void device_init_rd1_ring(struct vnt_private *pDevice)
621 dma_addr_t curr = pDevice->rd1_pool_dma;
622 struct vnt_rx_desc *pDesc;
624 /* Init the RD1 ring entries */
625 for (i = 0; i < pDevice->sOpts.nRxDescs1;
626 i ++, curr += sizeof(struct vnt_rx_desc)) {
627 pDesc = &(pDevice->aRD1Ring[i]);
628 pDesc->rd_info = alloc_rd_info();
630 if (!device_alloc_rx_buf(pDevice, pDesc))
631 dev_err(&pDevice->pcid->dev, "can not alloc rx bufs\n");
633 pDesc->next = &(pDevice->aRD1Ring[(i+1) % pDevice->sOpts.nRxDescs1]);
634 pDesc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc));
638 pDevice->aRD1Ring[i-1].next_desc = cpu_to_le32(pDevice->rd1_pool_dma);
639 pDevice->pCurrRD[1] = &(pDevice->aRD1Ring[0]);
642 static void device_free_rd0_ring(struct vnt_private *pDevice)
646 for (i = 0; i < pDevice->sOpts.nRxDescs0; i++) {
647 struct vnt_rx_desc *pDesc = &(pDevice->aRD0Ring[i]);
648 struct vnt_rd_info *rd_info = pDesc->rd_info;
650 dma_unmap_single(&pDevice->pcid->dev, rd_info->skb_dma,
651 pDevice->rx_buf_sz, DMA_FROM_DEVICE);
653 dev_kfree_skb(rd_info->skb);
655 kfree(pDesc->rd_info);
659 static void device_free_rd1_ring(struct vnt_private *pDevice)
663 for (i = 0; i < pDevice->sOpts.nRxDescs1; i++) {
664 struct vnt_rx_desc *pDesc = &(pDevice->aRD1Ring[i]);
665 struct vnt_rd_info *rd_info = pDesc->rd_info;
667 dma_unmap_single(&pDevice->pcid->dev, rd_info->skb_dma,
668 pDevice->rx_buf_sz, DMA_FROM_DEVICE);
670 dev_kfree_skb(rd_info->skb);
672 kfree(pDesc->rd_info);
676 static void device_init_td0_ring(struct vnt_private *pDevice)
680 struct vnt_tx_desc *pDesc;
682 curr = pDevice->td0_pool_dma;
683 for (i = 0; i < pDevice->sOpts.nTxDescs[0];
684 i++, curr += sizeof(struct vnt_tx_desc)) {
685 pDesc = &(pDevice->apTD0Rings[i]);
686 pDesc->td_info = alloc_td_info();
688 pDesc->td_info->buf = pDevice->tx0_bufs + i * PKT_BUF_SZ;
689 pDesc->td_info->buf_dma = pDevice->tx_bufs_dma0 + i * PKT_BUF_SZ;
691 pDesc->next = &(pDevice->apTD0Rings[(i+1) % pDevice->sOpts.nTxDescs[0]]);
692 pDesc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
696 pDevice->apTD0Rings[i-1].next_desc = cpu_to_le32(pDevice->td0_pool_dma);
697 pDevice->apTailTD[0] = pDevice->apCurrTD[0] = &(pDevice->apTD0Rings[0]);
700 static void device_init_td1_ring(struct vnt_private *pDevice)
704 struct vnt_tx_desc *pDesc;
706 /* Init the TD ring entries */
707 curr = pDevice->td1_pool_dma;
708 for (i = 0; i < pDevice->sOpts.nTxDescs[1];
709 i++, curr += sizeof(struct vnt_tx_desc)) {
710 pDesc = &(pDevice->apTD1Rings[i]);
711 pDesc->td_info = alloc_td_info();
713 pDesc->td_info->buf = pDevice->tx1_bufs + i * PKT_BUF_SZ;
714 pDesc->td_info->buf_dma = pDevice->tx_bufs_dma1 + i * PKT_BUF_SZ;
716 pDesc->next = &(pDevice->apTD1Rings[(i + 1) % pDevice->sOpts.nTxDescs[1]]);
717 pDesc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc));
721 pDevice->apTD1Rings[i-1].next_desc = cpu_to_le32(pDevice->td1_pool_dma);
722 pDevice->apTailTD[1] = pDevice->apCurrTD[1] = &(pDevice->apTD1Rings[0]);
725 static void device_free_td0_ring(struct vnt_private *pDevice)
729 for (i = 0; i < pDevice->sOpts.nTxDescs[0]; i++) {
730 struct vnt_tx_desc *pDesc = &pDevice->apTD0Rings[i];
731 struct vnt_td_info *pTDInfo = pDesc->td_info;
733 dev_kfree_skb(pTDInfo->skb);
734 kfree(pDesc->td_info);
738 static void device_free_td1_ring(struct vnt_private *pDevice)
742 for (i = 0; i < pDevice->sOpts.nTxDescs[1]; i++) {
743 struct vnt_tx_desc *pDesc = &pDevice->apTD1Rings[i];
744 struct vnt_td_info *pTDInfo = pDesc->td_info;
746 dev_kfree_skb(pTDInfo->skb);
747 kfree(pDesc->td_info);
751 /*-----------------------------------------------------------------*/
/*
 * device_rx_srv - service one receive ring from interrupt context.
 *
 * Walks descriptors the NIC has handed back (owner == OWNED_BY_HOST),
 * passes each frame to vnt_receive_frame(), refills the descriptor
 * with a fresh skb and returns ownership to the NIC.  Stops early if a
 * descriptor has no skb or a refill fails.  Returns the number of
 * descriptors processed (used as the interrupt work count).
 */
753 static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx)
755 struct vnt_rx_desc *pRD;
758 for (pRD = pDevice->pCurrRD[uIdx];
759 pRD->rd0.owner == OWNED_BY_HOST;
/* descriptor never got a buffer at init time — nothing to do */
764 if (!pRD->rd_info->skb)
767 if (vnt_receive_frame(pDevice, pRD)) {
768 if (!device_alloc_rx_buf(pDevice, pRD)) {
769 dev_err(&pDevice->pcid->dev,
770 "can not allocate rx buf\n");
/* hand the descriptor back to the NIC */
774 pRD->rd0.owner = OWNED_BY_NIC;
/* remember where to resume on the next interrupt */
777 pDevice->pCurrRD[uIdx] = pRD;
782 static bool device_alloc_rx_buf(struct vnt_private *pDevice,
783 struct vnt_rx_desc *pRD)
785 struct vnt_rd_info *pRDInfo = pRD->rd_info;
787 pRDInfo->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
788 if (pRDInfo->skb == NULL)
792 dma_map_single(&pDevice->pcid->dev,
793 skb_put(pRDInfo->skb, skb_tailroom(pRDInfo->skb)),
794 pDevice->rx_buf_sz, DMA_FROM_DEVICE);
796 *((unsigned int *)&pRD->rd0) = 0; /* FIX cast */
798 pRD->rd0.res_count = cpu_to_le16(pDevice->rx_buf_sz);
799 pRD->rd0.owner = OWNED_BY_NIC;
800 pRD->rd1.req_count = cpu_to_le16(pDevice->rx_buf_sz);
801 pRD->buff_addr = cpu_to_le32(pRDInfo->skb_dma);
/* Auto fallback scheme 0: rows are indexed by (original rate - RATE_18M),
 * columns by retry count; entries are the downgraded tx rate. */
806 static const u8 fallback_rate0[5][5] = {
807 {RATE_18M, RATE_18M, RATE_12M, RATE_12M, RATE_12M},
808 {RATE_24M, RATE_24M, RATE_18M, RATE_12M, RATE_12M},
809 {RATE_36M, RATE_36M, RATE_24M, RATE_18M, RATE_18M},
810 {RATE_48M, RATE_48M, RATE_36M, RATE_24M, RATE_24M},
811 {RATE_54M, RATE_54M, RATE_48M, RATE_36M, RATE_36M}
/* Auto fallback scheme 1: same indexing as fallback_rate0 but with a
 * more aggressive downgrade on repeated retries. */
814 static const u8 fallback_rate1[5][5] = {
815 {RATE_18M, RATE_18M, RATE_12M, RATE_6M, RATE_6M},
816 {RATE_24M, RATE_24M, RATE_18M, RATE_6M, RATE_6M},
817 {RATE_36M, RATE_36M, RATE_24M, RATE_12M, RATE_12M},
818 {RATE_48M, RATE_48M, RATE_24M, RATE_12M, RATE_12M},
819 {RATE_54M, RATE_54M, RATE_36M, RATE_18M, RATE_18M}
/*
 * vnt_int_report_rate - report tx completion status back to mac80211.
 *
 * Decodes the transmit status registers (tsr0 carries the retry count
 * in TSR0_NCR, tsr1 the error flag TSR1_TERR), applies the automatic
 * rate-fallback tables when the frame's FIFO header enabled fallback,
 * and fills in the ieee80211_tx_info status (rate index, retry count,
 * ACK flags) for the completed skb.
 */
822 static int vnt_int_report_rate(struct vnt_private *priv,
823 struct vnt_td_info *context, u8 tsr0, u8 tsr1)
825 struct vnt_tx_fifo_head *fifo_head;
826 struct ieee80211_tx_info *info;
827 struct ieee80211_rate *rate;
829 u8 tx_retry = (tsr0 & TSR0_NCR);
/* which (if any) fallback scheme was requested for this frame */
838 fifo_head = (struct vnt_tx_fifo_head *)context->buf;
839 fb_option = (le16_to_cpu(fifo_head->fifo_ctl) &
840 (FIFOCTL_AUTO_FB_0 | FIFOCTL_AUTO_FB_1));
842 info = IEEE80211_SKB_CB(context->skb);
843 idx = info->control.rates[0].idx;
/* frame went out at a possibly downgraded rate: recompute the index
 * from the fallback tables (rows start at RATE_18M) */
845 if (fb_option && !(tsr1 & TSR1_TERR)) {
849 rate = ieee80211_get_tx_rate(priv->hw, info);
850 tx_rate = rate->hw_value - RATE_18M;
855 if (fb_option & FIFOCTL_AUTO_FB_0)
856 tx_rate = fallback_rate0[tx_rate][retry];
857 else if (fb_option & FIFOCTL_AUTO_FB_1)
858 tx_rate = fallback_rate1[tx_rate][retry];
/* 5 GHz band has no CCK rates, so indexes are offset by RATE_6M */
860 if (info->band == IEEE80211_BAND_5GHZ)
861 idx = tx_rate - RATE_6M;
866 ieee80211_tx_info_clear_status(info);
868 info->status.rates[0].count = tx_retry;
/* only report a rate/ACK status for frames without a tx error */
870 if (!(tsr1 & TSR1_TERR)) {
871 info->status.rates[0].idx = idx;
873 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
874 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
876 info->flags |= IEEE80211_TX_STAT_ACK;
/*
 * device_tx_srv - reap completed transmit descriptors for one ring.
 *
 * Walks from the ring tail while descriptors are in use, stopping at
 * the first descriptor still owned by the NIC.  For each completed
 * chain head (TCR_STP) it logs errors, reports status/rate back to
 * mac80211 via vnt_int_report_rate(), releases the skb and decrements
 * the in-use count.  Returns the number of descriptors reaped (used
 * as the interrupt work count).
 */
882 static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
884 struct vnt_tx_desc *pTD;
886 unsigned char byTsr0;
887 unsigned char byTsr1;
889 for (pTD = pDevice->apTailTD[uIdx]; pDevice->iTDUsed[uIdx] > 0; pTD = pTD->next) {
/* NIC has not finished this descriptor yet — stop reaping */
890 if (pTD->td0.owner == OWNED_BY_NIC)
895 byTsr0 = pTD->td0.tsr0;
896 byTsr1 = pTD->td0.tsr1;
898 /* Only the status of first TD in the chain is correct */
899 if (pTD->td1.tcr & TCR_STP) {
900 if ((pTD->td_info->flags & TD_FLAGS_NETIF_SKB) != 0) {
901 if (!(byTsr1 & TSR1_TERR)) {
903 pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
908 pr_debug(" Tx[%d] dropped & tsr1[%02X] tsr0[%02X]\n",
909 (int)uIdx, byTsr1, byTsr0);
913 if (byTsr1 & TSR1_TERR) {
914 if ((pTD->td_info->flags & TD_FLAGS_PRIV_SKB) != 0) {
915 pr_debug(" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X]\n",
916 (int)uIdx, byTsr1, byTsr0);
/* report completion to mac80211 and release the buffer */
920 vnt_int_report_rate(pDevice, pTD->td_info, byTsr0, byTsr1);
922 device_free_tx_buf(pDevice, pTD);
923 pDevice->iTDUsed[uIdx]--;
/* remember the new tail for the next service pass */
927 pDevice->apTailTD[uIdx] = pTD;
932 static void device_error(struct vnt_private *pDevice, unsigned short status)
934 if (status & ISR_FETALERR) {
935 dev_err(&pDevice->pcid->dev, "Hardware fatal error\n");
937 MACbShutdown(pDevice->PortOffset);
/*
 * device_free_tx_buf - hand a completed tx skb back to mac80211.
 *
 * Called from device_tx_srv() in interrupt context, hence the
 * _irqsafe status-report variant.
 */
942 static void device_free_tx_buf(struct vnt_private *pDevice,
943 struct vnt_tx_desc *pDesc)
945 struct vnt_td_info *pTDInfo = pDesc->td_info;
946 struct sk_buff *skb = pTDInfo->skb;
/* returns skb ownership to mac80211 together with its tx status */
949 ieee80211_tx_status_irqsafe(pDevice->hw, skb);
/*
 * vnt_check_bb_vga - adapt the baseband VGA gain to the current RSSI.
 *
 * Runs on every TBTT interrupt (while associated and on-channel):
 * converts the tracked RSSI to dBm, picks the matching gain level from
 * the threshold table, and only applies a change after it has been
 * observed consistently (debounced via uBBVGADiffCount against
 * BB_VGA_CHANGE_THRESHOLD), applying the very first difference
 * immediately.
 */
955 static void vnt_check_bb_vga(struct vnt_private *priv)
960 if (!priv->bUpdateBBVGA)
/* skip while scanning off-channel or before association/RSSI exists */
963 if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
966 if (!(priv->vif->bss_conf.assoc && priv->uCurrRSSI))
969 RFvRSSITodBm(priv, (u8)priv->uCurrRSSI, &dbm);
/* select the gain level for the current signal strength */
971 for (i = 0; i < BB_VGA_LEVEL; i++) {
972 if (dbm < priv->ldBmThreshold[i]) {
973 priv->byBBVGANew = priv->abyBBVGA[i];
/* no change needed: reset the debounce counter */
978 if (priv->byBBVGANew == priv->byBBVGACurrent) {
979 priv->uBBVGADiffCount = 1;
983 priv->uBBVGADiffCount++;
985 if (priv->uBBVGADiffCount == 1) {
986 /* first VGA diff gain */
987 BBvSetVGAGainOffset(priv, priv->byBBVGANew);
989 dev_dbg(&priv->pcid->dev,
990 "First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
991 (int)dbm, priv->byBBVGANew,
992 priv->byBBVGACurrent,
993 (int)priv->uBBVGADiffCount);
/* persistent difference: commit the new gain */
996 if (priv->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
997 dev_dbg(&priv->pcid->dev,
998 "RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
999 (int)dbm, priv->byBBVGANew,
1000 priv->byBBVGACurrent,
1001 (int)priv->uBBVGADiffCount);
1003 BBvSetVGAGainOffset(priv, priv->byBBVGANew);
/*
 * vnt_interrupt_process - main interrupt service routine body.
 *
 * Runs from the interrupt work item: reads and acknowledges the ISR,
 * accumulates the hardware MIB counters into the low-level stats, and
 * loops servicing beacon/TBTT events, the rx rings, the tx rings and
 * the beacon timer until either no status bits remain or the
 * configured per-interrupt work budget (sOpts.int_works) is exceeded.
 * Interrupts are masked for the duration and re-enabled at the end.
 */
1007 static void vnt_interrupt_process(struct vnt_private *priv)
1009 struct ieee80211_low_level_stats *low_stats = &priv->low_stats;
1013 unsigned long flags;
1015 MACvReadISR(priv->PortOffset, &isr);
/* all-ones reads back when the device is gone (e.g. hot unplug) */
1020 if (isr == 0xffffffff) {
1021 pr_debug("isr = 0xffff\n");
1025 MACvIntDisable(priv->PortOffset);
1027 spin_lock_irqsave(&priv->lock, flags);
1029 /* Read low level stats */
1030 MACvReadMIBCounter(priv->PortOffset, &mib_counter);
/* the 32-bit MIB register packs four 8-bit counters */
1032 low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
1033 low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
1034 low_stats->dot11ACKFailureCount += (mib_counter >> 16) & 0xff;
1035 low_stats->dot11FCSErrorCount += (mib_counter >> 24) & 0xff;
1039 * Must do this after doing rx/tx, cause ISR bit is slow
1040 * than RD/TD write back
1041 * update ISR counter
1043 while (isr && priv->vif) {
/* write-1-to-clear acknowledge of the bits we are about to handle */
1044 MACvWriteISR(priv->PortOffset, isr);
1046 if (isr & ISR_FETALERR) {
1047 pr_debug(" ISR_FETALERR\n");
1048 VNSvOutPortB(priv->PortOffset + MAC_REG_SOFTPWRCTL, 0);
1049 VNSvOutPortW(priv->PortOffset +
1050 MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPECTI);
1051 device_error(priv, isr);
/* TBTT: run VGA tracking, PS wakeup and (re)arm the beacon timer */
1054 if (isr & ISR_TBTT) {
1055 if (priv->op_mode != NL80211_IFTYPE_ADHOC)
1056 vnt_check_bb_vga(priv);
1058 priv->bBeaconSent = false;
1059 if (priv->bEnablePSMode)
1060 PSbIsNextTBTTWakeUp((void *)priv);
1062 if ((priv->op_mode == NL80211_IFTYPE_AP ||
1063 priv->op_mode == NL80211_IFTYPE_ADHOC) &&
1064 priv->vif->bss_conf.enable_beacon) {
1065 MACvOneShotTimer1MicroSec(priv->PortOffset,
1066 (priv->vif->bss_conf.beacon_int - MAKE_BEACON_RESERVED) << 10);
1069 /* TODO: adhoc PS mode */
1073 if (isr & ISR_BNTX) {
1074 if (priv->op_mode == NL80211_IFTYPE_ADHOC) {
1075 priv->bIsBeaconBufReadySet = false;
1076 priv->cbBeaconBufReadySetCnt = 0;
1079 priv->bBeaconSent = true;
/* service rx/tx rings; each returns its descriptor count which is
 * charged against the per-interrupt work budget */
1082 if (isr & ISR_RXDMA0)
1083 max_count += device_rx_srv(priv, TYPE_RXDMA0);
1085 if (isr & ISR_RXDMA1)
1086 max_count += device_rx_srv(priv, TYPE_RXDMA1);
1088 if (isr & ISR_TXDMA0)
1089 max_count += device_tx_srv(priv, TYPE_TXDMA0);
1091 if (isr & ISR_AC0DMA)
1092 max_count += device_tx_srv(priv, TYPE_AC0DMA);
1094 if (isr & ISR_SOFTTIMER1) {
1095 if (priv->vif->bss_conf.enable_beacon)
1096 vnt_beacon_make(priv, priv->vif);
1099 /* If both buffers available wake the queue */
1100 if (AVAIL_TD(priv, TYPE_TXDMA0) &&
1101 AVAIL_TD(priv, TYPE_AC0DMA) &&
1102 ieee80211_queue_stopped(priv->hw, 0))
1103 ieee80211_wake_queues(priv->hw);
1105 MACvReadISR(priv->PortOffset, &isr);
/* restart the receive DMA engines after servicing */
1107 MACvReceive0(priv->PortOffset);
1108 MACvReceive1(priv->PortOffset);
/* budget exhausted — defer remaining work to the next interrupt */
1110 if (max_count > priv->sOpts.int_works)
1114 spin_unlock_irqrestore(&priv->lock, flags);
1116 MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
/*
 * vnt_interrupt_work - workqueue handler scheduled by vnt_interrupt();
 * performs the actual interrupt servicing in process context.
 */
1119 static void vnt_interrupt_work(struct work_struct *work)
1121 struct vnt_private *priv =
1122 container_of(work, struct vnt_private, interrupt_work);
1125 vnt_interrupt_process(priv);
/*
 * vnt_interrupt - hard IRQ handler (IRQF_SHARED): defers all work to
 * the interrupt_work workqueue item and returns immediately.
 */
1128 static irqreturn_t vnt_interrupt(int irq, void *arg)
1130 struct vnt_private *priv = arg;
1133 schedule_work(&priv->interrupt_work);
/*
 * vnt_tx_packet - queue one frame on the appropriate tx DMA ring.
 *
 * Data frames go to the AC0 DMA ring, everything else to TXDMA0.
 * Claims the current descriptor under the device lock, builds the FIFO
 * header outside the lock, then re-locks to arm the descriptor (chain
 * flags, request count, buffer address), flip ownership to the NIC and
 * kick the matching DMA engine.  Returns non-zero when no descriptor
 * is available.
 */
1138 static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
1140 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1141 struct vnt_tx_desc *head_td;
1143 unsigned long flags;
1145 spin_lock_irqsave(&priv->lock, flags);
/* data frames use the AC0 ring, management/control use TXDMA0 */
1147 if (ieee80211_is_data(hdr->frame_control))
1148 dma_idx = TYPE_AC0DMA;
1150 dma_idx = TYPE_TXDMA0;
1152 if (AVAIL_TD(priv, dma_idx) < 1) {
1153 spin_unlock_irqrestore(&priv->lock, flags);
/* claim the current descriptor and advance the ring pointer */
1157 head_td = priv->apCurrTD[dma_idx];
1159 head_td->td1.tcr = 0;
1161 head_td->td_info->skb = skb;
1163 if (dma_idx == TYPE_AC0DMA)
1164 head_td->td_info->flags = TD_FLAGS_NETIF_SKB;
1166 priv->apCurrTD[dma_idx] = head_td->next;
1168 spin_unlock_irqrestore(&priv->lock, flags);
/* build the tx FIFO header/descriptor contents without the lock */
1170 vnt_generate_fifo_header(priv, dma_idx, head_td, skb);
1172 spin_lock_irqsave(&priv->lock, flags);
1174 priv->bPWBitOn = false;
1176 /* Set TSR1 & ReqCount in TxDescHead */
1177 head_td->td1.tcr |= (TCR_STP | TCR_EDP | EDMSDU);
1178 head_td->td1.req_count = cpu_to_le16(head_td->td_info->req_count);
1180 head_td->buff_addr = cpu_to_le32(head_td->td_info->buf_dma);
1182 /* Poll Transmit the adapter */
/* ownership flip must be ordered after the descriptor writes above */
1184 head_td->td0.owner = OWNED_BY_NIC;
1185 wmb(); /* second memory barrier */
1187 if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
1188 MACvTransmitAC0(priv->PortOffset);
1190 MACvTransmit0(priv->PortOffset);
1192 priv->iTDUsed[dma_idx]++;
1194 spin_unlock_irqrestore(&priv->lock, flags);
/*
 * vnt_tx_80211 - mac80211 .tx callback.
 *
 * Stops the queues, then hands the frame to vnt_tx_packet(); if that
 * fails (no free descriptor) the skb is freed and the queues are woken
 * again.  On success the queues are re-woken from the tx-complete
 * interrupt path.
 */
1199 static void vnt_tx_80211(struct ieee80211_hw *hw,
1200 struct ieee80211_tx_control *control,
1201 struct sk_buff *skb)
1203 struct vnt_private *priv = hw->priv;
1205 ieee80211_stop_queues(hw);
1207 if (vnt_tx_packet(priv, skb)) {
1208 ieee80211_free_txskb(hw, skb);
1210 ieee80211_wake_queues(hw);
/*
 * vnt_start - mac80211 .start callback: bring the interface up.
 *
 * Allocates the DMA rings, requests the (shared) IRQ line, initializes
 * the four RX/TX descriptor rings and the MAC/BBP/RF registers, enables
 * device interrupts and finally wakes the TX queues.
 * NOTE(review): error-return lines after device_init_rings()/request_irq()
 * failures are missing from this extraction — confirm cleanup ordering.
 */
1214 static int vnt_start(struct ieee80211_hw *hw)
1216 struct vnt_private *priv = hw->priv;
1219 priv->rx_buf_sz = PKT_BUF_SZ;
1220 if (!device_init_rings(priv))
1223 ret = request_irq(priv->pcid->irq, &vnt_interrupt,
1224 IRQF_SHARED, "vt6655", priv);
1226 dev_dbg(&priv->pcid->dev, "failed to start irq\n");
1230 dev_dbg(&priv->pcid->dev, "call device init rd0 ring\n");
1231 device_init_rd0_ring(priv);
1232 device_init_rd1_ring(priv);
1233 device_init_td0_ring(priv);
1234 device_init_td1_ring(priv);
1236 device_init_registers(priv);
1238 dev_dbg(&priv->pcid->dev, "call MACvIntEnable\n");
1239 MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
1241 ieee80211_wake_queues(hw);
/*
 * vnt_stop - mac80211 .stop callback: tear down what vnt_start set up,
 * in reverse dependency order.  Queues are stopped and the deferred
 * interrupt work is flushed before the MAC is shut down and reset, the
 * radio is powered off, and all rings plus the IRQ are released.
 */
1246 static void vnt_stop(struct ieee80211_hw *hw)
1248 struct vnt_private *priv = hw->priv;
1250 ieee80211_stop_queues(hw);
/* Ensure no vnt_interrupt_work instance touches the hardware after this */
1252 cancel_work_sync(&priv->interrupt_work);
1254 MACbShutdown(priv->PortOffset);
1255 MACbSoftwareReset(priv->PortOffset);
1256 CARDbRadioPowerOff(priv);
1258 device_free_td0_ring(priv);
1259 device_free_td1_ring(priv);
1260 device_free_rd0_ring(priv);
1261 device_free_rd1_ring(priv);
1262 device_free_rings(priv);
1264 free_irq(priv->pcid->irq, priv);
/*
 * vnt_add_interface - mac80211 .add_interface callback.
 *
 * Programs mode-specific RCR/HOSTCR bits for the new virtual interface
 * (ADHOC and AP disable unicast filtering and set their HOSTCR mode bit)
 * and records the interface type in priv->op_mode.
 * NOTE(review): the STATION case body, break statements and the default
 * case are missing from this extraction.
 */
1267 static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1269 struct vnt_private *priv = hw->priv;
1273 switch (vif->type) {
1274 case NL80211_IFTYPE_STATION:
1276 case NL80211_IFTYPE_ADHOC:
1277 MACvRegBitsOff(priv->PortOffset, MAC_REG_RCR, RCR_UNICAST);
1279 MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
1282 case NL80211_IFTYPE_AP:
1283 MACvRegBitsOff(priv->PortOffset, MAC_REG_RCR, RCR_UNICAST);
1285 MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
1292 priv->op_mode = vif->type;
/*
 * vnt_remove_interface - mac80211 .remove_interface callback.
 *
 * Reverses vnt_add_interface: for ADHOC/AP it stops automatic beacon TX
 * (TCR_AUTOBCNTX), stops the TSF counter (TFTCTL_TSFCNTREN) and clears
 * the mode bit in HOSTCR, then resets op_mode to UNSPECIFIED.
 * NOTE(review): break statements are missing from this extraction.
 */
1297 static void vnt_remove_interface(struct ieee80211_hw *hw,
1298 struct ieee80211_vif *vif)
1300 struct vnt_private *priv = hw->priv;
1302 switch (vif->type) {
1303 case NL80211_IFTYPE_STATION:
1305 case NL80211_IFTYPE_ADHOC:
1306 MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
1307 MACvRegBitsOff(priv->PortOffset,
1308 MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
1309 MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
1311 case NL80211_IFTYPE_AP:
1312 MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
1313 MACvRegBitsOff(priv->PortOffset,
1314 MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
1315 MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
1321 priv->op_mode = NL80211_IFTYPE_UNSPECIFIED;
/*
 * vnt_config - mac80211 .config callback: apply hardware configuration
 * changes flagged in @changed.
 *
 * Handles power-save enable/disable, channel switches (choosing 11a vs
 * 11g baseband type by band and reprogramming the PHY when it changes),
 * and TX power updates (rate pinned to 1M for 11b, else 54M).
 * NOTE(review): else lines and the return statement are missing from
 * this extraction.
 */
1325 static int vnt_config(struct ieee80211_hw *hw, u32 changed)
1327 struct vnt_private *priv = hw->priv;
1328 struct ieee80211_conf *conf = &hw->conf;
1331 if (changed & IEEE80211_CONF_CHANGE_PS) {
1332 if (conf->flags & IEEE80211_CONF_PS)
1333 PSvEnablePowerSaving(priv, conf->listen_interval);
1335 PSvDisablePowerSaving(priv);
1338 if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
1339 (conf->flags & IEEE80211_CONF_OFFCHANNEL)) {
1340 set_channel(priv, conf->chandef.chan);
/* 5 GHz implies 802.11a PHY parameters; 2.4 GHz uses 11g */
1342 if (conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
1343 bb_type = BB_TYPE_11A;
1345 bb_type = BB_TYPE_11G;
/* Only touch the PHY when the baseband type actually changes */
1347 if (priv->byBBType != bb_type) {
1348 priv->byBBType = bb_type;
1350 CARDbSetPhyParameter(priv, priv->byBBType);
1354 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1355 if (priv->byBBType == BB_TYPE_11B)
1356 priv->wCurrentRate = RATE_1M;
1358 priv->wCurrentRate = RATE_54M;
1360 RFbSetPower(priv, priv->wCurrentRate,
1361 conf->chandef.chan->hw_value);
/*
 * vnt_bss_info_changed - mac80211 .bss_info_changed callback.
 *
 * Applies BSS-level configuration changes to the hardware: BSSID (written
 * under priv->lock), basic rates, ERP preamble/CTS-protection/slot-time,
 * TX power, beacon enable/disable, and — for non-AP modes — TSF and TBTT
 * synchronisation after association.
 * NOTE(review): this extraction is lossy (missing else branches, bit
 * arguments to some register writes, and the @changed parameter line).
 */
1367 static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1368 struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf,
1371 struct vnt_private *priv = hw->priv;
1373 priv->current_aid = conf->aid;
1375 if (changed & BSS_CHANGED_BSSID && conf->bssid) {
1376 unsigned long flags;
1378 spin_lock_irqsave(&priv->lock, flags);
1380 MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid);
1382 spin_unlock_irqrestore(&priv->lock, flags);
1385 if (changed & BSS_CHANGED_BASIC_RATES) {
1386 priv->basic_rates = conf->basic_rates;
1388 CARDvUpdateBasicTopRate(priv);
1390 dev_dbg(&priv->pcid->dev,
1391 "basic rates %x\n", conf->basic_rates);
1394 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1395 if (conf->use_short_preamble) {
1396 MACvEnableBarkerPreambleMd(priv->PortOffset);
/* NOTE(review): byPreambleType is assigned a bool here — looks like it
 * was originally a numeric preamble-type byte; confirm intent. */
1397 priv->byPreambleType = true;
1399 MACvDisableBarkerPreambleMd(priv->PortOffset);
1400 priv->byPreambleType = false;
1404 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1405 if (conf->use_cts_prot)
1406 MACvEnableProtectMD(priv->PortOffset);
1408 MACvDisableProtectMD(priv->PortOffset);
1411 if (changed & BSS_CHANGED_ERP_SLOT) {
1412 if (conf->use_short_slot)
1413 priv->bShortSlotTime = true;
1415 priv->bShortSlotTime = false;
/* Slot-time change requires PHY reconfiguration and VGA gain refresh */
1417 CARDbSetPhyParameter(priv, priv->byBBType);
1418 BBvSetVGAGainOffset(priv, priv->abyBBVGA[0]);
1421 if (changed & BSS_CHANGED_TXPOWER)
1422 RFbSetPower(priv, priv->wCurrentRate,
1423 conf->chandef.chan->hw_value);
1425 if (changed & BSS_CHANGED_BEACON_ENABLED) {
1426 dev_dbg(&priv->pcid->dev,
1427 "Beacon enable %d\n", conf->enable_beacon);
1429 if (conf->enable_beacon) {
1430 vnt_beacon_enable(priv, vif, conf);
1432 MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR,
1435 MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR,
/* Sync local TSF/TBTT to the BSS once associated (non-AP modes only) */
1440 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
1441 priv->op_mode != NL80211_IFTYPE_AP) {
1442 if (conf->assoc && conf->beacon_rate) {
1443 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
1446 CARDbSetBeaconPeriod(priv, conf->beacon_int);
1448 CARDvSetFirstNextTBTT(priv, conf->beacon_int);
1450 VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL,
1452 VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL,
/*
 * vnt_prepare_multicast - mac80211 .prepare_multicast callback.
 *
 * Builds the 64-bit multicast hash filter: the top 6 bits of the CRC of
 * each address select one bit of the filter.  Also caches the list size
 * for vnt_configure()'s "> 2 addresses" shortcut.  Returns the filter
 * (return line not visible in this extraction).
 */
1458 static u64 vnt_prepare_multicast(struct ieee80211_hw *hw,
1459 struct netdev_hw_addr_list *mc_list)
1461 struct vnt_private *priv = hw->priv;
1462 struct netdev_hw_addr *ha;
1466 netdev_hw_addr_list_for_each(ha, mc_list) {
1467 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1469 mc_filter |= 1ULL << (bit_nr & 0x3f);
1472 priv->mc_list_count = mc_list->count;
/*
 * vnt_configure - mac80211 .configure_filter callback.
 *
 * Translates the requested RX filter flags into the RCR register and the
 * MAR0/MAR1 multicast hash registers (page 1 of the register file).  With
 * more than two multicast addresses the hash is opened fully (all ones);
 * otherwise the precomputed 64-bit hash from vnt_prepare_multicast() is
 * written.  OTHER_BSS/promiscuous-probe filtering toggles RCR_BSSID.
 * NOTE(review): else lines and some closing braces are missing from this
 * extraction.
 */
1477 static void vnt_configure(struct ieee80211_hw *hw,
1478 unsigned int changed_flags, unsigned int *total_flags, u64 multicast)
1480 struct vnt_private *priv = hw->priv;
/* Only these three filter flags are supported by the hardware */
1483 *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
1485 VNSvInPortB(priv->PortOffset + MAC_REG_RCR, &rx_mode);
1487 dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
1489 if (changed_flags & FIF_ALLMULTI) {
1490 if (*total_flags & FIF_ALLMULTI) {
1491 unsigned long flags;
1493 spin_lock_irqsave(&priv->lock, flags);
1495 if (priv->mc_list_count > 2) {
1496 MACvSelectPage1(priv->PortOffset);
1498 VNSvOutPortD(priv->PortOffset +
1499 MAC_REG_MAR0, 0xffffffff);
1500 VNSvOutPortD(priv->PortOffset +
1501 MAC_REG_MAR0 + 4, 0xffffffff);
1503 MACvSelectPage0(priv->PortOffset);
1505 MACvSelectPage1(priv->PortOffset);
1507 VNSvOutPortD(priv->PortOffset +
1508 MAC_REG_MAR0, (u32)multicast);
1509 VNSvOutPortD(priv->PortOffset +
1511 (u32)(multicast >> 32));
1513 MACvSelectPage0(priv->PortOffset);
1516 spin_unlock_irqrestore(&priv->lock, flags);
1518 rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
1520 rx_mode &= ~(RCR_MULTICAST | RCR_BROADCAST);
1524 if (changed_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)) {
1525 rx_mode |= RCR_MULTICAST | RCR_BROADCAST;
1527 if (*total_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC))
1528 rx_mode &= ~RCR_BSSID;
1530 rx_mode |= RCR_BSSID;
1533 VNSvOutPortB(priv->PortOffset + MAC_REG_RCR, rx_mode);
1535 dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
/*
 * vnt_set_key - mac80211 .set_key callback.
 *
 * Installs a key via vnt_set_keys() and, on the removal path, releases
 * the hardware key slot tracked in priv->key_entry_inuse.
 * NOTE(review): the cmd switch structure and return statements are
 * missing from this extraction — the two visible fragments are the
 * set-key call and the disable-key slot release.
 */
1538 static int vnt_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1539 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
1540 struct ieee80211_key_conf *key)
1542 struct vnt_private *priv = hw->priv;
1546 if (vnt_set_keys(hw, sta, vif, key))
1550 if (test_bit(key->hw_key_idx, &priv->key_entry_inuse))
1551 clear_bit(key->hw_key_idx, &priv->key_entry_inuse);
/*
 * vnt_get_stats - mac80211 .get_stats callback: copy the driver's cached
 * low-level statistics into the caller's buffer.
 */
1559 static int vnt_get_stats(struct ieee80211_hw *hw,
1560 struct ieee80211_low_level_stats *stats)
1562 struct vnt_private *priv = hw->priv;
1564 memcpy(stats, &priv->low_stats, sizeof(*stats));
/*
 * vnt_get_tsf - mac80211 .get_tsf callback: read the current hardware
 * TSF counter via CARDbGetCurrentTSF() (return of tsf not visible in
 * this extraction).
 */
1569 static u64 vnt_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1571 struct vnt_private *priv = hw->priv;
1574 CARDbGetCurrentTSF(priv, &tsf);
/*
 * vnt_set_tsf - mac80211 .set_tsf callback: reprogram the next TBTT from
 * the supplied TSF value and the interface's beacon interval.
 */
1579 static void vnt_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1582 struct vnt_private *priv = hw->priv;
1584 CARDvUpdateNextTBTT(priv, tsf, vif->bss_conf.beacon_int);
/*
 * vnt_reset_tsf - mac80211 .reset_tsf callback: zero the hardware TSF
 * counter by writing TFTCTL_TSFCNTRST to the TFTCTL register.
 */
1587 static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1589 struct vnt_private *priv = hw->priv;
1591 /* reset TSF counter */
1592 VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
/*
 * vnt_mac_ops - mac80211 callback table for this driver.
 * NOTE(review): the .tx/.start/.stop entries (original lines 1596-1598)
 * are missing from this extraction but are clearly implemented above.
 */
1595 static const struct ieee80211_ops vnt_mac_ops = {
1599 .add_interface = vnt_add_interface,
1600 .remove_interface = vnt_remove_interface,
1601 .config = vnt_config,
1602 .bss_info_changed = vnt_bss_info_changed,
1603 .prepare_multicast = vnt_prepare_multicast,
1604 .configure_filter = vnt_configure,
1605 .set_key = vnt_set_key,
1606 .get_stats = vnt_get_stats,
1607 .get_tsf = vnt_get_tsf,
1608 .set_tsf = vnt_set_tsf,
1609 .reset_tsf = vnt_reset_tsf,
/*
 * vnt_init - final mac80211 registration step, called from probe.
 *
 * Publishes the permanent MAC address, initializes the supported-band
 * tables, registers the ieee80211_hw with mac80211, marks mac_hw active
 * and powers the radio down until the interface is started.
 */
1612 static int vnt_init(struct vnt_private *priv)
1614 SET_IEEE80211_PERM_ADDR(priv->hw, priv->abyCurrentNetAddr);
1616 vnt_init_bands(priv);
1618 if (ieee80211_register_hw(priv->hw))
1621 priv->mac_hw = true;
/* Radio stays off until vnt_start() brings the interface up */
1623 CARDbRadioPowerOff(priv);
/*
 * vt6655_probe - PCI probe entry point.
 *
 * Allocates the ieee80211_hw/private structure, enables the PCI device,
 * maps the register BAR, resets and initializes the MAC, reads the MAC
 * address and RF type from EEPROM, fills in wiphy/hw capabilities and
 * registers with mac80211 (via vnt_init, whose call at original line
 * ~1727 is not visible in this lossy extraction).  Each failure path
 * frees the partially-initialized device info before returning.
 */
1629 vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
1631 struct vnt_private *priv;
1632 struct ieee80211_hw *hw;
1633 struct wiphy *wiphy;
1636 dev_notice(&pcid->dev,
1637 "%s Ver. %s\n", DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
1639 dev_notice(&pcid->dev,
1640 "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
1642 hw = ieee80211_alloc_hw(sizeof(*priv), &vnt_mac_ops);
1644 dev_err(&pcid->dev, "could not register ieee80211_hw\n");
1650 vt6655_init_info(pcid, &priv);
1654 SET_IEEE80211_DEV(priv->hw, &pcid->dev);
1656 if (pci_enable_device(pcid)) {
1657 device_free_info(priv);
1662 "Before get pci_info memaddr is %x\n", priv->memaddr);
1664 if (!device_get_pci_info(priv, pcid)) {
1665 dev_err(&pcid->dev, ": Failed to find PCI device.\n");
1666 device_free_info(priv);
/* Map the memory BAR; all MACv*/VNSv* accessors go through PortOffset */
1670 priv->PortOffset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
1672 if (!priv->PortOffset) {
1673 dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
1674 device_free_info(priv);
1678 rc = pci_request_regions(pcid, DEVICE_NAME);
1680 dev_err(&pcid->dev, ": Failed to find PCI device\n");
1681 device_free_info(priv);
/* Device DMA engines address 32 bits only */
1685 if (dma_set_mask(&pcid->dev, DMA_BIT_MASK(32))) {
1686 dev_err(&pcid->dev, ": Failed to set dma 32 bit mask\n");
1687 device_free_info(priv);
1691 INIT_WORK(&priv->interrupt_work, vnt_interrupt_work);
1694 if (!MACbSoftwareReset(priv->PortOffset)) {
1695 dev_err(&pcid->dev, ": Failed to access MAC hardware..\n");
1696 device_free_info(priv);
1699 /* initial to reload eeprom */
1700 MACvInitialize(priv->PortOffset);
1701 MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
1704 priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE);
1705 priv->byRFType &= RF_MASK;
1707 dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
1709 device_get_options(priv);
1710 device_set_options(priv);
1712 wiphy = priv->hw->wiphy;
1714 wiphy->frag_threshold = FRAG_THRESH_DEF;
1715 wiphy->rts_threshold = RTS_THRESH_DEF;
1716 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1717 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
1719 ieee80211_hw_set(priv->hw, TIMING_BEACON_ONLY);
1720 ieee80211_hw_set(priv->hw, SIGNAL_DBM);
1721 ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
1722 ieee80211_hw_set(priv->hw, REPORTS_TX_ACK_STATUS);
1723 ieee80211_hw_set(priv->hw, SUPPORTS_PS);
1725 priv->hw->max_signal = 100;
1730 device_print_info(priv);
1731 pci_set_drvdata(pcid, priv);
1736 /*------------------------------------------------------------------*/
/*
 * vt6655_suspend - legacy PCI .suspend hook.
 *
 * Saves PCI config space, shuts the MAC down, disables the device and
 * moves it to the state requested by the PM core — all under priv->lock.
 * NOTE(review): pci_disable_device()/pci_set_power_state() can sleep;
 * calling them inside spin_lock_irqsave looks questionable — confirm
 * against the full source and PM documentation.
 */
1739 static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
1741 struct vnt_private *priv = pci_get_drvdata(pcid);
1742 unsigned long flags;
1744 spin_lock_irqsave(&priv->lock, flags);
1746 pci_save_state(pcid);
1748 MACbShutdown(priv->PortOffset);
1750 pci_disable_device(pcid);
1751 pci_set_power_state(pcid, pci_choose_state(pcid, state));
1753 spin_unlock_irqrestore(&priv->lock, flags);
/*
 * vt6655_resume - legacy PCI .resume hook: restore full power (D0),
 * clear wake enables and restore the saved PCI config space.
 */
1758 static int vt6655_resume(struct pci_dev *pcid)
1761 pci_set_power_state(pcid, PCI_D0);
1762 pci_enable_wake(pcid, PCI_D0, 0);
1763 pci_restore_state(pcid);
/*
 * Module registration: export the PCI ID table for module autoloading,
 * declare the pci_driver with the probe/remove and (legacy) PM hooks
 * defined above, and let module_pci_driver() generate init/exit.
 */
1769 MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);
1771 static struct pci_driver device_driver = {
1772 .name = DEVICE_NAME,
1773 .id_table = vt6655_pci_id_table,
1774 .probe = vt6655_probe,
1775 .remove = vt6655_remove,
/* Legacy suspend/resume pair; guarded by CONFIG_PM in the full source? */
1777 .suspend = vt6655_suspend,
1778 .resume = vt6655_resume,
1782 module_pci_driver(device_driver);