if (!is_valid_ether_addr(ndev->dev_addr)) {
/* try reading from mac */
-
+
mac_src = "chip";
for (i = 0; i < 6; i++)
ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
irqflags = DEFAULT_TRIGGER;
}
-
+
irqflags |= IRQF_SHARED;
if (request_irq(dev->irq, &dm9000_interrupt, irqflags, dev->name, dev))
if (netif_msg_ifdown(db))
dev_dbg(db->dev, "shutting down %s\n", ndev->name);
- cancel_delayed_work(&db->phy_poll);
+ cancel_delayed_work_sync(&db->phy_poll);
netif_stop_queue(ndev);
netif_carrier_off(ndev);
/* The DM9000 data sheets say we should be able to
* poll the ERRE bit in EPCR to wait for the EEPROM
* operation. From testing several chips, this bit
- * does not seem to work.
+ * does not seem to work.
*
* We attempt to use the bit, but fall back to the
* timeout (which is why we do not return an error
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *ptr);
static void pcnet32_purge_tx_ring(struct net_device *dev);
- static int pcnet32_alloc_ring(struct net_device *dev, char *name);
+ static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
static void pcnet32_free_ring(struct net_device *dev);
static void pcnet32_check_media(struct net_device *dev, int verbose);
err_free_ring:
pcnet32_free_ring(dev);
err_free_consistent:
- pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
+ pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
lp->init_block, lp->init_dma_addr);
err_free_netdev:
free_netdev(dev);
}
/* if any allocation fails, caller must also call pcnet32_free_ring */
- static int pcnet32_alloc_ring(struct net_device *dev, char *name)
+ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
{
struct pcnet32_private *lp = netdev_priv(dev);
unregister_netdev(dev);
pcnet32_free_ring(dev);
release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
- pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
+ pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
lp->init_block, lp->init_dma_addr);
free_netdev(dev);
pci_disable_device(pdev);
unregister_netdev(pcnet32_dev);
pcnet32_free_ring(pcnet32_dev);
release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
- pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
+ pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
lp->init_block, lp->init_dma_addr);
free_netdev(pcnet32_dev);
pcnet32_dev = next_dev;
#include "s2io.h"
#include "s2io-regs.h"
- #define DRV_VERSION "2.0.26.23"
+ #define DRV_VERSION "2.0.26.24"
/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
struct pci_dev *tdev = NULL;
while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
- if (tdev->bus == s2io_pdev->bus->parent)
+ if (tdev->bus == s2io_pdev->bus->parent) {
pci_dev_put(tdev);
return 1;
+ }
}
}
return 0;
TTI_DATA1_MEM_TX_URNG_B(0x10) |
TTI_DATA1_MEM_TX_URNG_C(0x30) |
TTI_DATA1_MEM_TX_TIMER_AC_EN;
-
- if (use_continuous_tx_intrs && (link == LINK_UP))
- val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
+ if (i == 0)
+ if (use_continuous_tx_intrs && (link == LINK_UP))
+ val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
writeq(val64, &bar0->tti_data1_mem);
- val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
- TTI_DATA2_MEM_TX_UFC_B(0x20) |
- TTI_DATA2_MEM_TX_UFC_C(0x40) |
- TTI_DATA2_MEM_TX_UFC_D(0x80);
+ if (nic->config.intr_type == MSI_X) {
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+ TTI_DATA2_MEM_TX_UFC_B(0x100) |
+ TTI_DATA2_MEM_TX_UFC_C(0x200) |
+ TTI_DATA2_MEM_TX_UFC_D(0x300);
+ } else {
+ if ((nic->config.tx_steering_type ==
+ TX_DEFAULT_STEERING) &&
+ (config->tx_fifo_num > 1) &&
+ (i >= nic->udp_fifo_idx) &&
+ (i < (nic->udp_fifo_idx +
+ nic->total_udp_fifos)))
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
+ TTI_DATA2_MEM_TX_UFC_B(0x80) |
+ TTI_DATA2_MEM_TX_UFC_C(0x100) |
+ TTI_DATA2_MEM_TX_UFC_D(0x120);
+ else
+ val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+ TTI_DATA2_MEM_TX_UFC_B(0x20) |
+ TTI_DATA2_MEM_TX_UFC_C(0x40) |
+ TTI_DATA2_MEM_TX_UFC_D(0x80);
+ }
writeq(val64, &bar0->tti_data2_mem);
if (block_no)
rxd_index += (block_no * ring->rxd_count);
- if ((block_no == block_no1) &&
+ if ((block_no == block_no1) &&
(off == ring->rx_curr_get_info.offset) &&
(rxdp->Host_Control)) {
DBG_PRINT(INTR_DBG, "%s: Get and Put",
first_rxdp->Control_1 |= RXD_OWN_XENA;
}
stats->mem_alloc_fail_cnt++;
-
+
return -ENOMEM ;
}
stats->mem_allocated += skb->truesize;
}
}
+ static int s2io_chk_rx_buffers(struct ring_info *ring)
+ {
+ if (fill_rx_buffers(ring) == -ENOMEM) {
+ DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
+ DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
+ }
+ return 0;
+ }
+
/**
* s2io_poll - Rx interrupt handler for NAPI support
* @napi : pointer to the napi structure.
* 0 on success and 1 if there are No Rx packets to be processed.
*/
- static int s2io_poll(struct napi_struct *napi, int budget)
+ static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
- struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
- struct net_device *dev = nic->dev;
- int pkt_cnt = 0, org_pkts_to_process;
- struct mac_info *mac_control;
+ struct ring_info *ring = container_of(napi, struct ring_info, napi);
+ struct net_device *dev = ring->dev;
struct config_param *config;
+ struct mac_info *mac_control;
+ int pkts_processed = 0;
+ u8 *addr = NULL, val8 = 0;
+ struct s2io_nic *nic = dev->priv;
struct XENA_dev_config __iomem *bar0 = nic->bar0;
- int i;
+ int budget_org = budget;
- mac_control = &nic->mac_control;
config = &nic->config;
+ mac_control = &nic->mac_control;
- nic->pkts_to_process = budget;
- org_pkts_to_process = nic->pkts_to_process;
+ if (unlikely(!is_s2io_card_up(nic)))
+ return 0;
- writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
- readl(&bar0->rx_traffic_int);
+ pkts_processed = rx_intr_handler(ring, budget);
+ s2io_chk_rx_buffers(ring);
- for (i = 0; i < config->rx_ring_num; i++) {
- rx_intr_handler(&mac_control->rings[i]);
- pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
- if (!nic->pkts_to_process) {
- /* Quota for the current iteration has been met */
- goto no_rx;
- }
+ if (pkts_processed < budget_org) {
+ netif_rx_complete(dev, napi);
+ /*Re Enable MSI-Rx Vector*/
+ addr = (u8 *)&bar0->xmsi_mask_reg;
+ addr += 7 - ring->ring_no;
+ val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
+ writeb(val8, addr);
+ val8 = readb(addr);
}
+ return pkts_processed;
+ }
+ static int s2io_poll_inta(struct napi_struct *napi, int budget)
+ {
+ struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
+ struct ring_info *ring;
+ struct net_device *dev = nic->dev;
+ struct config_param *config;
+ struct mac_info *mac_control;
+ int pkts_processed = 0;
+ int ring_pkts_processed, i;
+ struct XENA_dev_config __iomem *bar0 = nic->bar0;
+ int budget_org = budget;
- netif_rx_complete(dev, napi);
+ config = &nic->config;
+ mac_control = &nic->mac_control;
- for (i = 0; i < config->rx_ring_num; i++) {
- if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
- DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
- break;
- }
- }
- /* Re enable the Rx interrupts. */
- writeq(0x0, &bar0->rx_traffic_mask);
- readl(&bar0->rx_traffic_mask);
- return pkt_cnt;
+ if (unlikely(!is_s2io_card_up(nic)))
+ return 0;
- no_rx:
for (i = 0; i < config->rx_ring_num; i++) {
- if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
- DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
+ ring = &mac_control->rings[i];
+ ring_pkts_processed = rx_intr_handler(ring, budget);
+ s2io_chk_rx_buffers(ring);
+ pkts_processed += ring_pkts_processed;
+ budget -= ring_pkts_processed;
+ if (budget <= 0)
break;
- }
}
- return pkt_cnt;
+ if (pkts_processed < budget_org) {
+ netif_rx_complete(dev, napi);
+ /* Re enable the Rx interrupts for the ring */
+ writeq(0, &bar0->rx_traffic_mask);
+ readl(&bar0->rx_traffic_mask);
+ }
+ return pkts_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* check for received packet and indicate up to network */
for (i = 0; i < config->rx_ring_num; i++)
- rx_intr_handler(&mac_control->rings[i]);
+ rx_intr_handler(&mac_control->rings[i], 0);
for (i = 0; i < config->rx_ring_num; i++) {
if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
/**
* rx_intr_handler - Rx interrupt handler
- * @nic: device private variable.
+ * @ring_info: per ring structure.
+ * @budget: budget for napi processing.
* Description:
* If the interrupt is because of a received frame or if the
* receive ring contains fresh as yet un-processed frames,this function is
* stopped and sends the skb to the OSM's Rx handler and then increments
* the offset.
* Return Value:
- * NONE.
+ * No. of napi packets processed.
*/
- static void rx_intr_handler(struct ring_info *ring_data)
+ static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
int get_block, put_block;
struct rx_curr_get_info get_info, put_info;
struct RxD_t *rxdp;
struct sk_buff *skb;
- int pkt_cnt = 0;
+ int pkt_cnt = 0, napi_pkts = 0;
int i;
struct RxD1* rxdp1;
struct RxD3* rxdp3;
DBG_PRINT(ERR_DBG, "%s: The skb is ",
ring_data->dev->name);
DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
- return;
+ return 0;
}
if (ring_data->rxd_mode == RXD_MODE_1) {
rxdp1 = (struct RxD1*)rxdp;
rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
}
- if(ring_data->nic->config.napi){
- ring_data->nic->pkts_to_process -= 1;
- if (!ring_data->nic->pkts_to_process)
+ if (ring_data->nic->config.napi) {
+ budget--;
+ napi_pkts++;
+ if (!budget)
break;
}
pkt_cnt++;
}
}
}
+ return(napi_pkts);
}
/**
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
u64 val64;
- int i;
+ int i, msix_index;
+
+
+ if (nic->device_type == XFRAME_I_DEVICE)
+ return;
for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
+ msix_index = (i) ? ((i-1) * 8 + 1): 0;
writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
writeq(nic->msix_info[i].data, &bar0->xmsi_data);
- val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
+ val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
writeq(val64, &bar0->xmsi_access);
- if (wait_for_msix_trans(nic, i)) {
+ if (wait_for_msix_trans(nic, msix_index)) {
DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
continue;
}
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
u64 val64, addr, data;
- int i;
+ int i, msix_index;
+
+ if (nic->device_type == XFRAME_I_DEVICE)
+ return;
/* Store and display */
for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
- val64 = (s2BIT(15) | vBIT(i, 26, 6));
+ msix_index = (i) ? ((i-1) * 8 + 1): 0;
+ val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
writeq(val64, &bar0->xmsi_access);
- if (wait_for_msix_trans(nic, i)) {
+ if (wait_for_msix_trans(nic, msix_index)) {
DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
continue;
}
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
struct XENA_dev_config __iomem *bar0 = nic->bar0;
- u64 tx_mat, rx_mat;
+ u64 rx_mat;
u16 msi_control; /* Temp variable */
int ret, i, j, msix_indx = 1;
- nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
+ nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
GFP_KERNEL);
if (!nic->entries) {
DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
return -ENOMEM;
}
nic->mac_control.stats_info->sw_stat.mem_allocated
- += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+ += (nic->num_entries * sizeof(struct msix_entry));
+
+ memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
nic->s2io_entries =
- kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
+ kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
GFP_KERNEL);
if (!nic->s2io_entries) {
DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
kfree(nic->entries);
nic->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+ += (nic->num_entries * sizeof(struct msix_entry));
return -ENOMEM;
}
nic->mac_control.stats_info->sw_stat.mem_allocated
- += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
-
- for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
- nic->entries[i].entry = i;
- nic->s2io_entries[i].entry = i;
+ += (nic->num_entries * sizeof(struct s2io_msix_entry));
+ memset(nic->s2io_entries, 0,
+ nic->num_entries * sizeof(struct s2io_msix_entry));
+
+ nic->entries[0].entry = 0;
+ nic->s2io_entries[0].entry = 0;
+ nic->s2io_entries[0].in_use = MSIX_FLG;
+ nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
+ nic->s2io_entries[0].arg = &nic->mac_control.fifos;
+
+ for (i = 1; i < nic->num_entries; i++) {
+ nic->entries[i].entry = ((i - 1) * 8) + 1;
+ nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
nic->s2io_entries[i].arg = NULL;
nic->s2io_entries[i].in_use = 0;
}
- tx_mat = readq(&bar0->tx_mat0_n[0]);
- for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
- tx_mat |= TX_MAT_SET(i, msix_indx);
- nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
- nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
- nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
- }
- writeq(tx_mat, &bar0->tx_mat0_n[0]);
-
rx_mat = readq(&bar0->rx_mat);
- for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
+ for (j = 0; j < nic->config.rx_ring_num; j++) {
rx_mat |= RX_MAT_SET(j, msix_indx);
- nic->s2io_entries[msix_indx].arg
- = &nic->mac_control.rings[j];
- nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
- nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+ nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
+ nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
+ nic->s2io_entries[j+1].in_use = MSIX_FLG;
+ msix_indx += 8;
}
writeq(rx_mat, &bar0->rx_mat);
+ readq(&bar0->rx_mat);
- nic->avail_msix_vectors = 0;
- ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
+ ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
/* We fail init if error or we get less vectors than min required */
- if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
- nic->avail_msix_vectors = ret;
- ret = pci_enable_msix(nic->pdev, nic->entries, ret);
- }
if (ret) {
DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
kfree(nic->entries);
nic->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+ += (nic->num_entries * sizeof(struct msix_entry));
kfree(nic->s2io_entries);
nic->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+ += (nic->num_entries * sizeof(struct s2io_msix_entry));
nic->entries = NULL;
nic->s2io_entries = NULL;
- nic->avail_msix_vectors = 0;
return -ENOMEM;
}
- if (!nic->avail_msix_vectors)
- nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
/*
* To enable MSI-X, MSI also needs to be enabled, due to a bug
int i;
u16 msi_control;
- for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
+ for (i = 0; i < sp->num_entries; i++) {
if (sp->s2io_entries[i].in_use ==
MSIX_REGISTERED_SUCCESS) {
int vector = sp->entries[i].vector;
netif_carrier_off(dev);
sp->last_link_state = 0;
- if (sp->config.intr_type == MSI_X) {
- int ret = s2io_enable_msi_x(sp);
-
- if (!ret) {
- ret = s2io_test_msi(sp);
- /* rollback MSI-X, will re-enable during add_isr() */
- remove_msix_isr(sp);
- }
- if (ret) {
-
- DBG_PRINT(ERR_DBG,
- "%s: MSI-X requested but failed to enable\n",
- dev->name);
- sp->config.intr_type = INTA;
- }
- }
-
- /* NAPI doesn't work well with MSI(X) */
- if (sp->config.intr_type != INTA) {
- if(sp->config.napi)
- sp->config.napi = 0;
- }
-
/* Initialize H/W and enable interrupts */
err = s2io_card_up(sp);
if (err) {
if (sp->entries) {
kfree(sp->entries);
sp->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+ += (sp->num_entries * sizeof(struct msix_entry));
}
if (sp->s2io_entries) {
kfree(sp->s2io_entries);
sp->mac_control.stats_info->sw_stat.mem_freed
- += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+ += (sp->num_entries * sizeof(struct s2io_msix_entry));
}
}
return err;
mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
}
- static int s2io_chk_rx_buffers(struct ring_info *ring)
- {
- if (fill_rx_buffers(ring) == -ENOMEM) {
- DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
- DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
- }
- return 0;
- }
-
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
struct ring_info *ring = (struct ring_info *)dev_id;
struct s2io_nic *sp = ring->nic;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ struct net_device *dev = sp->dev;
- if (!is_s2io_card_up(sp))
+ if (unlikely(!is_s2io_card_up(sp)))
return IRQ_HANDLED;
- rx_intr_handler(ring);
- s2io_chk_rx_buffers(ring);
+ if (sp->config.napi) {
+ u8 *addr = NULL, val8 = 0;
+
+ addr = (u8 *)&bar0->xmsi_mask_reg;
+ addr += (7 - ring->ring_no);
+ val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
+ writeb(val8, addr);
+ val8 = readb(addr);
+ netif_rx_schedule(dev, &ring->napi);
+ } else {
+ rx_intr_handler(ring, 0);
+ s2io_chk_rx_buffers(ring);
+ }
return IRQ_HANDLED;
}
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
- struct fifo_info *fifo = (struct fifo_info *)dev_id;
- struct s2io_nic *sp = fifo->nic;
+ int i;
+ struct fifo_info *fifos = (struct fifo_info *)dev_id;
+ struct s2io_nic *sp = fifos->nic;
+ struct XENA_dev_config __iomem *bar0 = sp->bar0;
+ struct config_param *config = &sp->config;
+ u64 reason;
- if (!is_s2io_card_up(sp))
+ if (unlikely(!is_s2io_card_up(sp)))
+ return IRQ_NONE;
+
+ reason = readq(&bar0->general_int_status);
+ if (unlikely(reason == S2IO_MINUS_ONE))
+ /* Nothing much can be done. Get out */
return IRQ_HANDLED;
- tx_intr_handler(fifo);
+ writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
+
+ if (reason & GEN_INTR_TXTRAFFIC)
+ writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
+
+ for (i = 0; i < config->tx_fifo_num; i++)
+ tx_intr_handler(&fifos[i]);
+
+ writeq(sp->general_int_mask, &bar0->general_int_mask);
+ readl(&bar0->general_int_status);
+
return IRQ_HANDLED;
}
+
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
if (config->napi) {
if (reason & GEN_INTR_RXTRAFFIC) {
- if (likely(netif_rx_schedule_prep(dev,
- &sp->napi))) {
- __netif_rx_schedule(dev, &sp->napi);
- writeq(S2IO_MINUS_ONE,
- &bar0->rx_traffic_mask);
- } else
- writeq(S2IO_MINUS_ONE,
- &bar0->rx_traffic_int);
+ netif_rx_schedule(dev, &sp->napi);
+ writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
+ writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
+ readl(&bar0->rx_traffic_int);
}
} else {
/*
writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
for (i = 0; i < config->rx_ring_num; i++)
- rx_intr_handler(&mac_control->rings[i]);
+ rx_intr_handler(&mac_control->rings[i], 0);
}
/*
/* After proper initialization of H/W, register ISR */
if (sp->config.intr_type == MSI_X) {
- int i, msix_tx_cnt=0,msix_rx_cnt=0;
-
- for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
- if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ int i, msix_rx_cnt = 0;
+
+ for (i = 0; i < sp->num_entries; i++) {
+ if (sp->s2io_entries[i].in_use == MSIX_FLG) {
+ if (sp->s2io_entries[i].type ==
+ MSIX_RING_TYPE) {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ dev->name, i);
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_ring_handle, 0,
+ sp->desc[i],
+ sp->s2io_entries[i].arg);
+ } else if (sp->s2io_entries[i].type ==
+ MSIX_ALARM_TYPE) {
+ sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_fifo_handle, 0, sp->desc[i],
- sp->s2io_entries[i].arg);
- /* If either data or addr is zero print it */
- if(!(sp->msix_info[i].addr &&
- sp->msix_info[i].data)) {
- DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
- "Data:0x%llx\n",sp->desc[i],
- (unsigned long long)
- sp->msix_info[i].addr,
- (unsigned long long)
- sp->msix_info[i].data);
- } else {
- msix_tx_cnt++;
+ err = request_irq(sp->entries[i].vector,
+ s2io_msix_fifo_handle, 0,
+ sp->desc[i],
+ sp->s2io_entries[i].arg);
+
}
- } else {
- sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
- dev->name, i);
- err = request_irq(sp->entries[i].vector,
- s2io_msix_ring_handle, 0, sp->desc[i],
- sp->s2io_entries[i].arg);
- /* If either data or addr is zero print it */
- if(!(sp->msix_info[i].addr &&
+ /* if either data or addr is zero print it. */
+ if (!(sp->msix_info[i].addr &&
sp->msix_info[i].data)) {
- DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
- "Data:0x%llx\n",sp->desc[i],
+ DBG_PRINT(ERR_DBG,
+ "%s @Addr:0x%llx Data:0x%llx\n",
+ sp->desc[i],
(unsigned long long)
sp->msix_info[i].addr,
(unsigned long long)
- sp->msix_info[i].data);
- } else {
+ ntohl(sp->msix_info[i].data));
+ } else
msix_rx_cnt++;
+ if (err) {
+ remove_msix_isr(sp);
+
+ DBG_PRINT(ERR_DBG,
+ "%s:MSI-X-%d registration "
+ "failed\n", dev->name, i);
+
+ DBG_PRINT(ERR_DBG,
+ "%s: Defaulting to INTA\n",
+ dev->name);
+ sp->config.intr_type = INTA;
+ break;
}
+ sp->s2io_entries[i].in_use =
+ MSIX_REGISTERED_SUCCESS;
}
- if (err) {
- remove_msix_isr(sp);
- DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
- "failed\n", dev->name, i);
- DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
- dev->name);
- sp->config.intr_type = INTA;
- break;
- }
- sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
}
if (!err) {
- printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
- msix_tx_cnt);
printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
- msix_rx_cnt);
+ --msix_rx_cnt);
+ DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
+ " through alarm vector\n");
}
}
if (sp->config.intr_type == INTA) {
clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
/* Disable napi */
- if (config->napi)
- napi_disable(&sp->napi);
+ if (sp->config.napi) {
+ int off = 0;
+ if (config->intr_type == MSI_X) {
+ for (; off < sp->config.rx_ring_num; off++)
+ napi_disable(&sp->mac_control.rings[off].napi);
+ }
+ else
+ napi_disable(&sp->napi);
+ }
/* disable Tx and Rx traffic on the NIC */
if (do_io)
}
/* Initialise napi */
- if (config->napi)
- napi_enable(&sp->napi);
+ if (config->napi) {
+ int i;
+ if (config->intr_type == MSI_X) {
+ for (i = 0; i < sp->config.rx_ring_num; i++)
+ napi_enable(&sp->mac_control.rings[i].napi);
+ } else {
+ napi_enable(&sp->napi);
+ }
+ }
/* Maintain the state prior to the open */
if (sp->promisc_flg)
/* Enable select interrupts */
en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
if (sp->config.intr_type != INTA)
- en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
+ en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
else {
interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
interruptible |= TX_PIC_INTR;
rx_ring_num = MAX_RX_RINGS;
}
- if (*dev_intr_type != INTA)
- napi = 0;
-
if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
"Defaulting to INTA\n");
* will use eth_mac_addr() for dev->set_mac_address
* mac address will be set every time dev->open() is called
*/
- netif_napi_add(dev, &sp->napi, s2io_poll, 32);
-
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = s2io_netpoll;
#endif
}
}
+ if (sp->config.intr_type == MSI_X) {
+ sp->num_entries = config->rx_ring_num + 1;
+ ret = s2io_enable_msi_x(sp);
+
+ if (!ret) {
+ ret = s2io_test_msi(sp);
+ /* rollback MSI-X, will re-enable during add_isr() */
+ remove_msix_isr(sp);
+ }
+ if (ret) {
+
+ DBG_PRINT(ERR_DBG,
+ "%s: MSI-X requested but failed to enable\n",
+ dev->name);
+ sp->config.intr_type = INTA;
+ }
+ }
+
+ if (config->intr_type == MSI_X) {
+ for (i = 0; i < config->rx_ring_num ; i++)
+ netif_napi_add(dev, &mac_control->rings[i].napi,
+ s2io_poll_msix, 64);
+ } else {
+ netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
+ }
+
/* Not needed for Herc */
if (sp->device_type & XFRAME_I_DEVICE) {
/*
/* store mac addresses from CAM to s2io_nic structure */
do_s2io_store_unicast_mc(sp);
+ /* Configure MSIX vector for number of rings configured plus one */
+ if ((sp->device_type == XFRAME_II_DEVICE) &&
+ (config->intr_type == MSI_X))
+ sp->num_entries = config->rx_ring_num + 1;
+
/* Store the values of the MSIX table in the s2io_nic structure */
store_xmsi_data(sp);
/* reset Nic and bring it to known state */
break;
}
- if (napi)
+ switch (sp->config.napi) {
+ case 0:
+ DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
+ break;
+ case 1:
DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
+ break;
+ }
DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
sp->config.tx_fifo_num);
/* per-ring buffer counter */
u32 rx_bufs_left;
- #define MAX_LRO_SESSIONS 32
+ #define MAX_LRO_SESSIONS 32
struct lro lro0_n[MAX_LRO_SESSIONS];
u8 lro;
/* copy of sp->pdev pointer */
struct pci_dev *pdev;
+ /* Per ring napi struct */
+ struct napi_struct napi;
+
+ unsigned long interrupt_count;
+
/*
* Place holders for the virtual and physical addresses of
* all the Rx Blocks
/* interface MTU value */
unsigned mtu;
-
+
/* Buffer Address store. */
struct buffAdd **ba;
* Structure to keep track of the MSI-X vectors and the corresponding
* argument registered against each vector
*/
- #define MAX_REQUESTED_MSI_X 17
+ #define MAX_REQUESTED_MSI_X 9
struct s2io_msix_entry
{
u16 vector;
void *arg;
u8 type;
- #define MSIX_FIFO_TYPE 1
- #define MSIX_RING_TYPE 2
+ #define MSIX_ALARM_TYPE 1
+ #define MSIX_RING_TYPE 2
u8 in_use;
#define MSIX_REGISTERED_SUCCESS 0xAA
*/
int pkts_to_process;
struct net_device *dev;
- struct napi_struct napi;
struct mac_info mac_control;
struct config_param config;
struct pci_dev *pdev;
*/
u8 other_fifo_idx;
+ struct napi_struct napi;
/* after blink, the adapter must be restored with original
* values.
*/
unsigned long long start_time;
struct vlan_group *vlgrp;
#define MSIX_FLG 0xA5
+ int num_entries;
struct msix_entry *entries;
int msi_detected;
wait_queue_head_t msi_wait;
u16 lro_max_aggr_per_sess;
volatile unsigned long state;
u64 general_int_mask;
+
#define VPD_STRING_LEN 80
u8 product_name[VPD_STRING_LEN];
u8 serial_num[VPD_STRING_LEN];
static int init_shared_mem(struct s2io_nic *sp);
static void free_shared_mem(struct s2io_nic *sp);
static int init_nic(struct s2io_nic *nic);
- static void rx_intr_handler(struct ring_info *ring_data);
+ static int rx_intr_handler(struct ring_info *ring_data, int budget);
static void tx_intr_handler(struct fifo_info *fifo_data);
static void s2io_handle_errors(void * dev_id);
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
static void s2io_link(struct s2io_nic * sp, int link);
static void s2io_reset(struct s2io_nic * sp);
- static int s2io_poll(struct napi_struct *napi, int budget);
+ static int s2io_poll_msix(struct napi_struct *napi, int budget);
+ static int s2io_poll_inta(struct napi_struct *napi, int budget);
static void s2io_init_pci(struct s2io_nic * sp);
static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
static void s2io_alarm_handle(unsigned long data);
#define SBMAC_MAX_TXDESCR 256
#define SBMAC_MAX_RXDESCR 256
- #define ETHER_ALIGN 2
- #define ETHER_ADDR_LEN 6
+ #define ETHER_ADDR_LEN 6
#define ENET_PACKET_SIZE 1518
/*#define ENET_PACKET_SIZE 9216 */
spinlock_t sbm_lock; /* spin lock */
int sbm_devflags; /* current device flags */
- int sbm_buffersize;
-
/*
* Controller-specific things
*/
static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
int txrx, int maxdescr);
static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
- static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m);
+ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+ struct sk_buff *m);
static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
static void sbdma_emptyring(struct sbmacdma *d);
- static void sbdma_fillring(struct sbmacdma *d);
+ static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
int work_to_do, int poll);
static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
d->sbdma_remptr = NULL;
}
- static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
+ static inline void sbdma_align_skb(struct sk_buff *skb,
+ unsigned int power2, unsigned int offset)
{
- unsigned long addr;
- unsigned long newaddr;
-
- addr = (unsigned long) skb->data;
-
- newaddr = (addr + power2 - 1) & ~(power2 - 1);
+ unsigned char *addr = skb->data;
+ unsigned char *newaddr = PTR_ALIGN(addr, power2);
- skb_reserve(skb,newaddr-addr+offset);
+ skb_reserve(skb, newaddr - addr + offset);
}
* this queues a buffer for inbound packets.
*
* Input parameters:
- * d - DMA channel descriptor
+ * sc - softc structure
+ * d - DMA channel descriptor
* sb - sk_buff to add, or NULL if we should allocate one
*
* Return value:
********************************************************************* */
- static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
+ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+ struct sk_buff *sb)
{
+ struct net_device *dev = sc->sbm_dev;
struct sbdmadscr *dsc;
struct sbdmadscr *nextdsc;
struct sk_buff *sb_new = NULL;
*/
if (sb == NULL) {
- sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
+ sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
+ SMP_CACHE_BYTES * 2 +
+ NET_IP_ALIGN);
if (sb_new == NULL) {
pr_info("%s: sk_buff allocation failed\n",
d->sbdma_eth->sbm_dev->name);
return -ENOBUFS;
}
- sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
+ sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
}
else {
sb_new = sb;
* Do not interrupt per DMA transfer.
*/
dsc->dscr_a = virt_to_phys(sb_new->data) |
- V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
+ V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
#else
dsc->dscr_a = virt_to_phys(sb_new->data) |
- V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
+ V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
M_DMA_DSCRA_INTERRUPT;
#endif
* with sk_buffs
*
* Input parameters:
- * d - DMA channel
+ * sc - softc structure
+ * d - DMA channel
*
* Return value:
* nothing
********************************************************************* */
- static void sbdma_fillring(struct sbmacdma *d)
+ static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
{
int idx;
- for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
- if (sbdma_add_rcvbuffer(d,NULL) != 0)
+ for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
+ if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
break;
}
}
((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
sc->sbm_imr);
#else
- __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+ __raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
(M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
#endif
}
* packet and put it right back on the receive ring.
*/
- if (unlikely (sbdma_add_rcvbuffer(d,NULL) ==
- -ENOBUFS)) {
+ if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
+ -ENOBUFS)) {
dev->stats.rx_dropped++;
- sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
+ /* Re-add old buffer */
+ sbdma_add_rcvbuffer(sc, d, sb);
/* No point in continuing at the moment */
printk(KERN_ERR "dropped packet (1)\n");
d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
* put it back on the receive ring.
*/
dev->stats.rx_errors++;
- sbdma_add_rcvbuffer(d,sb);
+ sbdma_add_rcvbuffer(sc, d, sb);
}
* Fill the receive ring
*/
- sbdma_fillring(&(s->sbm_rxdma));
+ sbdma_fillring(s, &(s->sbm_rxdma));
/*
* Turn on the rest of the bits in the enable register
dev->dev_addr[i] = eaddr[i];
}
-
- /*
- * Init packet size
- */
-
- sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
-
/*
* Initialize context (get pointers to registers and stuff), then
* allocate the memory for the descriptor tables.
}
#ifdef SKY2_VLAN_TAG_USED
- static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+ static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
{
- struct sky2_port *sky2 = netdev_priv(dev);
- struct sky2_hw *hw = sky2->hw;
- u16 port = sky2->port;
-
- netif_tx_lock_bh(dev);
- napi_disable(&hw->napi);
-
- sky2->vlgrp = grp;
- if (grp) {
+ if (onoff) {
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_OFF);
}
+ }
+
+ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+ {
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ u16 port = sky2->port;
+
+ netif_tx_lock_bh(dev);
+ napi_disable(&hw->napi);
+
+ sky2->vlgrp = grp;
+ sky2_set_vlan_mode(hw, port, grp != NULL);
sky2_read32(hw, B0_Y2_SP_LISR);
napi_enable(&hw->napi);
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
TX_RING_SIZE - 1);
+ #ifdef SKY2_VLAN_TAG_USED
+ sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
+ #endif
+
err = sky2_rx_start(sky2);
if (err)
goto err_out;
gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
} else
- gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+ gm_phy_write(hw, port, PHY_MARV_LED_OVER,
PHY_M_LED_MO_DUP(mode) |
PHY_M_LED_MO_10(mode) |
PHY_M_LED_MO_100(mode) |
u16 asb;
u8 __iomem *xl_mmio;
- char *xl_card_name;
+ const char *xl_card_name;
struct pci_dev *pdev ;
spinlock_t xl_lock ;
struct wait_queue *srb_wait;
volatile int asb_queued;
- struct net_device_stats xl_stats ;
-
u16 mac_buffer ;
u16 xl_lan_status ;
u8 xl_ring_speed ;
static void airo_networks_free(struct airo_info *ai);
struct airo_info {
- struct net_device_stats stats;
struct net_device *dev;
struct list_head dev_list;
/* Note, we can have MAX_FIDS outstanding. FIDs are 16-bits, so we
if (npacks >= MAXTXQ - 1) {
netif_stop_queue (dev);
if (npacks > MAXTXQ) {
- ai->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
return 1;
}
skb_queue_tail (&ai->txq, skb);
bap_read(ai, &status, 2, BAP0);
}
if (le16_to_cpu(status) & 2) /* Too many retries */
- ai->stats.tx_aborted_errors++;
+ ai->dev->stats.tx_aborted_errors++;
if (le16_to_cpu(status) & 4) /* Transmit lifetime exceeded */
- ai->stats.tx_heartbeat_errors++;
+ ai->dev->stats.tx_heartbeat_errors++;
if (le16_to_cpu(status) & 8) /* Aid fail */
{ }
if (le16_to_cpu(status) & 0x10) /* MAC disabled */
- ai->stats.tx_carrier_errors++;
+ ai->dev->stats.tx_carrier_errors++;
if (le16_to_cpu(status) & 0x20) /* Association lost */
{ }
/* We produce a TXDROP event only for retry or lifetime
for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
} else {
priv->fids[fid] &= 0xffff;
- priv->stats.tx_window_errors++;
+ dev->stats.tx_window_errors++;
}
if (i < MAX_FIDS / 2)
netif_wake_queue(dev);
netif_stop_queue(dev);
if (i == MAX_FIDS / 2) {
- priv->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
return 1;
}
}
for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
} else {
priv->fids[fid] &= 0xffff;
- priv->stats.tx_window_errors++;
+ dev->stats.tx_window_errors++;
}
if (i < MAX_FIDS)
netif_wake_queue(dev);
netif_stop_queue(dev);
if (i == MAX_FIDS) {
- priv->stats.tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
return 1;
}
}
return 0;
}
-static void airo_read_stats(struct airo_info *ai)
+static void airo_read_stats(struct net_device *dev)
{
+ struct airo_info *ai = dev->priv;
StatsRid stats_rid;
__le32 *vals = stats_rid.vals;
readStatsRid(ai, &stats_rid, RID_STATS, 0);
up(&ai->sem);
- ai->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) +
+ dev->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) +
le32_to_cpu(vals[45]);
- ai->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) +
+ dev->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) +
le32_to_cpu(vals[41]);
- ai->stats.rx_bytes = le32_to_cpu(vals[92]);
- ai->stats.tx_bytes = le32_to_cpu(vals[91]);
- ai->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) +
+ dev->stats.rx_bytes = le32_to_cpu(vals[92]);
+ dev->stats.tx_bytes = le32_to_cpu(vals[91]);
+ dev->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) +
le32_to_cpu(vals[3]) + le32_to_cpu(vals[4]);
- ai->stats.tx_errors = le32_to_cpu(vals[42]) + ai->stats.tx_fifo_errors;
- ai->stats.multicast = le32_to_cpu(vals[43]);
- ai->stats.collisions = le32_to_cpu(vals[89]);
+ dev->stats.tx_errors = le32_to_cpu(vals[42]) +
+ dev->stats.tx_fifo_errors;
+ dev->stats.multicast = le32_to_cpu(vals[43]);
+ dev->stats.collisions = le32_to_cpu(vals[89]);
/* detailed rx_errors: */
- ai->stats.rx_length_errors = le32_to_cpu(vals[3]);
- ai->stats.rx_crc_errors = le32_to_cpu(vals[4]);
- ai->stats.rx_frame_errors = le32_to_cpu(vals[2]);
- ai->stats.rx_fifo_errors = le32_to_cpu(vals[0]);
+ dev->stats.rx_length_errors = le32_to_cpu(vals[3]);
+ dev->stats.rx_crc_errors = le32_to_cpu(vals[4]);
+ dev->stats.rx_frame_errors = le32_to_cpu(vals[2]);
+ dev->stats.rx_fifo_errors = le32_to_cpu(vals[0]);
}
static struct net_device_stats *airo_get_stats(struct net_device *dev)
set_bit(JOB_STATS, &local->jobs);
wake_up_interruptible(&local->thr_wait);
} else
- airo_read_stats(local);
+ airo_read_stats(dev);
}
- return &local->stats;
+ return &dev->stats;
}
static void airo_set_promisc(struct airo_info *ai) {
dev->irq = ethdev->irq;
dev->base_addr = ethdev->base_addr;
dev->wireless_data = ethdev->wireless_data;
+ SET_NETDEV_DEV(dev, ethdev->dev.parent);
memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
err = register_netdev(dev);
if (err<0) {
static int waitbusy (struct airo_info *ai) {
int delay = 0;
- while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) & (delay < 10000)) {
+ while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
udelay (10);
if ((++delay % 20) == 0)
OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
else if (test_bit(JOB_XMIT11, &ai->jobs))
airo_end_xmit11(dev);
else if (test_bit(JOB_STATS, &ai->jobs))
- airo_read_stats(ai);
+ airo_read_stats(dev);
else if (test_bit(JOB_WSTATS, &ai->jobs))
airo_read_wireless_stats(ai);
else if (test_bit(JOB_PROMISC, &ai->jobs))
skb = dev_alloc_skb( len + hdrlen + 2 + 2 );
if ( !skb ) {
- apriv->stats.rx_dropped++;
+ dev->stats.rx_dropped++;
goto badrx;
}
skb_reserve(skb, 2); /* This way the IP header is aligned */
skb = dev_alloc_skb(len);
if (!skb) {
- ai->stats.rx_dropped++;
+ ai->dev->stats.rx_dropped++;
goto badrx;
}
buffer = skb_put(skb,len);
skb = dev_alloc_skb( len + hdrlen + 2 );
if ( !skb ) {
- ai->stats.rx_dropped++;
+ ai->dev->stats.rx_dropped++;
goto badrx;
}
buffer = (u16*)skb_put (skb, len + hdrlen);
if (shouldsleep) {
lbs_deb_thread("sleeping, connect_status %d, "
- "ps_mode %d, ps_state %d\n",
+ "psmode %d, psstate %d\n",
priv->connect_status,
priv->psmode, priv->psstate);
spin_unlock_irq(&priv->driver_lock);
priv->nr_retries = 0;
} else {
priv->cur_cmd = NULL;
+ priv->dnld_sent = DNLD_RES_RECEIVED;
lbs_pr_info("requeueing command %x due to timeout (#%d)\n",
le16_to_cpu(cmdnode->cmdbuf->command), priv->nr_retries);
}
EXPORT_SYMBOL_GPL(lbs_suspend);
-int lbs_resume(struct lbs_private *priv)
+void lbs_resume(struct lbs_private *priv)
{
lbs_deb_enter(LBS_DEB_FW);
netif_device_attach(priv->mesh_dev);
lbs_deb_leave(LBS_DEB_FW);
- return 0;
}
EXPORT_SYMBOL_GPL(lbs_resume);
*/
memset(priv->current_addr, 0xff, ETH_ALEN);
ret = lbs_update_hw_spec(priv);
- if (ret) {
- ret = -1;
+ if (ret)
goto done;
- }
lbs_set_mac_control(priv);
-
- ret = lbs_get_data_rate(priv);
- if (ret < 0) {
- ret = -1;
- goto done;
- }
-
- ret = 0;
done:
lbs_deb_leave_args(LBS_DEB_FW, "ret %d", ret);
return ret;
EXPORT_SYMBOL_GPL(lbs_add_card);
-int lbs_remove_card(struct lbs_private *priv)
+void lbs_remove_card(struct lbs_private *priv)
{
struct net_device *dev = priv->dev;
union iwreq_data wrqu;
dev = priv->dev;
- cancel_delayed_work(&priv->scan_work);
- cancel_delayed_work(&priv->assoc_work);
+ cancel_delayed_work_sync(&priv->scan_work);
+ cancel_delayed_work_sync(&priv->assoc_work);
destroy_workqueue(priv->work_thread);
if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
free_netdev(dev);
lbs_deb_leave(LBS_DEB_MAIN);
- return 0;
}
EXPORT_SYMBOL_GPL(lbs_remove_card);
EXPORT_SYMBOL_GPL(lbs_start_card);
-int lbs_stop_card(struct lbs_private *priv)
+void lbs_stop_card(struct lbs_private *priv)
{
struct net_device *dev = priv->dev;
- int ret = -1;
struct cmd_ctrl_node *cmdnode;
unsigned long flags;
lbs_deb_enter(LBS_DEB_MAIN);
+ if (!priv)
+ goto out;
+
netif_stop_queue(priv->dev);
netif_carrier_off(priv->dev);
device_remove_file(&dev->dev, &dev_attr_lbs_mesh);
/* Flush pending command nodes */
+ del_timer_sync(&priv->command_timer);
spin_lock_irqsave(&priv->driver_lock, flags);
list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
cmdnode->result = -ENOENT;
unregister_netdev(dev);
- lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
- return ret;
+out:
+ lbs_deb_leave(LBS_DEB_MAIN);
}
EXPORT_SYMBOL_GPL(lbs_stop_card);
{
lbs_deb_enter(LBS_DEB_MAIN);
if (priv->rtap_net_dev == NULL)
- return;
+ goto out;
unregister_netdev(priv->rtap_net_dev);
free_netdev(priv->rtap_net_dev);
priv->rtap_net_dev = NULL;
+out:
lbs_deb_leave(LBS_DEB_MAIN);
}
rtap_dev->hard_start_xmit = lbs_rtap_hard_start_xmit;
rtap_dev->set_multicast_list = lbs_set_multicast_list;
rtap_dev->priv = priv;
+ SET_NETDEV_DEV(rtap_dev, priv->dev->dev.parent);
ret = register_netdev(rtap_dev);
if (ret) {
u8 data[4];
struct usb_ctrlrequest dr;
} *buf;
+ int rc;
buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
if (!buf)
usb_fill_control_urb(urb, priv->udev, usb_sndctrlpipe(priv->udev, 0),
(unsigned char *)dr, buf, len,
rtl8187_iowrite_async_cb, buf);
- usb_submit_urb(urb, GFP_ATOMIC);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc < 0) {
+ kfree(buf);
+ usb_free_urb(urb);
+ }
}
static inline void rtl818x_iowrite32_async(struct rtl8187_priv *priv,
struct urb *urb;
__le16 rts_dur = 0;
u32 flags;
+ int rc;
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
info->dev = dev;
usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2),
hdr, skb->len, rtl8187_tx_cb, skb);
- usb_submit_urb(urb, GFP_ATOMIC);
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (rc < 0) {
+ usb_free_urb(urb);
+ kfree_skb(skb);
+ }
return 0;
}
}
rx_status.antenna = (hdr->signal >> 7) & 1;
- rx_status.signal = 64 - min(hdr->noise, (u8)64);
- rx_status.ssi = signal;
+ rx_status.qual = 64 - min(hdr->noise, (u8)64);
+ rx_status.signal = signal;
rx_status.rate_idx = rate;
rx_status.freq = dev->conf.channel->center_freq;
rx_status.band = dev->conf.channel->band;
priv->mode = IEEE80211_IF_TYPE_MNTR;
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_RX_INCLUDES_FCS;
+ IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_SIGNAL_UNSPEC;
dev->extra_tx_headroom = sizeof(struct rtl8187_tx_hdr);
dev->queues = 1;
- dev->max_rssi = 65;
- dev->max_signal = 64;
+ dev->max_signal = 65;
eeprom.data = dev;
eeprom.register_read = rtl8187_eeprom_register_read;
u8 bss_op_mode; /* use IEEE80211_HT_IE_ */
};
+/**
+ * enum ieee80211_max_queues - maximum number of queues
+ *
+ * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues.
+ * @IEEE80211_MAX_AMPDU_QUEUES: Maximum number of queues usable
+ * for A-MPDU operation.
+ */
+enum ieee80211_max_queues {
+ IEEE80211_MAX_QUEUES = 16,
+ IEEE80211_MAX_AMPDU_QUEUES = 16,
+};
+
/**
* struct ieee80211_tx_queue_params - transmit queue configuration
*
};
/**
- * struct ieee80211_tx_queue_stats_data - transmit queue statistics
+ * struct ieee80211_tx_queue_stats - transmit queue statistics
*
* @len: number of packets in queue
* @limit: queue length limit
* @count: number of frames sent
*/
-struct ieee80211_tx_queue_stats_data {
+struct ieee80211_tx_queue_stats {
unsigned int len;
unsigned int limit;
unsigned int count;
};
-/**
- * enum ieee80211_tx_queue - transmit queue number
- *
- * These constants are used with some callbacks that take a
- * queue number to set parameters for a queue.
- *
- * @IEEE80211_TX_QUEUE_DATA0: data queue 0
- * @IEEE80211_TX_QUEUE_DATA1: data queue 1
- * @IEEE80211_TX_QUEUE_DATA2: data queue 2
- * @IEEE80211_TX_QUEUE_DATA3: data queue 3
- * @IEEE80211_TX_QUEUE_DATA4: data queue 4
- * @IEEE80211_TX_QUEUE_SVP: ??
- * @NUM_TX_DATA_QUEUES: number of data queues
- * @IEEE80211_TX_QUEUE_AFTER_BEACON: transmit queue for frames to be
- * sent after a beacon
- * @IEEE80211_TX_QUEUE_BEACON: transmit queue for beacon frames
- * @NUM_TX_DATA_QUEUES_AMPDU: adding more queues for A-MPDU
- */
-enum ieee80211_tx_queue {
- IEEE80211_TX_QUEUE_DATA0,
- IEEE80211_TX_QUEUE_DATA1,
- IEEE80211_TX_QUEUE_DATA2,
- IEEE80211_TX_QUEUE_DATA3,
- IEEE80211_TX_QUEUE_DATA4,
- IEEE80211_TX_QUEUE_SVP,
-
- NUM_TX_DATA_QUEUES,
-
-/* due to stupidity in the sub-ioctl userspace interface, the items in
- * this struct need to have fixed values. As soon as it is removed, we can
- * fix these entries. */
- IEEE80211_TX_QUEUE_AFTER_BEACON = 6,
- IEEE80211_TX_QUEUE_BEACON = 7,
- NUM_TX_DATA_QUEUES_AMPDU = 16
-};
-
-struct ieee80211_tx_queue_stats {
- struct ieee80211_tx_queue_stats_data data[NUM_TX_DATA_QUEUES_AMPDU];
-};
-
struct ieee80211_low_level_stats {
unsigned int dot11ACKFailureCount;
unsigned int dot11RTSFailureCount;
/* Transmit control fields. This data structure is passed to low-level driver
* with each TX frame. The low-level driver is responsible for configuring
- * the hardware to use given values (depending on what is supported). */
-
+ * the hardware to use given values (depending on what is supported).
+ *
+ * NOTE: Be careful with using the pointers outside of the ieee80211_ops->tx()
+ * context (i.e. when deferring the work to a workqueue).
+ * The vif pointer is valid until it has been removed with the
+ * ieee80211_ops->remove_interface() callback function.
+ * The hw_key pointer is valid until it has been removed with the
+ * ieee80211_ops->set_key() callback function.
+ * The tx_rate and alt_retry_rate pointers are valid until the phy is
+ * deregistered.
+ */
struct ieee80211_tx_control {
struct ieee80211_vif *vif;
struct ieee80211_rate *tx_rate;
/* retry rate for the last retries */
struct ieee80211_rate *alt_retry_rate;
+ /* Key used for hardware encryption
+ * NULL if IEEE80211_TXCTL_DO_NOT_ENCRYPT is set */
+ struct ieee80211_key_conf *hw_key;
+
u32 flags; /* tx control flags defined above */
- u8 key_idx; /* keyidx from hw->set_key(), undefined if
- * IEEE80211_TXCTL_DO_NOT_ENCRYPT is set */
u8 retry_limit; /* 1 = only first attempt, 2 = one retry, ..
* This could be used when set_retry_limit
* is not implemented by the driver */
* position represents antenna number used */
u8 icv_len; /* length of the ICV/MIC field in octets */
u8 iv_len; /* length of the IV field in octets */
- u8 queue; /* hardware queue to use for this frame;
+ u16 queue; /* hardware queue to use for this frame;
* 0 = highest, hw->queues-1 = lowest */
u16 aid; /* Station AID */
int type; /* internal */
* The low-level driver should provide this information (the subset
* supported by hardware) to the 802.11 code with each received
* frame.
+ *
* @mactime: value in microseconds of the 64-bit Time Synchronization Function
* (TSF) timer when the first data symbol (MPDU) arrived at the hardware.
* @band: the active band when this frame was received
* @freq: frequency the radio was tuned to when receiving this frame, in MHz
- * @ssi: signal strength when receiving this frame
- * @signal: used as 'qual' in statistics reporting
- * @noise: PHY noise when receiving this frame
+ * @signal: signal strength when receiving this frame, either in dBm, in dB or
+ * unspecified depending on the hardware capabilities flags
+ * @IEEE80211_HW_SIGNAL_*
+ * @noise: noise when receiving this frame, in dBm.
+ * @qual: overall signal quality indication, in percent (0-100).
* @antenna: antenna used
* @rate_idx: index of data rate into band's supported rates
* @flag: %RX_FLAG_*
u64 mactime;
enum ieee80211_band band;
int freq;
- int ssi;
int signal;
int noise;
+ int qual;
int antenna;
int rate_idx;
int flag;
* relevant only if IEEE80211_TX_STATUS_AMPDU was set.
* @ampdu_ack_map: block ack bit map for the aggregation.
* relevant only if IEEE80211_TX_STATUS_AMPDU was set.
- * @ack_signal: signal strength of the ACK frame
- * @queue_length: ?? REMOVE
- * @queue_number: ?? REMOVE
+ * @ack_signal: signal strength of the ACK frame either in dBm, dB or unspec
+ * depending on hardware capabilities flags @IEEE80211_HW_SIGNAL_*
*/
struct ieee80211_tx_status {
struct ieee80211_tx_control control;
u8 ampdu_ack_len;
u64 ampdu_ack_map;
int ack_signal;
- int queue_length;
- int queue_number;
};
/**
* @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
* the driver for a TKIP key if it requires Michael MIC
* generation in software.
+ * @IEEE80211_KEY_FLAG_PAIRWISE: Set by mac80211, this flag indicates
+ * that the key is pairwise rather than a shared key.
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_WMM_STA = 1<<0,
IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1,
IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
+ IEEE80211_KEY_FLAG_PAIRWISE = 1<<3,
};
/**
* @IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE:
* Hardware is not capable of receiving frames with short preamble on
* the 2.4 GHz band.
+ *
+ * @IEEE80211_HW_SIGNAL_UNSPEC:
+ * Hardware can provide signal values but we don't know their units. We
+ * expect values between 0 and @max_signal.
+ * If possible please provide dB or dBm instead.
+ *
+ * @IEEE80211_HW_SIGNAL_DB:
+ * Hardware gives signal values in dB, decibel difference from an
+ * arbitrary, fixed reference. We expect values between 0 and @max_signal.
+ * If possible please provide dBm instead.
+ *
+ * @IEEE80211_HW_SIGNAL_DBM:
+ * Hardware gives signal values in dBm, decibel difference from
+ * one milliwatt. This is the preferred method since it is standardized
+ * between different devices. @max_signal does not need to be set.
+ *
+ * @IEEE80211_HW_NOISE_DBM:
+ * Hardware can provide noise (radio interference) values in units dBm,
+ * decibel difference from one milliwatt.
*/
enum ieee80211_hw_flags {
IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE = 1<<0,
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING = 1<<2,
IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE = 1<<3,
IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE = 1<<4,
+ IEEE80211_HW_SIGNAL_UNSPEC = 1<<5,
+ IEEE80211_HW_SIGNAL_DB = 1<<6,
+ IEEE80211_HW_SIGNAL_DBM = 1<<7,
+ IEEE80211_HW_NOISE_DBM = 1<<8,
};
/**
*
* @channel_change_time: time (in microseconds) it takes to change channels.
*
- * @max_rssi: Maximum value for ssi in RX information, use
- * negative numbers for dBm and 0 to indicate no support.
- *
- * @max_signal: like @max_rssi, but for the signal value.
- *
- * @max_noise: like @max_rssi, but for the noise value.
+ * @max_signal: Maximum value for signal (rssi) in RX information, used
+ * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
*
* @queues: number of available hardware transmit queues for
- * data packets. WMM/QoS requires at least four.
+ * data packets. WMM/QoS requires at least four, these
+ * queues need to have configurable access parameters.
+ *
+ * @ampdu_queues: number of available hardware transmit queues
+ * for A-MPDU packets, these have no access parameters
+ * because they're used only for A-MPDU frames. Note that
+ * mac80211 will not currently use any of the regular queues
+ * for aggregation.
*
* @rate_control_algorithm: rate control algorithm for this hardware.
* If unset (NULL), the default algorithm will be used. Must be
unsigned int extra_tx_headroom;
int channel_change_time;
int vif_data_size;
- u8 queues;
- s8 max_rssi;
+ u16 queues, ampdu_queues;
s8 max_signal;
- s8 max_noise;
};
/**
* of associated station or AP.
*
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
- * bursting) for a hardware TX queue. The @queue parameter uses the
- * %IEEE80211_TX_QUEUE_* constants. Must be atomic.
+ * bursting) for a hardware TX queue. Must be atomic.
*
* @get_tx_stats: Get statistics of the current TX queue status. This is used
* to get number of currently queued packets (queue length), maximum queue
* size (limit), and total number of packets sent using each TX queue
- * (count). This information is used for WMM to find out which TX
- * queues have room for more packets and by hostapd to provide
- * statistics about the current queueing state to external programs.
+ * (count). The 'stats' pointer points to an array that has hw->queues +
+ * hw->ampdu_queues items.
*
* @get_tsf: Get the current TSF timer value from firmware/hardware. Currently,
* this is only used for IBSS mode debugging and, as such, is not a
u32 short_retry, u32 long_retr);
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, const u8 *addr);
- int (*conf_tx)(struct ieee80211_hw *hw, int queue,
+ int (*conf_tx)(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *params);
int (*get_tx_stats)(struct ieee80211_hw *hw,
struct ieee80211_tx_queue_stats *stats);
void ieee80211_scan_completed(struct ieee80211_hw *hw);
/**
- * ieee80211_iterate_active_interfaces - iterate active interfaces
+ * ieee80211_iterate_active_interfaces- iterate active interfaces
*
* This function iterates over the interfaces associated with a given
* hardware that are currently active and calls the callback for them.
+ * This function allows the iterator function to sleep. When the iterator
+ * function is atomic, @ieee80211_iterate_active_interfaces_atomic can
+ * be used.
*
* @hw: the hardware struct of which the interfaces should be iterated over
- * @iterator: the iterator function to call, cannot sleep
+ * @iterator: the iterator function to call
* @data: first argument of the iterator function
*/
void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw,
struct ieee80211_vif *vif),
void *data);
+ /**
+ * ieee80211_iterate_active_interfaces_atomic - iterate active interfaces
+ *
+ * This function iterates over the interfaces associated with a given
+ * hardware that are currently active and calls the callback for them.
+ * This function requires the iterator callback function to be atomic;
+ * if that is not desired, use @ieee80211_iterate_active_interfaces instead.
+ *
+ * @hw: the hardware struct of which the interfaces should be iterated over
+ * @iterator: the iterator function to call, cannot sleep
+ * @data: first argument of the iterator function
+ */
+ void ieee80211_iterate_active_interfaces_atomic(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ u8 *mac,
+ struct ieee80211_vif *vif),
+ void *data);
+
/**
* ieee80211_start_tx_ba_session - Start a tx Block Ack session.
* @hw: pointer as obtained from ieee80211_alloc_hw().
static void ipgre_err(struct sk_buff *skb, u32 info)
{
- #ifndef I_WISH_WORLD_WERE_PERFECT
- /* It is not :-( All the routers (except for Linux) return only
+ /* All the routers (except for Linux) return only
8 bytes of packet payload. It means, that precise relaying of
ICMP in the real Internet is absolutely infeasible.
out:
read_unlock(&ipgre_lock);
return;
- #else
- struct iphdr *iph = (struct iphdr*)dp;
- struct iphdr *eiph;
- __be16 *p = (__be16*)(dp+(iph->ihl<<2));
- const int type = icmp_hdr(skb)->type;
- const int code = icmp_hdr(skb)->code;
- int rel_type = 0;
- int rel_code = 0;
- __be32 rel_info = 0;
- __u32 n = 0;
- __be16 flags;
- int grehlen = (iph->ihl<<2) + 4;
- struct sk_buff *skb2;
- struct flowi fl;
- struct rtable *rt;
-
- if (p[1] != htons(ETH_P_IP))
- return;
-
- flags = p[0];
- if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
- if (flags&(GRE_VERSION|GRE_ROUTING))
- return;
- if (flags&GRE_CSUM)
- grehlen += 4;
- if (flags&GRE_KEY)
- grehlen += 4;
- if (flags&GRE_SEQ)
- grehlen += 4;
- }
- if (len < grehlen + sizeof(struct iphdr))
- return;
- eiph = (struct iphdr*)(dp + grehlen);
-
- switch (type) {
- default:
- return;
- case ICMP_PARAMETERPROB:
- n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
- if (n < (iph->ihl<<2))
- return;
-
- /* So... This guy found something strange INSIDE encapsulated
- packet. Well, he is fool, but what can we do ?
- */
- rel_type = ICMP_PARAMETERPROB;
- n -= grehlen;
- rel_info = htonl(n << 24);
- break;
-
- case ICMP_DEST_UNREACH:
- switch (code) {
- case ICMP_SR_FAILED:
- case ICMP_PORT_UNREACH:
- /* Impossible event. */
- return;
- case ICMP_FRAG_NEEDED:
- /* And it is the only really necessary thing :-) */
- n = ntohs(icmp_hdr(skb)->un.frag.mtu);
- if (n < grehlen+68)
- return;
- n -= grehlen;
- /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
- if (n > ntohs(eiph->tot_len))
- return;
- rel_info = htonl(n);
- break;
- default:
- /* All others are translated to HOST_UNREACH.
- rfc2003 contains "deep thoughts" about NET_UNREACH,
- I believe, it is just ether pollution. --ANK
- */
- rel_type = ICMP_DEST_UNREACH;
- rel_code = ICMP_HOST_UNREACH;
- break;
- }
- break;
- case ICMP_TIME_EXCEEDED:
- if (code != ICMP_EXC_TTL)
- return;
- break;
- }
-
- /* Prepare fake skb to feed it to icmp_send */
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL)
- return;
- dst_release(skb2->dst);
- skb2->dst = NULL;
- skb_pull(skb2, skb->data - (u8*)eiph);
- skb_reset_network_header(skb2);
-
- /* Try to guess incoming interface */
- memset(&fl, 0, sizeof(fl));
- fl.fl4_dst = eiph->saddr;
- fl.fl4_tos = RT_TOS(eiph->tos);
- fl.proto = IPPROTO_GRE;
- if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) {
- kfree_skb(skb2);
- return;
- }
- skb2->dev = rt->u.dst.dev;
-
- /* route "incoming" packet */
- if (rt->rt_flags&RTCF_LOCAL) {
- ip_rt_put(rt);
- rt = NULL;
- fl.fl4_dst = eiph->daddr;
- fl.fl4_src = eiph->saddr;
- fl.fl4_tos = eiph->tos;
- if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
- rt->u.dst.dev->type != ARPHRD_IPGRE) {
- ip_rt_put(rt);
- kfree_skb(skb2);
- return;
- }
- } else {
- ip_rt_put(rt);
- if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
- skb2->dst->dev->type != ARPHRD_IPGRE) {
- kfree_skb(skb2);
- return;
- }
- }
-
- /* change mtu on this route */
- if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- if (n > dst_mtu(skb2->dst)) {
- kfree_skb(skb2);
- return;
- }
- skb2->dst->ops->update_pmtu(skb2->dst, n);
- } else if (type == ICMP_TIME_EXCEEDED) {
- struct ip_tunnel *t = netdev_priv(skb2->dev);
- if (t->parms.iph.ttl) {
- rel_type = ICMP_DEST_UNREACH;
- rel_code = ICMP_HOST_UNREACH;
- }
- }
-
- icmp_send(skb2, rel_type, rel_code, rel_info);
- kfree_skb(skb2);
- #endif
}
static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
read_lock(&ipgre_lock);
if ((tunnel = ipgre_tunnel_lookup(dev_net(skb->dev),
iph->saddr, iph->daddr, key)) != NULL) {
+ struct net_device_stats *stats = &tunnel->dev->stats;
+
secpath_reset(skb);
skb->protocol = *(__be16*)(h + 2);
/* Looped back packet, drop it! */
if (skb->rtable->fl.iif == 0)
goto drop;
- tunnel->stat.multicast++;
+ stats->multicast++;
skb->pkt_type = PACKET_BROADCAST;
}
#endif
if (((flags&GRE_CSUM) && csum) ||
(!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
- tunnel->stat.rx_crc_errors++;
- tunnel->stat.rx_errors++;
+ stats->rx_crc_errors++;
+ stats->rx_errors++;
goto drop;
}
if (tunnel->parms.i_flags&GRE_SEQ) {
if (!(flags&GRE_SEQ) ||
(tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
- tunnel->stat.rx_fifo_errors++;
- tunnel->stat.rx_errors++;
+ stats->rx_fifo_errors++;
+ stats->rx_errors++;
goto drop;
}
tunnel->i_seqno = seqno + 1;
}
- tunnel->stat.rx_packets++;
- tunnel->stat.rx_bytes += skb->len;
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
skb->dev = tunnel->dev;
dst_release(skb->dst);
skb->dst = NULL;
static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct net_device_stats *stats = &tunnel->stat;
+ struct net_device_stats *stats = &tunnel->dev->stats;
struct iphdr *old_iph = ip_hdr(skb);
struct iphdr *tiph;
u8 tos;
int mtu;
if (tunnel->recursion++) {
- tunnel->stat.collisions++;
+ stats->collisions++;
goto tx_error;
}
/* NBMA tunnel */
if (skb->dst == NULL) {
- tunnel->stat.tx_fifo_errors++;
+ stats->tx_fifo_errors++;
goto tx_error;
}
.tos = RT_TOS(tos) } },
.proto = IPPROTO_GRE };
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- tunnel->stat.tx_carrier_errors++;
+ stats->tx_carrier_errors++;
goto tx_error;
}
}
if (tdev == dev) {
ip_rt_put(rt);
- tunnel->stat.collisions++;
+ stats->collisions++;
goto tx_error;
}
return err;
}
-static struct net_device_stats *ipgre_tunnel_get_stats(struct net_device *dev)
-{
- return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
-}
-
static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
dev->uninit = ipgre_tunnel_uninit;
dev->destructor = free_netdev;
dev->hard_start_xmit = ipgre_tunnel_xmit;
- dev->get_stats = ipgre_tunnel_get_stats;
dev->do_ioctl = ipgre_tunnel_ioctl;
dev->change_mtu = ipgre_tunnel_change_mtu;
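/*
 * Editorial note (assumption about the net core of this era): once the
 * counters live in the netdev's own dev->stats, the default get_stats
 * handler already returns &dev->stats, so the private tunnel->stat copy
 * and the per-tunnel ->get_stats hook become redundant.  Sketch of the
 * resulting pattern:
 */
static void example_tunnel_count_rx(struct net_device *dev, unsigned int len)
{
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}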
static int ipip_err(struct sk_buff *skb, u32 info)
{
- #ifndef I_WISH_WORLD_WERE_PERFECT
- /* It is not :-( All the routers (except for Linux) return only
+ /* All the routers (except for Linux) return only
8 bytes of packet payload. It means, that precise relaying of
ICMP in the real Internet is absolutely infeasible.
*/
out:
read_unlock(&ipip_lock);
return err;
- #else
- struct iphdr *iph = (struct iphdr*)dp;
- int hlen = iph->ihl<<2;
- struct iphdr *eiph;
- const int type = icmp_hdr(skb)->type;
- const int code = icmp_hdr(skb)->code;
- int rel_type = 0;
- int rel_code = 0;
- __be32 rel_info = 0;
- __u32 n = 0;
- struct sk_buff *skb2;
- struct flowi fl;
- struct rtable *rt;
-
- if (len < hlen + sizeof(struct iphdr))
- return 0;
- eiph = (struct iphdr*)(dp + hlen);
-
- switch (type) {
- default:
- return 0;
- case ICMP_PARAMETERPROB:
- n = ntohl(icmp_hdr(skb)->un.gateway) >> 24;
- if (n < hlen)
- return 0;
-
- /* So... This guy found something strange INSIDE encapsulated
- packet. Well, he is fool, but what can we do ?
- */
- rel_type = ICMP_PARAMETERPROB;
- rel_info = htonl((n - hlen) << 24);
- break;
-
- case ICMP_DEST_UNREACH:
- switch (code) {
- case ICMP_SR_FAILED:
- case ICMP_PORT_UNREACH:
- /* Impossible event. */
- return 0;
- case ICMP_FRAG_NEEDED:
- /* And it is the only really necessary thing :-) */
- n = ntohs(icmp_hdr(skb)->un.frag.mtu);
- if (n < hlen+68)
- return 0;
- n -= hlen;
- /* BSD 4.2 MORE DOES NOT EXIST IN NATURE. */
- if (n > ntohs(eiph->tot_len))
- return 0;
- rel_info = htonl(n);
- break;
- default:
- /* All others are translated to HOST_UNREACH.
- rfc2003 contains "deep thoughts" about NET_UNREACH,
- I believe, it is just ether pollution. --ANK
- */
- rel_type = ICMP_DEST_UNREACH;
- rel_code = ICMP_HOST_UNREACH;
- break;
- }
- break;
- case ICMP_TIME_EXCEEDED:
- if (code != ICMP_EXC_TTL)
- return 0;
- break;
- }
-
- /* Prepare fake skb to feed it to icmp_send */
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL)
- return 0;
- dst_release(skb2->dst);
- skb2->dst = NULL;
- skb_pull(skb2, skb->data - (u8*)eiph);
- skb_reset_network_header(skb2);
-
- /* Try to guess incoming interface */
- memset(&fl, 0, sizeof(fl));
- fl.fl4_daddr = eiph->saddr;
- fl.fl4_tos = RT_TOS(eiph->tos);
- fl.proto = IPPROTO_IPIP;
- if (ip_route_output_key(dev_net(skb->dev), &rt, &key)) {
- kfree_skb(skb2);
- return 0;
- }
- skb2->dev = rt->u.dst.dev;
-
- /* route "incoming" packet */
- if (rt->rt_flags&RTCF_LOCAL) {
- ip_rt_put(rt);
- rt = NULL;
- fl.fl4_daddr = eiph->daddr;
- fl.fl4_src = eiph->saddr;
- fl.fl4_tos = eiph->tos;
- if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
- rt->u.dst.dev->type != ARPHRD_TUNNEL) {
- ip_rt_put(rt);
- kfree_skb(skb2);
- return 0;
- }
- } else {
- ip_rt_put(rt);
- if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, skb2->dev) ||
- skb2->dst->dev->type != ARPHRD_TUNNEL) {
- kfree_skb(skb2);
- return 0;
- }
- }
-
- /* change mtu on this route */
- if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- if (n > dst_mtu(skb2->dst)) {
- kfree_skb(skb2);
- return 0;
- }
- skb2->dst->ops->update_pmtu(skb2->dst, n);
- } else if (type == ICMP_TIME_EXCEEDED) {
- struct ip_tunnel *t = netdev_priv(skb2->dev);
- if (t->parms.iph.ttl) {
- rel_type = ICMP_DEST_UNREACH;
- rel_code = ICMP_HOST_UNREACH;
- }
- }
-
- icmp_send(skb2, rel_type, rel_code, rel_info);
- kfree_skb(skb2);
- return 0;
- #endif
}
static inline void ipip_ecn_decapsulate(const struct iphdr *outer_iph,
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
- tunnel->stat.rx_packets++;
- tunnel->stat.rx_bytes += skb->len;
+ tunnel->dev->stats.rx_packets++;
+ tunnel->dev->stats.rx_bytes += skb->len;
skb->dev = tunnel->dev;
dst_release(skb->dst);
skb->dst = NULL;
static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct net_device_stats *stats = &tunnel->stat;
+ struct net_device_stats *stats = &tunnel->dev->stats;
struct iphdr *tiph = &tunnel->parms.iph;
u8 tos = tunnel->parms.iph.tos;
__be16 df = tiph->frag_off;
int mtu;
if (tunnel->recursion++) {
- tunnel->stat.collisions++;
+ stats->collisions++;
goto tx_error;
}
if (!dst) {
/* NBMA tunnel */
if ((rt = skb->rtable) == NULL) {
- tunnel->stat.tx_fifo_errors++;
+ stats->tx_fifo_errors++;
goto tx_error;
}
if ((dst = rt->rt_gateway) == 0)
.tos = RT_TOS(tos) } },
.proto = IPPROTO_IPIP };
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- tunnel->stat.tx_carrier_errors++;
+ stats->tx_carrier_errors++;
goto tx_error_icmp;
}
}
if (tdev == dev) {
ip_rt_put(rt);
- tunnel->stat.collisions++;
+ stats->collisions++;
goto tx_error;
}
mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
if (mtu < 68) {
- tunnel->stat.collisions++;
+ stats->collisions++;
ip_rt_put(rt);
goto tx_error;
}
return err;
}
-static struct net_device_stats *ipip_tunnel_get_stats(struct net_device *dev)
-{
- return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
-}
-
static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr))
{
dev->uninit = ipip_tunnel_uninit;
dev->hard_start_xmit = ipip_tunnel_xmit;
- dev->get_stats = ipip_tunnel_get_stats;
dev->do_ioctl = ipip_tunnel_ioctl;
dev->change_mtu = ipip_tunnel_change_mtu;
dev->destructor = free_netdev;
static int ipip6_err(struct sk_buff *skb, u32 info)
{
- #ifndef I_WISH_WORLD_WERE_PERFECT
- /* It is not :-( All the routers (except for Linux) return only
+ /* All the routers (except for Linux) return only
8 bytes of packet payload. It means, that precise relaying of
ICMP in the real Internet is absolutely infeasible.
*/
out:
read_unlock(&ipip6_lock);
return err;
- #else
- struct iphdr *iph = (struct iphdr*)dp;
- int hlen = iph->ihl<<2;
- struct ipv6hdr *iph6;
- const int type = icmp_hdr(skb)->type;
- const int code = icmp_hdr(skb)->code;
- int rel_type = 0;
- int rel_code = 0;
- int rel_info = 0;
- struct sk_buff *skb2;
- struct rt6_info *rt6i;
-
- if (len < hlen + sizeof(struct ipv6hdr))
- return;
- iph6 = (struct ipv6hdr*)(dp + hlen);
-
- switch (type) {
- default:
- return;
- case ICMP_PARAMETERPROB:
- if (icmp_hdr(skb)->un.gateway < hlen)
- return;
-
- /* So... This guy found something strange INSIDE encapsulated
- packet. Well, he is fool, but what can we do ?
- */
- rel_type = ICMPV6_PARAMPROB;
- rel_info = icmp_hdr(skb)->un.gateway - hlen;
- break;
-
- case ICMP_DEST_UNREACH:
- switch (code) {
- case ICMP_SR_FAILED:
- case ICMP_PORT_UNREACH:
- /* Impossible event. */
- return;
- case ICMP_FRAG_NEEDED:
- /* Too complicated case ... */
- return;
- default:
- /* All others are translated to HOST_UNREACH.
- rfc2003 contains "deep thoughts" about NET_UNREACH,
- I believe, it is just ether pollution. --ANK
- */
- rel_type = ICMPV6_DEST_UNREACH;
- rel_code = ICMPV6_ADDR_UNREACH;
- break;
- }
- break;
- case ICMP_TIME_EXCEEDED:
- if (code != ICMP_EXC_TTL)
- return;
- rel_type = ICMPV6_TIME_EXCEED;
- rel_code = ICMPV6_EXC_HOPLIMIT;
- break;
- }
-
- /* Prepare fake skb to feed it to icmpv6_send */
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (skb2 == NULL)
- return 0;
- dst_release(skb2->dst);
- skb2->dst = NULL;
- skb_pull(skb2, skb->data - (u8*)iph6);
- skb_reset_network_header(skb2);
-
- /* Try to guess incoming interface */
- rt6i = rt6_lookup(dev_net(skb->dev), &iph6->saddr, NULL, NULL, 0);
- if (rt6i && rt6i->rt6i_dev) {
- skb2->dev = rt6i->rt6i_dev;
-
- rt6i = rt6_lookup(dev_net(skb->dev),
- &iph6->daddr, &iph6->saddr, NULL, 0);
-
- if (rt6i && rt6i->rt6i_dev && rt6i->rt6i_dev->type == ARPHRD_SIT) {
- struct ip_tunnel *t = netdev_priv(rt6i->rt6i_dev);
- if (rel_type == ICMPV6_TIME_EXCEED && t->parms.iph.ttl) {
- rel_type = ICMPV6_DEST_UNREACH;
- rel_code = ICMPV6_ADDR_UNREACH;
- }
- icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);
- }
- }
- kfree_skb(skb2);
- return 0;
- #endif
}
static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
!isatap_chksrc(skb, iph, tunnel)) {
- tunnel->stat.rx_errors++;
+ tunnel->dev->stats.rx_errors++;
read_unlock(&ipip6_lock);
kfree_skb(skb);
return 0;
}
- tunnel->stat.rx_packets++;
- tunnel->stat.rx_bytes += skb->len;
+ tunnel->dev->stats.rx_packets++;
+ tunnel->dev->stats.rx_bytes += skb->len;
skb->dev = tunnel->dev;
dst_release(skb->dst);
skb->dst = NULL;
static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct net_device_stats *stats = &tunnel->stat;
+ struct net_device_stats *stats = &tunnel->dev->stats;
struct iphdr *tiph = &tunnel->parms.iph;
struct ipv6hdr *iph6 = ipv6_hdr(skb);
u8 tos = tunnel->parms.iph.tos;
int addr_type;
if (tunnel->recursion++) {
- tunnel->stat.collisions++;
+ stats->collisions++;
goto tx_error;
}
.oif = tunnel->parms.link,
.proto = IPPROTO_IPV6 };
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- tunnel->stat.tx_carrier_errors++;
+ stats->tx_carrier_errors++;
goto tx_error_icmp;
}
}
if (rt->rt_type != RTN_UNICAST) {
ip_rt_put(rt);
- tunnel->stat.tx_carrier_errors++;
+ stats->tx_carrier_errors++;
goto tx_error_icmp;
}
tdev = rt->u.dst.dev;
if (tdev == dev) {
ip_rt_put(rt);
- tunnel->stat.collisions++;
+ stats->collisions++;
goto tx_error;
}
mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu;
if (mtu < 68) {
- tunnel->stat.collisions++;
+ stats->collisions++;
ip_rt_put(rt);
goto tx_error;
}
return err;
}
-static struct net_device_stats *ipip6_tunnel_get_stats(struct net_device *dev)
-{
- return &(((struct ip_tunnel*)netdev_priv(dev))->stat);
-}
-
static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
dev->uninit = ipip6_tunnel_uninit;
dev->destructor = free_netdev;
dev->hard_start_xmit = ipip6_tunnel_xmit;
- dev->get_stats = ipip6_tunnel_get_stats;
dev->do_ioctl = ipip6_tunnel_ioctl;
dev->change_mtu = ipip6_tunnel_change_mtu;
u8 *ssid, size_t ssid_len);
static int ieee80211_sta_config_auth(struct net_device *dev,
struct ieee80211_if_sta *ifsta);
+static void sta_rx_agg_session_timer_expired(unsigned long data);
void ieee802_11_parse_elems(u8 *start, size_t len,
qparam.cw_max = 1023;
qparam.txop = 0;
- for (i = IEEE80211_TX_QUEUE_DATA0; i < NUM_TX_DATA_QUEUES; i++)
- local->ops->conf_tx(local_to_hw(local),
- i + IEEE80211_TX_QUEUE_DATA0,
- &qparam);
-
- if (ibss) {
- /* IBSS uses different parameters for Beacon sending */
- qparam.cw_min++;
- qparam.cw_min *= 2;
- qparam.cw_min--;
- local->ops->conf_tx(local_to_hw(local),
- IEEE80211_TX_QUEUE_BEACON, &qparam);
- }
+ for (i = 0; i < local_to_hw(local)->queues; i++)
+ local->ops->conf_tx(local_to_hw(local), i, &qparam);
}
}
int count;
u8 *pos;
+ if (!(ifsta->flags & IEEE80211_STA_WMM_ENABLED))
+ return;
+
+ if (!wmm_param)
+ return;
+
if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
return;
count = wmm_param[6] & 0x0f;
switch (aci) {
case 1:
- queue = IEEE80211_TX_QUEUE_DATA3;
- if (acm) {
+ queue = 3;
+ if (acm)
local->wmm_acm |= BIT(0) | BIT(3);
- }
break;
case 2:
- queue = IEEE80211_TX_QUEUE_DATA1;
- if (acm) {
+ queue = 1;
+ if (acm)
local->wmm_acm |= BIT(4) | BIT(5);
- }
break;
case 3:
- queue = IEEE80211_TX_QUEUE_DATA0;
- if (acm) {
+ queue = 0;
+ if (acm)
local->wmm_acm |= BIT(6) | BIT(7);
- }
break;
case 0:
default:
- queue = IEEE80211_TX_QUEUE_DATA2;
- if (acm) {
+ queue = 2;
+ if (acm)
local->wmm_acm |= BIT(1) | BIT(2);
- }
break;
}
if (bss) {
if (bss->capability & WLAN_CAPABILITY_PRIVACY)
capab |= WLAN_CAPABILITY_PRIVACY;
- if (bss->wmm_ie) {
+ if (bss->wmm_ie)
wmm = 1;
- }
+
+ /* get all rates supported by the device and the AP as
+ * some APs don't like getting a superset of their rates
+ * in the association request (e.g. D-Link DAP 1353 in
+ * b-only mode) */
+ rates_len = ieee80211_compatible_rates(bss, sband, &rates);
+
ieee80211_rx_bss_put(dev, bss);
+ } else {
+ rates = ~0;
+ rates_len = sband->n_bitrates;
}
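/*
 * Illustrative sketch only (the real logic is in ieee80211_compatible_rates();
 * the names, parameters and the simplified matching rule here are assumptions):
 * build a bitmask, indexed by the local band's bitrates, of the rates the AP
 * also advertises, so the association request never carries a superset.
 */
static int example_compatible_rates(const int *ap_rates, int num_ap,
				    const int *dev_rates, int num_dev,
				    u64 *mask)
{
	int i, j, count = 0;

	*mask = 0;
	for (i = 0; i < num_dev; i++)
		for (j = 0; j < num_ap; j++)
			if (dev_rates[i] == ap_rates[j]) {
				*mask |= 1ULL << i;
				count++;
				break;
			}
	return count;
}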
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
*pos++ = ifsta->ssid_len;
memcpy(pos, ifsta->ssid, ifsta->ssid_len);
- /* all supported rates should be added here but some APs
- * (e.g. D-Link DAP 1353 in b-only mode) don't like that
- * Therefore only add rates the AP supports */
- rates_len = ieee80211_compatible_rates(bss, sband, &rates);
+ /* add all rates which were marked to be used above */
supp_rates_len = rates_len;
if (supp_rates_len > 8)
supp_rates_len = 8;
*pos++ = 1; /* WME ver */
*pos++ = 0;
}
+
/* WMM support is a prerequisite for HT */
- if (wmm && sband->ht_info.ht_supported) {
+ if (wmm && (ifsta->flags & IEEE80211_STA_WMM_ENABLED) &&
+ sband->ht_info.ht_supported) {
__le16 tmp = cpu_to_le16(sband->ht_info.cap);
pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
*pos++ = WLAN_EID_HT_CAPABILITY;
struct ieee80211_mgmt *mgmt;
u16 capab;
- skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 +
- sizeof(mgmt->u.action.u.addba_resp));
+ skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
+
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer "
"for addba resp frame\n", dev->name);
struct ieee80211_mgmt *mgmt;
u16 capab;
- skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 +
- sizeof(mgmt->u.action.u.addba_req));
-
+ skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer "
/* examine state machine */
- spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
+ spin_lock_bh(&sta->lock);
if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) {
#ifdef CONFIG_MAC80211_HT_DEBUG
tid_agg_rx->stored_mpdu_num = 0;
status = WLAN_STATUS_SUCCESS;
end:
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
+ spin_unlock_bh(&sta->lock);
end_no_lock:
ieee80211_send_addba_resp(sta->sdata->dev, sta->addr, tid,
state = &sta->ampdu_mlme.tid_state_tx[tid];
- spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_lock_bh(&sta->lock);
if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_unlock_bh(&sta->lock);
printk(KERN_DEBUG "state not HT_ADDBA_REQUESTED_MSK:"
"%d\n", *state);
goto addba_resp_exit;
if (mgmt->u.action.u.addba_resp.dialog_token !=
sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
}
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_unlock_bh(&sta->lock);
printk(KERN_DEBUG "recipient accepted agg: tid %d \n", tid);
} else {
printk(KERN_DEBUG "recipient rejected agg: tid %d \n", tid);
sta->ampdu_mlme.addba_req_num[tid]++;
/* this will allow the state check in stop_BA_session */
*state = HT_AGG_STATE_OPERATIONAL;
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_unlock_bh(&sta->lock);
ieee80211_stop_tx_ba_session(hw, sta->addr, tid,
WLAN_BACK_INITIATOR);
}
struct ieee80211_mgmt *mgmt;
u16 params;
- skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom + 1 +
- sizeof(mgmt->u.action.u.delba));
+ skb = dev_alloc_skb(sizeof(*mgmt) + local->hw.extra_tx_headroom);
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer "
}
/* check if TID is in operational state */
- spin_lock_bh(&sta->ampdu_mlme.ampdu_rx);
+ spin_lock_bh(&sta->lock);
if (sta->ampdu_mlme.tid_state_rx[tid]
!= HT_AGG_STATE_OPERATIONAL) {
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
+ spin_unlock_bh(&sta->lock);
rcu_read_unlock();
return;
}
sta->ampdu_mlme.tid_state_rx[tid] =
HT_AGG_STATE_REQ_STOP_BA_MSK |
(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_rx);
+ spin_unlock_bh(&sta->lock);
/* stop HW Rx aggregation. ampdu_action existence
* already verified in session init so we add the BUG_ON */
ieee80211_sta_stop_rx_ba_session(dev, sta->addr, tid,
WLAN_BACK_INITIATOR, 0);
else { /* WLAN_BACK_RECIPIENT */
- spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_lock_bh(&sta->lock);
sta->ampdu_mlme.tid_state_tx[tid] =
HT_AGG_STATE_OPERATIONAL;
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_unlock_bh(&sta->lock);
ieee80211_stop_tx_ba_session(&local->hw, sta->addr, tid,
WLAN_BACK_RECIPIENT);
}
state = &sta->ampdu_mlme.tid_state_tx[tid];
/* check if the TID waits for addBA response */
- spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_lock_bh(&sta->lock);
if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_unlock_bh(&sta->lock);
*state = HT_AGG_STATE_IDLE;
printk(KERN_DEBUG "timer expired on tid %d but we are not "
"expecting addBA response there", tid);
/* go through the state check in stop_BA_session */
*state = HT_AGG_STATE_OPERATIONAL;
- spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
+ spin_unlock_bh(&sta->lock);
ieee80211_stop_tx_ba_session(hw, temp_sta->addr, tid,
WLAN_BACK_INITIATOR);
* resetting it after each frame that arrives from the originator.
* if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
*/
-void sta_rx_agg_session_timer_expired(unsigned long data)
+static void sta_rx_agg_session_timer_expired(unsigned long data)
{
/* not an elegant detour, but there is no choice as the timer passes
* only one argument, and various sta_info are needed here, so init
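/*
 * Editorial sketch (assumptions, not from this patch): a kernel timer carries
 * only a single unsigned long of context, hence the "detour" described above
 * to recover both the station and the TID in the handler.  The RX path is
 * expected to re-arm the timer for every frame received from the originator,
 * roughly:
 *
 *	mod_timer(&tid_agg_rx->session_timer,
 *		  jiffies + msecs_to_jiffies(timeout));
 *
 * so the handler only runs, and tears the session down, once the originator
 * has gone quiet for the negotiated timeout.
 */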
" (reason=%d)\n",
dev->name, print_mac(mac, mgmt->sa), reason_code);
- if (ifsta->flags & IEEE80211_STA_AUTHENTICATED) {
+ if (ifsta->flags & IEEE80211_STA_AUTHENTICATED)
printk(KERN_DEBUG "%s: deauthenticated\n", dev->name);
- }
if (ifsta->state == IEEE80211_AUTHENTICATE ||
ifsta->state == IEEE80211_ASSOCIATE ||
local->hw.conf.channel->center_freq,
ifsta->ssid, ifsta->ssid_len);
if (bss) {
- sta->last_rssi = bss->rssi;
sta->last_signal = bss->signal;
+ sta->last_qual = bss->qual;
sta->last_noise = bss->noise;
ieee80211_rx_bss_put(dev, bss);
}
* to between the sta_info_alloc() and sta_info_insert() above.
*/
- sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP |
- WLAN_STA_AUTHORIZED;
+ set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP |
+ WLAN_STA_AUTHORIZED);
rates = 0;
basic_rates = 0;
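/*
 * Editorial sketch (assumption): set_sta_flags() is taken here to be a tiny
 * helper that updates the station flag word under the station's spinlock,
 * replacing the open-coded |= above, roughly:
 *
 *	static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
 *	{
 *		spin_lock_bh(&sta->lock);
 *		sta->flags |= flags;
 *		spin_unlock_bh(&sta->lock);
 *	}
 */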
else
sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
- if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param) {
+ if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param &&
+ (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
struct ieee80211_ht_bss_info bss_info;
ieee80211_ht_cap_ie_to_ht_info(
(struct ieee80211_ht_cap *)
rate_control_rate_init(sta, local);
- if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
- sta->flags |= WLAN_STA_WME;
+ if (elems.wmm_param) {
+ set_sta_flags(sta, WLAN_STA_WME);
rcu_read_unlock();
ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
elems.wmm_param_len);
bss->timestamp = beacon_timestamp;
bss->last_update = jiffies;
- bss->rssi = rx_status->ssi;
bss->signal = rx_status->signal;
bss->noise = rx_status->noise;
+ bss->qual = rx_status->qual;
if (!beacon && !bss->probe_resp)
bss->probe_resp = true;
ieee802_11_parse_elems(mgmt->u.beacon.variable, len - baselen, &elems);
- if (elems.wmm_param && (ifsta->flags & IEEE80211_STA_WMM_ENABLED)) {
- ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
- elems.wmm_param_len);
- }
+ ieee80211_sta_wmm_params(dev, ifsta, elems.wmm_param,
+ elems.wmm_param_len);
/* Do not send changes to driver if we are scanning. This removes
* requirement that driver's bss_info_changed function needs to be
struct ieee80211_sta_bss *bss, *selected = NULL;
int top_rssi = 0, freq;
- if (!(ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
- IEEE80211_STA_AUTO_BSSID_SEL | IEEE80211_STA_AUTO_CHANNEL_SEL))) {
- ifsta->state = IEEE80211_AUTHENTICATE;
- ieee80211_sta_reset_auth(dev, ifsta);
- return 0;
- }
-
spin_lock_bh(&local->sta_bss_lock);
freq = local->oper_channel->center_freq;
list_for_each_entry(bss, &local->sta_bss_list, list) {
if (!(bss->capability & WLAN_CAPABILITY_ESS))
continue;
- if (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^
- !!sdata->default_key)
+ if ((ifsta->flags & (IEEE80211_STA_AUTO_SSID_SEL |
+ IEEE80211_STA_AUTO_BSSID_SEL |
+ IEEE80211_STA_AUTO_CHANNEL_SEL)) &&
+ (!!(bss->capability & WLAN_CAPABILITY_PRIVACY) ^
+ !!sdata->default_key))
continue;
if (!(ifsta->flags & IEEE80211_STA_AUTO_CHANNEL_SEL) &&
!ieee80211_sta_match_ssid(ifsta, bss->ssid, bss->ssid_len))
continue;
- if (!selected || top_rssi < bss->rssi) {
+ if (!selected || top_rssi < bss->signal) {
selected = bss;
- top_rssi = bss->rssi;
+ top_rssi = bss->signal;
}
}
if (selected)
bss->beacon_int = local->hw.conf.beacon_int;
bss->last_update = jiffies;
bss->capability = WLAN_CAPABILITY_IBSS;
- if (sdata->default_key) {
+
+ if (sdata->default_key)
bss->capability |= WLAN_CAPABILITY_PRIVACY;
- } else
+ else
sdata->drop_unencrypted = 0;
+
bss->supp_rates_len = sband->n_bitrates;
pos = bss->supp_rates;
for (i = 0; i < sband->n_bitrates; i++) {
memset(&iwe, 0, sizeof(iwe));
iwe.cmd = IWEVQUAL;
- iwe.u.qual.qual = bss->signal;
- iwe.u.qual.level = bss->rssi;
+ iwe.u.qual.qual = bss->qual;
+ iwe.u.qual.level = bss->signal;
iwe.u.qual.noise = bss->noise;
iwe.u.qual.updated = local->wstats_flags;
current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_if_sta *ifsta = &sdata->u.sta;
+
kfree(ifsta->extra_ie);
if (len == 0) {
ifsta->extra_ie = NULL;
}
-struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev,
- struct sk_buff *skb, u8 *bssid,
- u8 *addr)
+struct sta_info *ieee80211_ibss_add_sta(struct net_device *dev,
+ struct sk_buff *skb, u8 *bssid,
+ u8 *addr)
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct sta_info *sta;
if (!sta)
return NULL;
- sta->flags |= WLAN_STA_AUTHORIZED;
+ set_sta_flags(sta, WLAN_STA_AUTHORIZED);
sta->supp_rates[local->hw.conf.channel->band] =
sdata->u.sta.supp_rates_bits[local->hw.conf.channel->band];
range->num_encoding_sizes = 2;
range->max_encoding_tokens = NUM_DEFAULT_KEYS;
- range->max_qual.qual = local->hw.max_signal;
- range->max_qual.level = local->hw.max_rssi;
- range->max_qual.noise = local->hw.max_noise;
+ if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC ||
+ local->hw.flags & IEEE80211_HW_SIGNAL_DB)
+ range->max_qual.level = local->hw.max_signal;
+ else if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+ range->max_qual.level = -110;
+ else
+ range->max_qual.level = 0;
+
+ if (local->hw.flags & IEEE80211_HW_NOISE_DBM)
+ range->max_qual.noise = -110;
+ else
+ range->max_qual.noise = 0;
+
+ range->max_qual.qual = 100;
range->max_qual.updated = local->wstats_flags;
- range->avg_qual.qual = local->hw.max_signal/2;
- range->avg_qual.level = 0;
- range->avg_qual.noise = 0;
+ range->avg_qual.qual = 50;
+ /* not always true but better than nothing */
+ range->avg_qual.level = range->max_qual.level / 2;
+ range->avg_qual.noise = range->max_qual.noise / 2;
range->avg_qual.updated = local->wstats_flags;
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
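/*
 * Editorial sketch (driver-side assumption, not in this patch): the ranges
 * above depend on how the driver advertises its signal reporting, e.g. a
 * dBm-reporting driver would set
 *
 *	hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
 *
 * while one reporting an unscaled value sets IEEE80211_HW_SIGNAL_UNSPEC and
 * fills hw->max_signal with the top of its scale.
 */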
range->num_frequency = c;
IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
- IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWTHRSPY);
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
wstats->qual.noise = 0;
wstats->qual.updated = IW_QUAL_ALL_INVALID;
} else {
- wstats->qual.level = sta->last_rssi;
- wstats->qual.qual = sta->last_signal;
+ wstats->qual.level = sta->last_signal;
+ wstats->qual.qual = sta->last_qual;
wstats->qual.noise = sta->last_noise;
wstats->qual.updated = local->wstats_flags;
}