1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
65 #include "s2io-regs.h"
67 /* S2io Driver name & version. */
68 static char s2io_driver_name[] = "Neterion";
69 static char s2io_driver_version[] = "Version 1.7.7";
71 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
75 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
76 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
82 * Cards with following subsystem_id have a link state indication
83 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
84 * macro below identifies these cards given the subsystem_id.
/*
 * Evaluates to 1 when the given subsystem_id belongs to a card with the
 * faulty link state indication (600B-600D and 640B-640D, per the comment
 * above); 0 otherwise.  The expansion is fully parenthesized — including
 * the ?: — so the macro composes safely inside larger expressions.
 * NOTE: 'subid' is evaluated more than once; do not pass an expression
 * with side effects.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
			((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
			 (((subid) >= 0x640B) && ((subid) <= 0x640D)) ? 1 : 0)
/*
 * True when the adapter status word reports neither a remote nor a local
 * RMAC fault, i.e. the link is up.  'val64' is parenthesized so callers
 * may pass arbitrary expressions.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
92 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
95 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98 mac_info_t *mac_control;
100 mac_control = &sp->mac_control;
101 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) <
104 MAX_RXDS_PER_BLOCK) {
112 /* Ethtool related variables and Macros. */
113 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
114 "Register test\t(offline)",
115 "Eeprom test\t(offline)",
116 "Link test\t(online)",
117 "RLDRAM test\t(offline)",
118 "BIST Test\t(offline)"
121 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123 {"tmac_data_octets"},
127 {"tmac_pause_ctrl_frms"},
128 {"tmac_any_err_frms"},
129 {"tmac_vld_ip_octets"},
137 {"rmac_data_octets"},
138 {"rmac_fcs_err_frms"},
140 {"rmac_vld_mcst_frms"},
141 {"rmac_vld_bcst_frms"},
142 {"rmac_in_rng_len_err_frms"},
144 {"rmac_pause_ctrl_frms"},
145 {"rmac_discarded_frms"},
146 {"rmac_usized_frms"},
147 {"rmac_osized_frms"},
149 {"rmac_jabber_frms"},
157 {"rmac_err_drp_udp"},
159 {"rmac_accepted_ip"},
/*
 * Ethtool helper macros: counts of statistics keys and self-test strings,
 * and the total byte lengths of the corresponding string tables.  Each
 * expansion is wrapped in parentheses so the macros can be used safely
 * inside larger arithmetic expressions (the original unparenthesized
 * A * B / A / B forms would misparse there).
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
170 * Constants to be programmed into the Xena's registers, to configure
174 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
177 static u64 default_mdio_cfg[] = {
179 0xC001010000000000ULL, 0xC0010100000000E0ULL,
180 0xC0010100008000E4ULL,
181 /* Remove Reset from PMA PLL */
182 0xC001010000000000ULL, 0xC0010100000000E0ULL,
183 0xC0010100000000E4ULL,
187 static u64 default_dtx_cfg[] = {
188 0x8000051500000000ULL, 0x80000515000000E0ULL,
189 0x80000515D93500E4ULL, 0x8001051500000000ULL,
190 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
191 0x8002051500000000ULL, 0x80020515000000E0ULL,
192 0x80020515F21000E4ULL,
193 /* Set PADLOOPBACKN */
194 0x8002051500000000ULL, 0x80020515000000E0ULL,
195 0x80020515B20000E4ULL, 0x8003051500000000ULL,
196 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
197 0x8004051500000000ULL, 0x80040515000000E0ULL,
198 0x80040515B20000E4ULL, 0x8005051500000000ULL,
199 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
201 /* Remove PADLOOPBACKN */
202 0x8002051500000000ULL, 0x80020515000000E0ULL,
203 0x80020515F20000E4ULL, 0x8003051500000000ULL,
204 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
205 0x8004051500000000ULL, 0x80040515000000E0ULL,
206 0x80040515F20000E4ULL, 0x8005051500000000ULL,
207 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
212 * Constants for Fixing the MacAddress problem seen mostly on
215 static u64 fix_mac[] = {
216 0x0060000000000000ULL, 0x0060600000000000ULL,
217 0x0040600000000000ULL, 0x0000600000000000ULL,
218 0x0020600000000000ULL, 0x0060600000000000ULL,
219 0x0020600000000000ULL, 0x0060600000000000ULL,
220 0x0020600000000000ULL, 0x0060600000000000ULL,
221 0x0020600000000000ULL, 0x0060600000000000ULL,
222 0x0020600000000000ULL, 0x0060600000000000ULL,
223 0x0020600000000000ULL, 0x0060600000000000ULL,
224 0x0020600000000000ULL, 0x0060600000000000ULL,
225 0x0020600000000000ULL, 0x0060600000000000ULL,
226 0x0020600000000000ULL, 0x0060600000000000ULL,
227 0x0020600000000000ULL, 0x0060600000000000ULL,
228 0x0020600000000000ULL, 0x0000600000000000ULL,
229 0x0040600000000000ULL, 0x0060600000000000ULL,
233 /* Module Loadable parameters. */
/* Number of Tx FIFOs to use; tx_fifo_len[i] gives the number of Tx
 * descriptors for FIFO i (see the header comment at the top of the file). */
234 static unsigned int tx_fifo_num = 1;
235 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
236 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
/* Number of Rx rings to use; rx_ring_sz[i] gives the size of ring i. */
237 static unsigned int rx_ring_num = 1;
238 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
239 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Statistics auto-refresh period programmed into stat_cfg by init_nic()
 * via SET_UPDT_PERIOD(); units depend on that macro — see s2io-regs.h. */
240 static unsigned int Stats_refresh_time = 4;
/* Per-ring receive frame length steering; 0 means "not specified" and
 * init_nic() then leaves the default rts_frm_len_n value in place. */
241 static unsigned int rts_frm_len[MAX_RX_RINGS] =
242 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Non-zero enables continuous Tx timer interrupts
 * (TTI_DATA1_MEM_TX_TIMER_CI_EN in init_nic()). */
243 static unsigned int use_continuous_tx_intrs = 1;
/* Pause time value inserted into pause frames generated by the adapter
 * (programmed into rmac_pause_cfg by init_nic()). */
244 static unsigned int rmac_pause_time = 65535;
/* Queue-fill thresholds (out of 256) above which pause frames are
 * generated, for Rx queues 0-3 and 4-7 respectively. */
245 static unsigned int mc_pause_threshold_q0q3 = 187;
246 static unsigned int mc_pause_threshold_q4q7 = 187;
/* Read-split limit for TxDMA, written to PIC_CNTL_SHARED_SPLITS. */
247 static unsigned int shared_splits;
/* Sampling periods used to compute Tx/Rx link bandwidth utilization
 * (programmed into mac_link_util by init_nic()). */
248 static unsigned int tmac_util_period = 5;
249 static unsigned int rmac_util_period = 5;
250 #ifndef CONFIG_S2IO_NAPI
251 static unsigned int indicate_max_pkts;
256 * This table lists all the devices that this driver supports.
258 static struct pci_device_id s2io_tbl[] __devinitdata = {
259 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
260 PCI_ANY_ID, PCI_ANY_ID},
261 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
262 PCI_ANY_ID, PCI_ANY_ID},
263 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
264 PCI_ANY_ID, PCI_ANY_ID},
265 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
266 PCI_ANY_ID, PCI_ANY_ID},
270 MODULE_DEVICE_TABLE(pci, s2io_tbl);
272 static struct pci_driver s2io_driver = {
274 .id_table = s2io_tbl,
275 .probe = s2io_init_nic,
276 .remove = __devexit_p(s2io_rem_nic),
279 /* A simplifier macro used both by init and free shared_mem Fns(). */
/* A simplifier macro used both by init and free shared_mem Fns():
 * number of memory pages needed to hold 'len' TxD lists when 'per_each'
 * lists fit in one page, i.e. ceil(len / per_each).  Arguments are fully
 * parenthesized; note 'per_each' is evaluated twice. */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
283 * init_shared_mem - Allocation and Initialization of Memory
284 * @nic: Device private variable.
285 * Description: The function allocates all the memory areas shared
286 * between the NIC and the driver. This includes Tx descriptors,
287 * Rx descriptors and the statistics block.
290 static int init_shared_mem(struct s2io_nic *nic)
293 void *tmp_v_addr, *tmp_v_addr_next;
294 dma_addr_t tmp_p_addr, tmp_p_addr_next;
295 RxD_block_t *pre_rxd_blk = NULL;
296 int i, j, blk_cnt, rx_sz, tx_sz;
297 int lst_size, lst_per_page;
298 struct net_device *dev = nic->dev;
299 #ifdef CONFIG_2BUFF_MODE
304 mac_info_t *mac_control;
305 struct config_param *config;
307 mac_control = &nic->mac_control;
308 config = &nic->config;
311 /* Allocation and initialization of TXDLs in FIFOs */
313 for (i = 0; i < config->tx_fifo_num; i++) {
314 size += config->tx_cfg[i].fifo_len;
316 if (size > MAX_AVAILABLE_TXDS) {
317 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
319 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
320 DBG_PRINT(ERR_DBG, "that can be used\n");
324 lst_size = (sizeof(TxD_t) * config->max_txds);
325 tx_sz = lst_size * size;
326 lst_per_page = PAGE_SIZE / lst_size;
328 for (i = 0; i < config->tx_fifo_num; i++) {
329 int fifo_len = config->tx_cfg[i].fifo_len;
330 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
331 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
333 if (!mac_control->fifos[i].list_info) {
335 "Malloc failed for list_info\n");
338 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
340 for (i = 0; i < config->tx_fifo_num; i++) {
341 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
343 mac_control->fifos[i].tx_curr_put_info.offset = 0;
344 mac_control->fifos[i].tx_curr_put_info.fifo_len =
345 config->tx_cfg[i].fifo_len - 1;
346 mac_control->fifos[i].tx_curr_get_info.offset = 0;
347 mac_control->fifos[i].tx_curr_get_info.fifo_len =
348 config->tx_cfg[i].fifo_len - 1;
349 mac_control->fifos[i].fifo_no = i;
350 mac_control->fifos[i].nic = nic;
351 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
353 for (j = 0; j < page_num; j++) {
357 tmp_v = pci_alloc_consistent(nic->pdev,
361 "pci_alloc_consistent ");
362 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
365 while (k < lst_per_page) {
366 int l = (j * lst_per_page) + k;
367 if (l == config->tx_cfg[i].fifo_len)
369 mac_control->fifos[i].list_info[l].list_virt_addr =
370 tmp_v + (k * lst_size);
371 mac_control->fifos[i].list_info[l].list_phy_addr =
372 tmp_p + (k * lst_size);
378 /* Allocation and initialization of RXDs in Rings */
380 for (i = 0; i < config->rx_ring_num; i++) {
381 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
382 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
383 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
385 DBG_PRINT(ERR_DBG, "RxDs per Block");
388 size += config->rx_cfg[i].num_rxd;
389 mac_control->rings[i].block_count =
390 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
391 mac_control->rings[i].pkt_cnt =
392 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
394 size = (size * (sizeof(RxD_t)));
397 for (i = 0; i < config->rx_ring_num; i++) {
398 mac_control->rings[i].rx_curr_get_info.block_index = 0;
399 mac_control->rings[i].rx_curr_get_info.offset = 0;
400 mac_control->rings[i].rx_curr_get_info.ring_len =
401 config->rx_cfg[i].num_rxd - 1;
402 mac_control->rings[i].rx_curr_put_info.block_index = 0;
403 mac_control->rings[i].rx_curr_put_info.offset = 0;
404 mac_control->rings[i].rx_curr_put_info.ring_len =
405 config->rx_cfg[i].num_rxd - 1;
406 mac_control->rings[i].nic = nic;
407 mac_control->rings[i].ring_no = i;
410 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
411 /* Allocating all the Rx blocks */
412 for (j = 0; j < blk_cnt; j++) {
413 #ifndef CONFIG_2BUFF_MODE
414 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
416 size = SIZE_OF_BLOCK;
418 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
420 if (tmp_v_addr == NULL) {
422 * In case of failure, free_shared_mem()
423 * is called, which should free any
424 * memory that was alloced till the
427 mac_control->rings[i].rx_blocks[j].block_virt_addr =
431 memset(tmp_v_addr, 0, size);
432 mac_control->rings[i].rx_blocks[j].block_virt_addr =
434 mac_control->rings[i].rx_blocks[j].block_dma_addr =
437 /* Interlinking all Rx Blocks */
438 for (j = 0; j < blk_cnt; j++) {
440 mac_control->rings[i].rx_blocks[j].block_virt_addr;
442 mac_control->rings[i].rx_blocks[(j + 1) %
443 blk_cnt].block_virt_addr;
445 mac_control->rings[i].rx_blocks[j].block_dma_addr;
447 mac_control->rings[i].rx_blocks[(j + 1) %
448 blk_cnt].block_dma_addr;
450 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
451 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
454 #ifndef CONFIG_2BUFF_MODE
455 pre_rxd_blk->reserved_2_pNext_RxD_block =
456 (unsigned long) tmp_v_addr_next;
458 pre_rxd_blk->pNext_RxD_Blk_physical =
459 (u64) tmp_p_addr_next;
463 #ifdef CONFIG_2BUFF_MODE
465 * Allocation of Storages for buffer addresses in 2BUFF mode
466 * and the buffers as well.
468 for (i = 0; i < config->rx_ring_num; i++) {
470 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
471 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
473 if (!mac_control->rings[i].ba)
475 for (j = 0; j < blk_cnt; j++) {
477 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
478 (MAX_RXDS_PER_BLOCK + 1)),
480 if (!mac_control->rings[i].ba[j])
482 while (k != MAX_RXDS_PER_BLOCK) {
483 ba = &mac_control->rings[i].ba[j][k];
485 ba->ba_0_org = (void *) kmalloc
486 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
489 tmp = (u64) ba->ba_0_org;
491 tmp &= ~((u64) ALIGN_SIZE);
492 ba->ba_0 = (void *) tmp;
494 ba->ba_1_org = (void *) kmalloc
495 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
498 tmp = (u64) ba->ba_1_org;
500 tmp &= ~((u64) ALIGN_SIZE);
501 ba->ba_1 = (void *) tmp;
508 /* Allocation and initialization of Statistics block */
509 size = sizeof(StatInfo_t);
510 mac_control->stats_mem = pci_alloc_consistent
511 (nic->pdev, size, &mac_control->stats_mem_phy);
513 if (!mac_control->stats_mem) {
515 * In case of failure, free_shared_mem() is called, which
516 * should free any memory that was alloced till the
521 mac_control->stats_mem_sz = size;
523 tmp_v_addr = mac_control->stats_mem;
524 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
525 memset(tmp_v_addr, 0, size);
526 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
527 (unsigned long long) tmp_p_addr);
533 * free_shared_mem - Free the allocated Memory
534 * @nic: Device private variable.
535 * Description: This function is to free all memory locations allocated by
536 * the init_shared_mem() function and return it to the kernel.
539 static void free_shared_mem(struct s2io_nic *nic)
541 int i, j, blk_cnt, size;
543 dma_addr_t tmp_p_addr;
544 mac_info_t *mac_control;
545 struct config_param *config;
546 int lst_size, lst_per_page;
552 mac_control = &nic->mac_control;
553 config = &nic->config;
555 lst_size = (sizeof(TxD_t) * config->max_txds);
556 lst_per_page = PAGE_SIZE / lst_size;
558 for (i = 0; i < config->tx_fifo_num; i++) {
559 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
561 for (j = 0; j < page_num; j++) {
562 int mem_blks = (j * lst_per_page);
563 if (!mac_control->fifos[i].list_info[mem_blks].
566 pci_free_consistent(nic->pdev, PAGE_SIZE,
567 mac_control->fifos[i].
570 mac_control->fifos[i].
574 kfree(mac_control->fifos[i].list_info);
577 #ifndef CONFIG_2BUFF_MODE
578 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
580 size = SIZE_OF_BLOCK;
582 for (i = 0; i < config->rx_ring_num; i++) {
583 blk_cnt = mac_control->rings[i].block_count;
584 for (j = 0; j < blk_cnt; j++) {
585 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
587 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
589 if (tmp_v_addr == NULL)
591 pci_free_consistent(nic->pdev, size,
592 tmp_v_addr, tmp_p_addr);
596 #ifdef CONFIG_2BUFF_MODE
597 /* Freeing buffer storage addresses in 2BUFF mode. */
598 for (i = 0; i < config->rx_ring_num; i++) {
600 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
601 for (j = 0; j < blk_cnt; j++) {
603 if (!mac_control->rings[i].ba[j])
605 while (k != MAX_RXDS_PER_BLOCK) {
606 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
611 kfree(mac_control->rings[i].ba[j]);
613 if (mac_control->rings[i].ba)
614 kfree(mac_control->rings[i].ba);
618 if (mac_control->stats_mem) {
619 pci_free_consistent(nic->pdev,
620 mac_control->stats_mem_sz,
621 mac_control->stats_mem,
622 mac_control->stats_mem_phy);
627 * init_nic - Initialization of hardware
628 * @nic: device private variable
629 * Description: The function sequentially configures every block
630 * of the H/W from their reset values.
631 * Return Value: SUCCESS on success and
632 * '-1' on failure (endian settings incorrect).
635 static int init_nic(struct s2io_nic *nic)
637 XENA_dev_config_t __iomem *bar0 = nic->bar0;
638 struct net_device *dev = nic->dev;
639 register u64 val64 = 0;
643 mac_info_t *mac_control;
644 struct config_param *config;
645 int mdio_cnt = 0, dtx_cnt = 0;
646 unsigned long long mem_share;
649 mac_control = &nic->mac_control;
650 config = &nic->config;
652 /* to set the swapper control on the card */
653 if(s2io_set_swapper(nic)) {
654 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
658 /* Remove XGXS from reset state */
660 writeq(val64, &bar0->sw_reset);
662 val64 = readq(&bar0->sw_reset);
664 /* Enable Receiving broadcasts */
665 add = &bar0->mac_cfg;
666 val64 = readq(&bar0->mac_cfg);
667 val64 |= MAC_RMAC_BCAST_ENABLE;
668 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
669 writel((u32) val64, add);
670 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
671 writel((u32) (val64 >> 32), (add + 4));
673 /* Read registers in all blocks */
674 val64 = readq(&bar0->mac_int_mask);
675 val64 = readq(&bar0->mc_int_mask);
676 val64 = readq(&bar0->xgxs_int_mask);
680 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
683 * Configuring the XAUI Interface of Xena.
684 * ***************************************
685 * To Configure the Xena's XAUI, one has to write a series
686 * of 64 bit values into two registers in a particular
687 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
688 * which will be defined in the array of configuration values
689 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
690 * to switch writing from one register to another. We continue
691 * writing these values until we encounter the 'END_SIGN' macro.
692 * For example, After making a series of 21 writes into
693 * dtx_control register the 'SWITCH_SIGN' appears and hence we
694 * start writing into mdio_control until we encounter END_SIGN.
698 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
699 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
703 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
704 &bar0->dtx_control, UF);
705 val64 = readq(&bar0->dtx_control);
709 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
710 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
714 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
715 &bar0->mdio_control, UF);
716 val64 = readq(&bar0->mdio_control);
719 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
720 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
727 /* Tx DMA Initialization */
729 writeq(val64, &bar0->tx_fifo_partition_0);
730 writeq(val64, &bar0->tx_fifo_partition_1);
731 writeq(val64, &bar0->tx_fifo_partition_2);
732 writeq(val64, &bar0->tx_fifo_partition_3);
735 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
737 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
738 13) | vBIT(config->tx_cfg[i].fifo_priority,
741 if (i == (config->tx_fifo_num - 1)) {
748 writeq(val64, &bar0->tx_fifo_partition_0);
752 writeq(val64, &bar0->tx_fifo_partition_1);
756 writeq(val64, &bar0->tx_fifo_partition_2);
760 writeq(val64, &bar0->tx_fifo_partition_3);
765 /* Enable Tx FIFO partition 0. */
766 val64 = readq(&bar0->tx_fifo_partition_0);
767 val64 |= BIT(0); /* To enable the FIFO partition. */
768 writeq(val64, &bar0->tx_fifo_partition_0);
771 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
772 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
774 if (get_xena_rev_id(nic->pdev) < 4)
775 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
777 val64 = readq(&bar0->tx_fifo_partition_0);
778 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
779 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
782 * Initialization of Tx_PA_CONFIG register to ignore packet
783 * integrity checking.
785 val64 = readq(&bar0->tx_pa_cfg);
786 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
787 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
788 writeq(val64, &bar0->tx_pa_cfg);
790 /* Rx DMA initialization. */
792 for (i = 0; i < config->rx_ring_num; i++) {
794 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
797 writeq(val64, &bar0->rx_queue_priority);
800 * Allocating equal share of memory to all the
805 for (i = 0; i < config->rx_ring_num; i++) {
808 mem_share = (mem_size / config->rx_ring_num +
809 mem_size % config->rx_ring_num);
810 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
813 mem_share = (mem_size / config->rx_ring_num);
814 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
817 mem_share = (mem_size / config->rx_ring_num);
818 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
821 mem_share = (mem_size / config->rx_ring_num);
822 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
825 mem_share = (mem_size / config->rx_ring_num);
826 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
829 mem_share = (mem_size / config->rx_ring_num);
830 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
833 mem_share = (mem_size / config->rx_ring_num);
834 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
837 mem_share = (mem_size / config->rx_ring_num);
838 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
842 writeq(val64, &bar0->rx_queue_cfg);
845 * Filling Tx round robin registers
846 * as per the number of FIFOs
848 switch (config->tx_fifo_num) {
850 val64 = 0x0000000000000000ULL;
851 writeq(val64, &bar0->tx_w_round_robin_0);
852 writeq(val64, &bar0->tx_w_round_robin_1);
853 writeq(val64, &bar0->tx_w_round_robin_2);
854 writeq(val64, &bar0->tx_w_round_robin_3);
855 writeq(val64, &bar0->tx_w_round_robin_4);
858 val64 = 0x0000010000010000ULL;
859 writeq(val64, &bar0->tx_w_round_robin_0);
860 val64 = 0x0100000100000100ULL;
861 writeq(val64, &bar0->tx_w_round_robin_1);
862 val64 = 0x0001000001000001ULL;
863 writeq(val64, &bar0->tx_w_round_robin_2);
864 val64 = 0x0000010000010000ULL;
865 writeq(val64, &bar0->tx_w_round_robin_3);
866 val64 = 0x0100000000000000ULL;
867 writeq(val64, &bar0->tx_w_round_robin_4);
870 val64 = 0x0001000102000001ULL;
871 writeq(val64, &bar0->tx_w_round_robin_0);
872 val64 = 0x0001020000010001ULL;
873 writeq(val64, &bar0->tx_w_round_robin_1);
874 val64 = 0x0200000100010200ULL;
875 writeq(val64, &bar0->tx_w_round_robin_2);
876 val64 = 0x0001000102000001ULL;
877 writeq(val64, &bar0->tx_w_round_robin_3);
878 val64 = 0x0001020000000000ULL;
879 writeq(val64, &bar0->tx_w_round_robin_4);
882 val64 = 0x0001020300010200ULL;
883 writeq(val64, &bar0->tx_w_round_robin_0);
884 val64 = 0x0100000102030001ULL;
885 writeq(val64, &bar0->tx_w_round_robin_1);
886 val64 = 0x0200010000010203ULL;
887 writeq(val64, &bar0->tx_w_round_robin_2);
888 val64 = 0x0001020001000001ULL;
889 writeq(val64, &bar0->tx_w_round_robin_3);
890 val64 = 0x0203000100000000ULL;
891 writeq(val64, &bar0->tx_w_round_robin_4);
894 val64 = 0x0001000203000102ULL;
895 writeq(val64, &bar0->tx_w_round_robin_0);
896 val64 = 0x0001020001030004ULL;
897 writeq(val64, &bar0->tx_w_round_robin_1);
898 val64 = 0x0001000203000102ULL;
899 writeq(val64, &bar0->tx_w_round_robin_2);
900 val64 = 0x0001020001030004ULL;
901 writeq(val64, &bar0->tx_w_round_robin_3);
902 val64 = 0x0001000000000000ULL;
903 writeq(val64, &bar0->tx_w_round_robin_4);
906 val64 = 0x0001020304000102ULL;
907 writeq(val64, &bar0->tx_w_round_robin_0);
908 val64 = 0x0304050001020001ULL;
909 writeq(val64, &bar0->tx_w_round_robin_1);
910 val64 = 0x0203000100000102ULL;
911 writeq(val64, &bar0->tx_w_round_robin_2);
912 val64 = 0x0304000102030405ULL;
913 writeq(val64, &bar0->tx_w_round_robin_3);
914 val64 = 0x0001000200000000ULL;
915 writeq(val64, &bar0->tx_w_round_robin_4);
918 val64 = 0x0001020001020300ULL;
919 writeq(val64, &bar0->tx_w_round_robin_0);
920 val64 = 0x0102030400010203ULL;
921 writeq(val64, &bar0->tx_w_round_robin_1);
922 val64 = 0x0405060001020001ULL;
923 writeq(val64, &bar0->tx_w_round_robin_2);
924 val64 = 0x0304050000010200ULL;
925 writeq(val64, &bar0->tx_w_round_robin_3);
926 val64 = 0x0102030000000000ULL;
927 writeq(val64, &bar0->tx_w_round_robin_4);
930 val64 = 0x0001020300040105ULL;
931 writeq(val64, &bar0->tx_w_round_robin_0);
932 val64 = 0x0200030106000204ULL;
933 writeq(val64, &bar0->tx_w_round_robin_1);
934 val64 = 0x0103000502010007ULL;
935 writeq(val64, &bar0->tx_w_round_robin_2);
936 val64 = 0x0304010002060500ULL;
937 writeq(val64, &bar0->tx_w_round_robin_3);
938 val64 = 0x0103020400000000ULL;
939 writeq(val64, &bar0->tx_w_round_robin_4);
943 /* Filling the Rx round robin registers as per the
944 * number of Rings and steering based on QoS.
946 switch (config->rx_ring_num) {
948 val64 = 0x8080808080808080ULL;
949 writeq(val64, &bar0->rts_qos_steering);
952 val64 = 0x0000010000010000ULL;
953 writeq(val64, &bar0->rx_w_round_robin_0);
954 val64 = 0x0100000100000100ULL;
955 writeq(val64, &bar0->rx_w_round_robin_1);
956 val64 = 0x0001000001000001ULL;
957 writeq(val64, &bar0->rx_w_round_robin_2);
958 val64 = 0x0000010000010000ULL;
959 writeq(val64, &bar0->rx_w_round_robin_3);
960 val64 = 0x0100000000000000ULL;
961 writeq(val64, &bar0->rx_w_round_robin_4);
963 val64 = 0x8080808040404040ULL;
964 writeq(val64, &bar0->rts_qos_steering);
967 val64 = 0x0001000102000001ULL;
968 writeq(val64, &bar0->rx_w_round_robin_0);
969 val64 = 0x0001020000010001ULL;
970 writeq(val64, &bar0->rx_w_round_robin_1);
971 val64 = 0x0200000100010200ULL;
972 writeq(val64, &bar0->rx_w_round_robin_2);
973 val64 = 0x0001000102000001ULL;
974 writeq(val64, &bar0->rx_w_round_robin_3);
975 val64 = 0x0001020000000000ULL;
976 writeq(val64, &bar0->rx_w_round_robin_4);
978 val64 = 0x8080804040402020ULL;
979 writeq(val64, &bar0->rts_qos_steering);
982 val64 = 0x0001020300010200ULL;
983 writeq(val64, &bar0->rx_w_round_robin_0);
984 val64 = 0x0100000102030001ULL;
985 writeq(val64, &bar0->rx_w_round_robin_1);
986 val64 = 0x0200010000010203ULL;
987 writeq(val64, &bar0->rx_w_round_robin_2);
988 val64 = 0x0001020001000001ULL;
989 writeq(val64, &bar0->rx_w_round_robin_3);
990 val64 = 0x0203000100000000ULL;
991 writeq(val64, &bar0->rx_w_round_robin_4);
993 val64 = 0x8080404020201010ULL;
994 writeq(val64, &bar0->rts_qos_steering);
997 val64 = 0x0001000203000102ULL;
998 writeq(val64, &bar0->rx_w_round_robin_0);
999 val64 = 0x0001020001030004ULL;
1000 writeq(val64, &bar0->rx_w_round_robin_1);
1001 val64 = 0x0001000203000102ULL;
1002 writeq(val64, &bar0->rx_w_round_robin_2);
1003 val64 = 0x0001020001030004ULL;
1004 writeq(val64, &bar0->rx_w_round_robin_3);
1005 val64 = 0x0001000000000000ULL;
1006 writeq(val64, &bar0->rx_w_round_robin_4);
1008 val64 = 0x8080404020201008ULL;
1009 writeq(val64, &bar0->rts_qos_steering);
1012 val64 = 0x0001020304000102ULL;
1013 writeq(val64, &bar0->rx_w_round_robin_0);
1014 val64 = 0x0304050001020001ULL;
1015 writeq(val64, &bar0->rx_w_round_robin_1);
1016 val64 = 0x0203000100000102ULL;
1017 writeq(val64, &bar0->rx_w_round_robin_2);
1018 val64 = 0x0304000102030405ULL;
1019 writeq(val64, &bar0->rx_w_round_robin_3);
1020 val64 = 0x0001000200000000ULL;
1021 writeq(val64, &bar0->rx_w_round_robin_4);
1023 val64 = 0x8080404020100804ULL;
1024 writeq(val64, &bar0->rts_qos_steering);
1027 val64 = 0x0001020001020300ULL;
1028 writeq(val64, &bar0->rx_w_round_robin_0);
1029 val64 = 0x0102030400010203ULL;
1030 writeq(val64, &bar0->rx_w_round_robin_1);
1031 val64 = 0x0405060001020001ULL;
1032 writeq(val64, &bar0->rx_w_round_robin_2);
1033 val64 = 0x0304050000010200ULL;
1034 writeq(val64, &bar0->rx_w_round_robin_3);
1035 val64 = 0x0102030000000000ULL;
1036 writeq(val64, &bar0->rx_w_round_robin_4);
1038 val64 = 0x8080402010080402ULL;
1039 writeq(val64, &bar0->rts_qos_steering);
1042 val64 = 0x0001020300040105ULL;
1043 writeq(val64, &bar0->rx_w_round_robin_0);
1044 val64 = 0x0200030106000204ULL;
1045 writeq(val64, &bar0->rx_w_round_robin_1);
1046 val64 = 0x0103000502010007ULL;
1047 writeq(val64, &bar0->rx_w_round_robin_2);
1048 val64 = 0x0304010002060500ULL;
1049 writeq(val64, &bar0->rx_w_round_robin_3);
1050 val64 = 0x0103020400000000ULL;
1051 writeq(val64, &bar0->rx_w_round_robin_4);
1053 val64 = 0x8040201008040201ULL;
1054 writeq(val64, &bar0->rts_qos_steering);
1060 for (i = 0; i < 8; i++)
1061 writeq(val64, &bar0->rts_frm_len_n[i]);
1063 /* Set the default rts frame length for the rings configured */
1064 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1065 for (i = 0 ; i < config->rx_ring_num ; i++)
1066 writeq(val64, &bar0->rts_frm_len_n[i]);
1068 /* Set the frame length for the configured rings
1069 * desired by the user
1071 for (i = 0; i < config->rx_ring_num; i++) {
1072 /* If rts_frm_len[i] == 0 then it is assumed that user not
1073 * specified frame length steering.
1074 * If the user provides the frame length then program
1075 * the rts_frm_len register for those values or else
1076 * leave it as it is.
1078 if (rts_frm_len[i] != 0) {
1079 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1080 &bar0->rts_frm_len_n[i]);
1084 /* Program statistics memory */
1085 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1086 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
1087 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
1088 writeq(val64, &bar0->stat_cfg);
1091 * Initializing the sampling rate for the device to calculate the
1092 * bandwidth utilization.
1094 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1095 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1096 writeq(val64, &bar0->mac_link_util);
1100 * Initializing the Transmit and Receive Traffic Interrupt
1104 * TTI Initialization. Default Tx timer gets us about
1105 * 250 interrupts per sec. Continuous interrupts are enabled
1108 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1109 TTI_DATA1_MEM_TX_URNG_A(0xA) |
1110 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1111 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1112 if (use_continuous_tx_intrs)
1113 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1114 writeq(val64, &bar0->tti_data1_mem);
1116 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1117 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1118 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1119 writeq(val64, &bar0->tti_data2_mem);
1121 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1122 writeq(val64, &bar0->tti_command_mem);
1125 * Once the operation completes, the Strobe bit of the command
1126 * register will be reset. We poll for this particular condition
1127 * We wait for a maximum of 500ms for the operation to complete,
1128 * if it's not complete by then we return error.
1132 val64 = readq(&bar0->tti_command_mem);
1133 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1137 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1145 /* RTI Initialization */
1146 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1147 RTI_DATA1_MEM_RX_URNG_A(0xA) |
1148 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1149 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1151 writeq(val64, &bar0->rti_data1_mem);
1153 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1154 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1155 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1156 writeq(val64, &bar0->rti_data2_mem);
1158 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1159 writeq(val64, &bar0->rti_command_mem);
1162 * Once the operation completes, the Strobe bit of the
1163 * command register will be reset. We poll for this
1164 * particular condition. We wait for a maximum of 500ms
1165 * for the operation to complete, if it's not complete
1166 * by then we return error.
1170 val64 = readq(&bar0->rti_command_mem);
1171 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1175 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1184 * Initializing proper values as Pause threshold into all
1185 * the 8 Queues on Rx side.
1187 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1188 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1190 /* Disable RMAC PAD STRIPPING */
1191 add = (void *) &bar0->mac_cfg;
1192 val64 = readq(&bar0->mac_cfg);
1193 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1194 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1195 writel((u32) (val64), add);
1196 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1197 writel((u32) (val64 >> 32), (add + 4));
1198 val64 = readq(&bar0->mac_cfg);
1201 * Set the time value to be inserted in the pause frame
1202 * generated by xena.
1204 val64 = readq(&bar0->rmac_pause_cfg);
1205 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1206 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1207 writeq(val64, &bar0->rmac_pause_cfg);
1210 * Set the Threshold Limit for Generating the pause frame
1211 * If the amount of data in any Queue exceeds ratio of
1212 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1213 * pause frame is generated
1216 for (i = 0; i < 4; i++) {
1218 (((u64) 0xFF00 | nic->mac_control.
1219 mc_pause_threshold_q0q3)
1222 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1225 for (i = 0; i < 4; i++) {
1227 (((u64) 0xFF00 | nic->mac_control.
1228 mc_pause_threshold_q4q7)
1231 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1234 * TxDMA will stop Read request if the number of read split has
1235 * exceeded the limit pointed by shared_splits
1237 val64 = readq(&bar0->pic_control);
1238 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1239 writeq(val64, &bar0->pic_control);
1245 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1246 * @nic: device private variable,
1247 * @mask: A mask indicating which Intr block must be modified and,
1248 * @flag: A flag indicating whether to enable or disable the Intrs.
1249 * Description: This function will either disable or enable the interrupts
1250 * depending on the flag argument. The mask argument can be used to
1251 * enable/disable any Intr block.
1252 * Return Value: NONE.
/*
 * Walk the top-level interrupt classification (PIC, Tx/Rx DMA, MAC, XGXS,
 * MC, Tx/Rx traffic).  For each block selected in @mask: on ENABLE_INTRS,
 * clear that block's bit(s) in general_int_mask (unmask at the top level)
 * and program the block-level mask register; on DISABLE_INTRS, set
 * DISABLE_ALL_INTRS in the block-level mask and re-mask the block in
 * general_int_mask.  All register accesses are 64-bit PIO via readq/writeq
 * on the BAR0 register map.
 */
static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M | PIC_RX_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* DMA Interrupts */
	/* Enabling/Disabling Tx DMA interrupts */
	if (mask & TX_DMA_INTR) {
		/* Enable TxDMA Intrs in the general intr mask register */
		val64 = TXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Keep all interrupts other than PFC interrupt
			 * and PCC interrupt disabled in DMA level.
			 */
			val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
			writeq(val64, &bar0->txdma_int_mask);
			/*
			 * Enable only the MISC error 1 interrupt in PFC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
			writeq(val64, &bar0->pfc_err_mask);
			/*
			 * Enable only the FB_ECC error interrupt in PCC block
			 */
			val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
			writeq(val64, &bar0->pcc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable TxDMA Intrs in the general intr mask
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
			writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* Enabling/Disabling Rx DMA interrupts */
	if (mask & RX_DMA_INTR) {
		/* Enable RxDMA Intrs in the general intr mask register */
		val64 = RXDMA_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All RxDMA block interrupts are disabled for now
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable RxDMA Intrs in the general intr mask
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 * except the link status change interrupt.
			 */
			val64 = MAC_INT_STATUS_RMAC_INT;
			temp64 = readq(&bar0->mac_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->mac_int_mask);

			/* Unmask only the link-state-change error in RMAC */
			val64 = readq(&bar0->mac_rmac_err_mask);
			val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
			writeq(val64, &bar0->mac_rmac_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);

			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* XGXS Interrupts */
	if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
		val64 = TXXGXS_INT_M | RXXGXS_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All XGXS block error interrupts are disabled for now
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable XGXS Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* Memory Controller(MC) interrupts */
	if (mask & MC_INTR) {
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all MC Intrs.
			 */
			writeq(0x0, &bar0->mc_int_mask);
			writeq(0x0, &bar0->mc_err_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);
/*
 * Test the PCC/PRC idle bits of the adapter-status value @val64.
 * @flag tells whether the adapter-enable bit has ever been written
 * (FALSE = never enabled yet), which changes which combination of
 * RMAC_PCC_IDLE / RMAC_PCC_FOUR_IDLE and RC_PRC_QUIESCENT counts as
 * "ready".  @rev_id selects between the two PCC-idle encodings —
 * NOTE(review): the rev_id comparisons themselves are not visible in
 * this extract; confirm which revision uses which branch.
 */
static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
	if (flag == FALSE) {
		/* Adapter was never enabled: PCC must NOT be idle yet,
		 * but all PRC queues must already be quiescent. */
		if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
		    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
		     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
		    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
		     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		/* Adapter was enabled at least once: PCC must be fully idle,
		 * and PRC quiescence is accepted either way. */
		if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
		     ADAPTER_STATUS_RMAC_PCC_IDLE) &&
		    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
		     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
		      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
		if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
		     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
		    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
		     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
		      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1524 * verify_xena_quiescence - Checks whether the H/W is ready
1525 * @val64 : Value read from adapter status register.
1526 * @flag : indicates if the adapter enable bit was ever written once
1528 * Description: Returns whether the H/W is ready to go or not. Depending
1529 * on whether adapter enable bit was written or not the comparison
1530 * differs and the calling function passes the input argument flag to
1532 * Return: 1 If xena is quiescence
1533 * 0 If Xena is not quiescence
/*
 * Check whether the Xena H/W is quiescent: all of the listed READY /
 * QUIESCENT / PLL_LOCK bits must be set in the adapter-status value
 * @val64 (tested via its complement tmp64), after which the PCC/PRC
 * state is validated by check_prc_pcc_state() using @flag (whether
 * the adapter-enable bit was ever written) and the PCI revision id.
 */
static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
	u64 tmp64 = ~((u64) val64);
	int rev_id = get_xena_rev_id(sp->pdev);

	    (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
	     ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
	     ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
	     ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
	     ADAPTER_STATUS_P_PLL_LOCK))) {
		ret = check_prc_pcc_state(val64, flag, rev_id);
 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specific structure
 *
 * New procedure to clear mac address reading problems on Alpha platforms
/*
 * Work around the MAC-address read problem seen on Alpha platforms by
 * replaying the fix_mac[] magic sequence into gpio_control, terminated
 * by END_SIGN.  Each write is followed by a readq — presumably to flush
 * the posted PIO write before the next one; confirm against the H/W spec.
 */
void fix_mac_address(nic_t * sp)
	XENA_dev_config_t __iomem *bar0 = sp->bar0;

	while (fix_mac[i] != END_SIGN) {
		writeq(fix_mac[i++], &bar0->gpio_control);
		val64 = readq(&bar0->gpio_control);
1577 * start_nic - Turns the device on
1578 * @nic : device private variable.
1580 * This function actually turns the device on. Before this function is
1581 * called,all Registers are configured from their reset states
1582 * and shared memory is allocated but the NIC is still quiescent. On
1583 * calling this function, the device interrupts are cleared and the NIC is
1584 * literally switched on by writing into the adapter control register.
1586 * SUCCESS on success and -1 on failure.
/*
 * Turn the (already configured, still quiescent) device on: program the
 * PRC ring base addresses, enable the receive controllers, bring up
 * MC-RLDRAM, clear stale link-change interrupts, verify quiescence,
 * enable the selected interrupt blocks, switch the laser on, set up the
 * SXE-002 LED workaround and kick the link-state task.
 */
static int start_nic(struct s2io_nic *nic)
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each PRC at the DMA address of its first Rx block */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
#ifndef CONFIG_2BUFF_MODE
		val64 |= PRC_CTRL_RC_ENABLED;
		/* 2-buffer mode additionally selects ring mode 3 */
		val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		writeq(val64, &bar0->prc_ctrl_n[i]);

#ifdef CONFIG_2BUFF_MODE
	/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
	val64 = readq(&bar0->rx_pa_cfg);
	val64 |= RX_PA_CFG_IGNORE_L2_ERR;
	writeq(val64, &bar0->rx_pa_cfg);

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): this CLEARS ADAPTER_ECC_EN although the comment says
	 * "Enabling" — the bit may be active-low in this H/W; confirm. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	val64 = readq(&bar0->mac_rmac_err_reg);
	writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);

	/* Enable select interrupts */
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
	    RX_MAC_INTR | MC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED-init value written at raw BAR0 offset 0x2700 */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));

	/*
	 * Don't see link state interrupts on certain switches, so
	 * directly scheduling a link state task from here.
	 */
	schedule_work(&nic->set_link_task);
1698 * free_tx_buffers - Free all queued Tx buffers
1699 * @nic : device private variable.
1701 * Free all queued Tx buffers.
1702 * Return Value: void
/*
 * Forcibly free every skb still queued on all Tx FIFOs (used on teardown),
 * zero the corresponding TxDs and reset each FIFO's get/put offsets.
 */
static void free_tx_buffers(struct s2io_nic *nic)
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	for (i = 0; i < config->tx_fifo_num; i++) {
		/* NOTE(review): loop bound is fifo_len - 1, so the last
		 * descriptor slot is never visited — verify intentional. */
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
			    (struct sk_buff *) ((unsigned long) txdp->
				memset(txdp, 0, sizeof(TxD_t));
			memset(txdp, 0, sizeof(TxD_t));
			   "%s:forcibly freeing %d skbs on FIFO%d\n",
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
 * stop_nic - To stop the nic
 * @nic : device private variable.
 *
 * This function does exactly the opposite of what the start_nic()
 * function does. This function is called to stop the device.
/*
 * Opposite of start_nic(): mask all previously enabled interrupt blocks
 * and clear the RC-enable bit in every ring's PRC control register so the
 * receive controllers stop fetching descriptors.
 */
static void stop_nic(struct s2io_nic *nic)
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;
	u16 interruptible, i;
	mac_info_t *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Disable all interrupts */
	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
	    RX_MAC_INTR | MC_INTR;
	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);

	/* Stop the receive controller on every configured ring */
	for (i = 0; i < config->rx_ring_num; i++) {
		val64 = readq(&bar0->prc_ctrl_n[i]);
		val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
		writeq(val64, &bar0->prc_ctrl_n[i]);
1776 * fill_rx_buffers - Allocates the Rx side skbs
1777 * @nic: device private variable
1778 * @ring_no: ring number
1780 * The function allocates Rx side skbs and puts the physical
1781 * address of these buffers into the RxD buffer pointers, so that the NIC
1782 * can DMA the received frame into these locations.
1783 * The NIC supports 3 receive modes, viz
1785 * 2. three buffer and
1786 * 3. Five buffer modes.
1787 * Each mode defines how many fragments the received frame will be split
1788 * up into by the NIC. The frame is split into L3 header, L4 Header,
1789 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1790 * is split into 3 fragments. As of now only single buffer mode is
1793 * SUCCESS on success or an appropriate -ve value on failure.
/*
 * Replenish ring @ring_no: allocate skbs up to the ring's pkt_cnt minus
 * the buffers already outstanding, DMA-map each one and hand it to the
 * NIC by filling the RxD buffer pointers and setting RXD_OWN_XENA.
 * Handles both 1-buffer and 2-buffer (CONFIG_2BUFF_MODE) descriptor
 * layouts, follows END_OF_BLOCK markers to the next descriptor block,
 * and (non-NAPI) publishes put_pos under nic->put_lock for the Rx
 * interrupt handler.  Returns SUCCESS or a -ve errno on failure.
 */
int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	int off, off1, size, block_no, block_no1;
	int offset, offset1;
	mac_info_t *mac_control;
	struct config_param *config;
#ifdef CONFIG_2BUFF_MODE
	dma_addr_t rxdpphys;
#ifndef CONFIG_S2IO_NAPI
	unsigned long flags;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* How many buffers this ring is short of its full complement */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);
	/* Worst-case frame size: MTU plus all L2 header variants */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
	    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;

	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;
		off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
#ifndef CONFIG_2BUFF_MODE
		offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
		offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
		offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
		offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;

		rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
		    block_virt_addr + off;
		/* Ring full: put pointer caught up with get pointer */
		if ((offset == offset1) && (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
#ifndef CONFIG_2BUFF_MODE
		/* End of block: chase the link to the next RxD block */
		if (rxdp->Control_1 == END_OF_BLOCK) {
			mac_control->rings[ring_no].rx_curr_put_info.
			mac_control->rings[ring_no].rx_curr_put_info.
			    block_index %= mac_control->rings[ring_no].block_count;
			block_no = mac_control->rings[ring_no].rx_curr_put_info.
			off %= (MAX_RXDS_PER_BLOCK + 1);
			mac_control->rings[ring_no].rx_curr_put_info.offset =
			rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
#ifndef CONFIG_S2IO_NAPI
			/* Publish the new put position for rx_intr_handler */
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			    (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
		/* 2-buffer mode end-of-block handling */
		if (rxdp->Host_Control == END_OF_BLOCK) {
			mac_control->rings[ring_no].rx_curr_put_info.
			mac_control->rings[ring_no].rx_curr_put_info.block_index
			    %= mac_control->rings[ring_no].block_count;
			block_no = mac_control->rings[ring_no].rx_curr_put_info
			DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
				  dev->name, block_no,
				  (unsigned long long) rxdp->Control_1);
			mac_control->rings[ring_no].rx_curr_put_info.offset =
			rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
#ifndef CONFIG_S2IO_NAPI
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos = (block_no *
				 (MAX_RXDS_PER_BLOCK + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);

#ifndef CONFIG_2BUFF_MODE
		/* Descriptor still owned by the NIC — nothing to refill */
		if (rxdp->Control_1 & RXD_OWN_XENA)
		if (rxdp->Control_2 & BIT(0))
			mac_control->rings[ring_no].rx_curr_put_info.

#ifdef CONFIG_2BUFF_MODE
		/*
		 * RxDs Spanning cache lines will be replenished only
		 * if the succeeding RxD is also owned by Host. It
		 * will always be the ((8*i)+3) and ((8*i)+6)
		 * descriptors for the 48 byte descriptor. The offending
		 * descriptor is of-course the 3rd descriptor.
		 */
		rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
		    block_dma_addr + (off * sizeof(RxD_t));
		if (((u64) (rxdpphys)) % 128 > 80) {
			rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
			    block_virt_addr + (off + 1);
			if (rxdpnext->Host_Control == END_OF_BLOCK) {
				nextblk = (block_no + 1) %
				    (mac_control->rings[ring_no].block_count);
				rxdpnext = mac_control->rings[ring_no].rx_blocks
				    [nextblk].block_virt_addr;
			if (rxdpnext->Control_2 & BIT(0))

#ifndef CONFIG_2BUFF_MODE
		skb = dev_alloc_skb(size + NET_IP_ALIGN);
		skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
#ifndef CONFIG_2BUFF_MODE
		/* 1-buffer mode: whole frame lands in Buffer0 */
		skb_reserve(skb, NET_IP_ALIGN);
		memset(rxdp, 0, sizeof(RxD_t));
		rxdp->Buffer0_ptr = pci_map_single
		    (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
		rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
		rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
		rxdp->Host_Control = (unsigned long) (skb);
		/* Hand the descriptor to the NIC last */
		rxdp->Control_1 |= RXD_OWN_XENA;
		off %= (MAX_RXDS_PER_BLOCK + 1);
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;
		/* 2-buffer mode: header in ba_0/ba_1, payload in Buffer2 */
		ba = &mac_control->rings[ring_no].ba[block_no][off];
		skb_reserve(skb, BUF0_LEN);
		tmp = ((unsigned long) skb->data & ALIGN_SIZE);
			skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);

		memset(rxdp, 0, sizeof(RxD_t));
		rxdp->Buffer2_ptr = pci_map_single
		    (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
		     PCI_DMA_FROMDEVICE);
		    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
				   PCI_DMA_FROMDEVICE);
		    pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
				   PCI_DMA_FROMDEVICE);

		rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
		rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
		rxdp->Control_2 |= SET_BUFFER1_SIZE(1);	/* dummy. */
		rxdp->Control_2 |= BIT(0);	/* Set Buffer_Empty bit. */
		rxdp->Host_Control = (u64) ((unsigned long) (skb));
		rxdp->Control_1 |= RXD_OWN_XENA;
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;
		rxdp->Control_2 |= SET_RXD_MARKER;

		atomic_inc(&nic->rx_bufs_left[ring_no]);
1988 * free_rx_buffers - Frees all Rx buffers
1989 * @sp: device private variable.
1991 * This function will free all Rx buffers allocated by host.
/*
 * Free every host-allocated Rx buffer on all rings (teardown path):
 * unmap the DMA buffers, zero the RxDs, reset each ring's get/put
 * bookkeeping and the rx_bufs_left counter.
 */
static void free_rx_buffers(struct s2io_nic *sp)
	struct net_device *dev = sp->dev;
	int i, j, blk = 0, off, buf_cnt = 0;
	struct sk_buff *skb;
	mac_info_t *mac_control;
	struct config_param *config;
#ifdef CONFIG_2BUFF_MODE

	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
			off = j % (MAX_RXDS_PER_BLOCK + 1);
			rxdp = mac_control->rings[i].rx_blocks[blk].
			    block_virt_addr + off;

#ifndef CONFIG_2BUFF_MODE
			/* Skip the link descriptor at the end of each block */
			if (rxdp->Control_1 == END_OF_BLOCK) {
				    (RxD_t *) ((unsigned long) rxdp->
			if (rxdp->Host_Control == END_OF_BLOCK) {
			/* Descriptor owned by host with no skb attached */
			if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
				memset(rxdp, 0, sizeof(RxD_t));
			    (struct sk_buff *) ((unsigned long) rxdp->
#ifndef CONFIG_2BUFF_MODE
			pci_unmap_single(sp->pdev, (dma_addr_t)
					 HEADER_ETHERNET_II_802_3_SIZE
					 + HEADER_802_2_SIZE +
					 PCI_DMA_FROMDEVICE);
			/* 2-buffer mode: unmap ba_0, ba_1 and the payload */
			ba = &mac_control->rings[i].ba[blk][off];
			pci_unmap_single(sp->pdev, (dma_addr_t)
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
					 PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
					 dev->mtu + BUF0_LEN + 4,
					 PCI_DMA_FROMDEVICE);
			atomic_dec(&sp->rx_bufs_left[i]);
			memset(rxdp, 0, sizeof(RxD_t));
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		atomic_set(&sp->rx_bufs_left[i], 0);
		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
			  dev->name, buf_cnt, i);
2081 * s2io_poll - Rx interrupt handler for NAPI support
2082 * @dev : pointer to the device structure.
2083 * @budget : The number of packets that were budgeted to be processed
2084 * during one pass through the 'Poll" function.
2086 * Comes into picture only if NAPI support has been incorporated. It does
2087 * the same thing that rx_intr_handler does, but not in a interrupt context
2088 * also It will process only a given number of packets.
2090 * 0 on success and 1 if there are No Rx packets to be processed.
#if defined(CONFIG_S2IO_NAPI)
/*
 * NAPI poll callback: clamp the budget to dev->quota, ack the Rx traffic
 * interrupt (R1 register: written back with the value read to clear it),
 * run rx_intr_handler() on every ring until the quota is consumed, then
 * refill the rings and — on the completed path — call netif_rx_complete()
 * and re-enable Rx interrupts.
 */
static int s2io_poll(struct net_device *dev, int *budget)
	nic_t *nic = dev->priv;
	int pkt_cnt = 0, org_pkts_to_process;
	mac_info_t *mac_control;
	struct config_param *config;
	/* NOTE(review): cast drops __iomem here unlike the other handlers —
	 * confirm whether the annotation was lost in extraction. */
	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;

	mac_control = &nic->mac_control;
	config = &nic->config;

	nic->pkts_to_process = *budget;
	if (nic->pkts_to_process > dev->quota)
		nic->pkts_to_process = dev->quota;
	org_pkts_to_process = nic->pkts_to_process;

	/* Ack the Rx traffic interrupt (write-1-to-clear semantics) */
	val64 = readq(&bar0->rx_traffic_int);
	writeq(val64, &bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */

	/* All work done: report consumed quota and leave polling mode */
	dev->quota -= pkt_cnt;
	netif_rx_complete(dev);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
	/* Re enable the Rx interrupts. */
	en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);

	/* Quota exhausted: stay in polling mode, just refill the rings */
	dev->quota -= pkt_cnt;
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2157 * rx_intr_handler - Rx interrupt handler
2158 * @nic: device private variable.
2160 * If the interrupt is because of a received frame or if the
2161 * receive ring contains fresh as yet un-processed frames,this function is
2162 * called. It picks out the RxD at which place the last Rx processing had
2163 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * Process received frames on one ring: walk descriptors from the last
 * get position while they are host-owned (RXD_IS_UP2DT) and the get
 * pointer has not caught up with the put pointer, unmap each buffer,
 * pass it to rx_osm_handler() and advance across block boundaries.
 * Under NAPI the walk also stops when nic->pkts_to_process reaches 0.
 */
static void rx_intr_handler(ring_info_t *ring_data)
	nic_t *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	int get_block, get_offset, put_block, put_offset, ring_bufs;
	rx_curr_get_info_t get_info, put_info;
	struct sk_buff *skb;
#ifndef CONFIG_S2IO_NAPI
	/*
	 * rx_traffic_int reg is an R1 register, hence we read and write
	 * back the same value in the register to clear it
	 */
	/* NOTE(review): comment says rx_traffic_int but the code reads and
	 * writes tx_traffic_int — verify against the register map; an Rx
	 * handler acking the Tx interrupt register looks wrong. */
	val64 = readq(&bar0->tx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	put_info = ring_data->rx_curr_put_info;
	put_block = put_info.block_index;
	ring_bufs = get_info.ring_len+1;
	rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
	get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
#ifndef CONFIG_S2IO_NAPI
	/* Non-NAPI: put_pos is shared with fill_rx_buffers, so lock */
	spin_lock(&nic->put_lock);
	put_offset = ring_data->put_pos;
	spin_unlock(&nic->put_lock);
	put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +

	while (RXD_IS_UP2DT(rxdp) &&
	       (((get_offset + 1) % ring_bufs) != put_offset)) {
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
#ifndef CONFIG_2BUFF_MODE
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 HEADER_ETHERNET_II_802_3_SIZE +
				 PCI_DMA_FROMDEVICE);
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 BUF0_LEN, PCI_DMA_FROMDEVICE);
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 BUF1_LEN, PCI_DMA_FROMDEVICE);
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 dev->mtu + BUF0_LEN + 4,
				 PCI_DMA_FROMDEVICE);
		/* Hand the frame up the stack */
		rx_osm_handler(ring_data, rxdp);
		ring_data->rx_curr_get_info.offset =
		rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
		/* Crossed a block boundary: wrap to the next block */
		if (get_info.offset &&
		    (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset
			get_block %= ring_data->block_count;
			ring_data->rx_curr_get_info.block_index
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;

		get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
#ifdef CONFIG_S2IO_NAPI
		nic->pkts_to_process -= 1;
		if (!nic->pkts_to_process)
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2268 * tx_intr_handler - Transmit interrupt handler
2269 * @nic : device private variable
2271 * If an interrupt was raised to indicate DMA complete of the
2272 * Tx packet, this function is called. It identifies the last TxD
2273 * whose buffer was freed and frees all skbs whose data have already
2274 * DMA'ed into the NICs internal memory.
/*
 * Reclaim completed Tx descriptors on one FIFO: ack the Tx traffic
 * interrupt, then walk from the last get offset while descriptors are no
 * longer NIC-owned (TXD_LIST_OWN_XENA clear), reporting any T_CODE error,
 * unmapping the buffer (and its page fragments), updating statistics and
 * freeing the skb.  Finally wake the queue if it was stopped.
 */
static void tx_intr_handler(fifo_info_t *fifo_data)
	nic_t *nic = fifo_data->nic;
	XENA_dev_config_t __iomem *bar0 = nic->bar0;
	struct net_device *dev = (struct net_device *) nic->dev;
	tx_curr_get_info_t get_info, put_info;
	struct sk_buff *skb;
	register u64 val64 = 0;

	/*
	 * tx_traffic_int reg is an R1 register, hence we read and write
	 * back the same value in the register to clear it
	 */
	val64 = readq(&bar0->tx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	get_info = fifo_data->tx_curr_get_info;
	put_info = fifo_data->tx_curr_put_info;
	txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			DBG_PRINT(ERR_DBG, "***TxD error %llx\n",

		skb = (struct sk_buff *) ((unsigned long)
					  txdlp->Host_Control);
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");

		frg_cnt = skb_shinfo(skb)->nr_frags;
		nic->tx_pkt_count++;

		/* Unmap the linear part of the skb */
		pci_unmap_single(nic->pdev, (dma_addr_t)
				 txdlp->Buffer_Pointer,
				 skb->len - skb->data_len,
			/* Unmap each paged fragment */
			for (j = 0; j < frg_cnt; j++, txdlp++) {
				    &skb_shinfo(skb)->frags[j];
				pci_unmap_page(nic->pdev,
		       (sizeof(TxD_t) * fifo_data->max_txds));

		/* Updating the statistics block */
		nic->stats.tx_packets++;
		nic->stats.tx_bytes += skb->len;
		dev_kfree_skb_irq(skb);

		/* Advance the get offset, wrapping at fifo_len + 1 */
		get_info.offset %= get_info.fifo_len + 1;
		txdlp = (TxD_t *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =

	/* Descriptors were reclaimed — restart the stack's queue */
	spin_lock(&nic->tx_lock);
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
	spin_unlock(&nic->tx_lock);
 * alarm_intr_handler - Alarm Interrupt handler
2368 * @nic: device private variable
2369 * Description: If the interrupt was neither because of Rx packet or Tx
2370 * complete, this function is called. If the interrupt was to indicate
2371 * a loss of link, the OSM link status handler is invoked for any other
2372 * alarm interrupt the block that raised the interrupt is displayed
2373 * and a H/W reset is issued.
/*
 * Decode and service "alarm" (error/link) interrupt causes.
 * NOTE(review): this extracted view is missing several original lines
 * (braces/blank lines); only comments were added below.
 */
2378 static void alarm_intr_handler(struct s2io_nic *nic)
2380 struct net_device *dev = (struct net_device *) nic->dev;
2381 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2382 register u64 val64 = 0, err_reg = 0;
2384 /* Handling link status change error Intr */
/* Read-then-write-back of the same value appears to acknowledge the
 * latched error bits (same idiom the driver uses for the R1-type
 * tx_traffic_int register) — TODO confirm against the register spec. */
2385 err_reg = readq(&bar0->mac_rmac_err_reg);
2386 writeq(err_reg, &bar0->mac_rmac_err_reg);
2387 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
/* Link state changed: defer handling to the set_link_task workqueue. */
2388 schedule_work(&nic->set_link_task);
2391 /* Handling Ecc errors */
2392 val64 = readq(&bar0->mc_err_reg);
2393 writeq(val64, &bar0->mc_err_reg);
2394 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2395 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
/* Double-bit ECC is unrecoverable here: stop Tx and schedule a reset. */
2396 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2398 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2399 netif_stop_queue(dev);
2400 schedule_work(&nic->rst_timer_task);
2402 /* Device can recover from Single ECC errors */
2406 /* In case of a serious error, the device will be Reset. */
2407 val64 = readq(&bar0->serr_source);
2408 if (val64 & SERR_SOURCE_ANY) {
2409 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2410 DBG_PRINT(ERR_DBG, "serious error!!\n");
2411 netif_stop_queue(dev);
2412 schedule_work(&nic->rst_timer_task);
2416 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2417 * Error occurs, the adapter will be recycled by disabling the
2418 * adapter enable bit and enabling it again after the device
2419 * becomes Quiescent.
2421 val64 = readq(&bar0->pcc_err_reg);
2422 writeq(val64, &bar0->pcc_err_reg);
2423 if (val64 & PCC_FB_ECC_DB_ERR) {
/* Disable the adapter; re-enable is driven by the set_link_task work. */
2424 u64 ac = readq(&bar0->adapter_control);
2425 ac &= ~(ADAPTER_CNTL_EN);
2426 writeq(ac, &bar0->adapter_control);
/* Read-back likely flushes the posted write before scheduling work. */
2427 ac = readq(&bar0->adapter_control);
2428 schedule_work(&nic->set_link_task);
2431 /* Other type of interrupts are not being handled now, TODO */
2435 * wait_for_cmd_complete - waits for a command to complete.
2436 * @sp : private member of the device structure, which is a pointer to the
2437 * s2io_nic structure.
2438 * Description: Function that waits for a command to Write into RMAC
2439 * ADDR DATA registers to be completed and returns either success or
2440 * error depending on whether the command was complete or not.
2442 * SUCCESS on success and FAILURE on failure.
/*
 * Poll the RMAC address command-memory register until the STROBE
 * "executing" bit clears; returns SUCCESS or FAILURE (ret starts as
 * FAILURE and is presumably flipped on completion — the loop body and
 * return are missing from this extracted view).
 */
2445 int wait_for_cmd_complete(nic_t * sp)
2447 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2448 int ret = FAILURE, cnt = 0;
2452 val64 = readq(&bar0->rmac_addr_cmd_mem);
2453 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2466 * s2io_reset - Resets the card.
2467 * @sp : private member of the device structure.
2468 * Description: Function to Reset the card. This function then also
2469 * restores the previously saved PCI configuration space registers as
2470 * the card reset also resets the configuration space.
/*
 * Full software reset of the card, followed by restoration of PCI
 * config space, swapper setup, and clearing of error status bits.
 */
2475 void s2io_reset(nic_t * sp)
2477 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2481 val64 = SW_RESET_ALL;
2482 writeq(val64, &bar0->sw_reset);
2485 * At this stage, if the PCI write is indeed completed, the
2486 * card is reset and so is the PCI Config space of the device.
2487 * So a read cannot be issued at this stage on any of the
2488 * registers to ensure the write into "sw_reset" register
2490 * Question: Is there any system call that will explicitly force
2491 * all the write commands still pending on the bus to be pushed
2493 * As of now I am just giving a 250ms delay and hoping that the
2494 * PCI write to sw_reset register is done by this time.
2498 /* Restore the PCI state saved during initialization. */
2499 pci_restore_state(sp->pdev);
2505 /* Set swapper to enable I/O register access */
2506 s2io_set_swapper(sp);
2508 /* Clear certain PCI/PCI-X fields after reset */
2509 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2510 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2511 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2513 val64 = readq(&bar0->txpic_int_reg);
2514 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2515 writeq(val64, &bar0->txpic_int_reg);
2517 /* Clearing PCIX Ecc status register */
/* 0x68 is a raw config-space offset; presumably the PCI-X ECC status
 * register on this device — TODO confirm against the device datasheet. */
2518 pci_write_config_dword(sp->pdev, 0x68, 0);
2520 /* Reset device statistics maintained by OS */
2521 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2523 /* SXE-002: Configure link and activity LED to turn it off */
/* Subsystem IDs with low byte >= 0x07 identify card revisions that need
 * this LED workaround (same check appears in s2io_phy_id/idnic). */
2524 subid = sp->pdev->subsystem_device;
2525 if ((subid & 0xFF) >= 0x07) {
2526 val64 = readq(&bar0->gpio_control);
2527 val64 |= 0x0000800000000000ULL;
2528 writeq(val64, &bar0->gpio_control);
2529 val64 = 0x0411040400000000ULL;
2530 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2533 sp->device_enabled_once = FALSE;
2537 * s2io_set_swapper - to set the swapper control on the card
2538 * @sp : private member of the device structure,
2539 * pointer to the s2io_nic structure.
2540 * Description: Function to set the swapper control on the card
2541 * correctly depending on the 'endianness' of the system.
2543 * SUCCESS on success and FAILURE on failure.
/*
 * Program the byte-swapper so register reads/writes match host
 * endianness; verified via the PIF feedback register, which must read
 * back the known pattern 0x0123456789ABCDEF when settings are correct.
 */
2546 int s2io_set_swapper(nic_t * sp)
2548 struct net_device *dev = sp->dev;
2549 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2550 u64 val64, valt, valr;
2553 * Set proper endian settings and verify the same by reading
2554 * the PIF Feed-back register.
2557 val64 = readq(&bar0->pif_rd_swapper_fb);
2558 if (val64 != 0x0123456789ABCDEFULL) {
/* Try each FE (flip-endian) / SE (swap-endian) combination until the
 * feedback register reads the magic pattern. */
2560 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2561 0x8100008181000081ULL, /* FE=1, SE=0 */
2562 0x4200004242000042ULL, /* FE=0, SE=1 */
2563 0}; /* FE=0, SE=0 */
2566 writeq(value[i], &bar0->swapper_ctrl);
2567 val64 = readq(&bar0->pif_rd_swapper_fb);
2568 if (val64 == 0x0123456789ABCDEFULL)
2573 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2575 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2576 (unsigned long long) val64);
2581 valr = readq(&bar0->swapper_ctrl);
/* Second probe: verify write-path swapping by writing the pattern to
 * xmsi_address and reading it back. */
2584 valt = 0x0123456789ABCDEFULL;
2585 writeq(valt, &bar0->xmsi_address);
2586 val64 = readq(&bar0->xmsi_address);
2590 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2591 0x0081810000818100ULL, /* FE=1, SE=0 */
2592 0x0042420000424200ULL, /* FE=0, SE=1 */
2593 0}; /* FE=0, SE=0 */
2596 writeq((value[i] | valr), &bar0->swapper_ctrl);
2597 writeq(valt, &bar0->xmsi_address);
2598 val64 = readq(&bar0->xmsi_address);
2604 unsigned long long x = val64;
2605 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2606 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Keep only the already-verified high 16 bits, then OR in the per-path
 * FE/SE enables.  Two variants follow — presumably one per host
 * endianness, selected by a preprocessor conditional dropped from this
 * extracted view (TODO confirm). */
2610 val64 = readq(&bar0->swapper_ctrl);
2611 val64 &= 0xFFFF000000000000ULL;
2615 * The device by default set to a big endian format, so a
2616 * big endian driver need not set anything.
2618 val64 |= (SWAPPER_CTRL_TXP_FE |
2619 SWAPPER_CTRL_TXP_SE |
2620 SWAPPER_CTRL_TXD_R_FE |
2621 SWAPPER_CTRL_TXD_W_FE |
2622 SWAPPER_CTRL_TXF_R_FE |
2623 SWAPPER_CTRL_RXD_R_FE |
2624 SWAPPER_CTRL_RXD_W_FE |
2625 SWAPPER_CTRL_RXF_W_FE |
2626 SWAPPER_CTRL_XMSI_FE |
2627 SWAPPER_CTRL_XMSI_SE |
2628 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2629 writeq(val64, &bar0->swapper_ctrl);
2632 * Initially we enable all bits to make it accessible by the
2633 * driver, then we selectively enable only those bits that
2636 val64 |= (SWAPPER_CTRL_TXP_FE |
2637 SWAPPER_CTRL_TXP_SE |
2638 SWAPPER_CTRL_TXD_R_FE |
2639 SWAPPER_CTRL_TXD_R_SE |
2640 SWAPPER_CTRL_TXD_W_FE |
2641 SWAPPER_CTRL_TXD_W_SE |
2642 SWAPPER_CTRL_TXF_R_FE |
2643 SWAPPER_CTRL_RXD_R_FE |
2644 SWAPPER_CTRL_RXD_R_SE |
2645 SWAPPER_CTRL_RXD_W_FE |
2646 SWAPPER_CTRL_RXD_W_SE |
2647 SWAPPER_CTRL_RXF_W_FE |
2648 SWAPPER_CTRL_XMSI_FE |
2649 SWAPPER_CTRL_XMSI_SE |
2650 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2651 writeq(val64, &bar0->swapper_ctrl);
2653 val64 = readq(&bar0->swapper_ctrl);
2656 * Verifying if endian settings are accurate by reading a
2657 * feedback register.
2659 val64 = readq(&bar0->pif_rd_swapper_fb);
2660 if (val64 != 0x0123456789ABCDEFULL) {
2661 /* Endian settings are incorrect, calls for another dekko. */
2662 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2664 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2665 (unsigned long long) val64);
2672 /* ********************************************************* *
2673 * Functions defined below concern the OS part of the driver *
2674 * ********************************************************* */
2677 * s2io_open - open entry point of the driver
2678 * @dev : pointer to the device structure.
2680 * This function is the open entry point of the driver. It mainly calls a
2681 * function to allocate Rx buffers and inserts them into the buffer
2682 * descriptors and then enables the Rx part of the NIC.
2684 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * net_device open entry point: bring H/W up, register the ISR, program
 * the MAC address, then start the Tx queue.  Error paths unwind in
 * reverse order via the labels at the bottom.
 */
2688 int s2io_open(struct net_device *dev)
2690 nic_t *sp = dev->priv;
2694 * Make sure you have link off by default every time
2695 * Nic is initialized
2697 netif_carrier_off(dev);
2698 sp->last_link_state = LINK_DOWN;
2700 /* Initialize H/W and enable interrupts */
2701 if (s2io_card_up(sp)) {
2702 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2705 goto hw_init_failed;
2708 /* After proper initialization of H/W, register ISR */
/* SA_SHIRQ: the IRQ line may be shared with other devices. */
2709 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2712 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2714 goto isr_registration_failed;
2717 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2718 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2720 goto setting_mac_address_failed;
2723 netif_start_queue(dev);
2726 setting_mac_address_failed:
2727 free_irq(sp->pdev->irq, dev);
2728 isr_registration_failed:
2735 * s2io_close -close entry point of the driver
2736 * @dev : device pointer.
2738 * This is the stop entry point of the driver. It needs to undo exactly
2739 * whatever was done by the open entry point,thus it's usually referred to
2740 * as the close function.Among other things this function mainly stops the
2741 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2743 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * net_device stop entry point: drain pending workqueue items, stop the
 * Tx queue, tear the card down, and release the IRQ.
 */
2747 int s2io_close(struct net_device *dev)
2749 nic_t *sp = dev->priv;
/* Flush set_link_task/rst_timer_task before tearing the device down. */
2750 flush_scheduled_work();
2751 netif_stop_queue(dev);
2752 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2755 free_irq(sp->pdev->irq, dev);
2756 sp->device_close_flag = TRUE; /* Device is shut down. */
2761 * s2io_xmit - Tx entry point of the driver
2762 * @skb : the socket buffer containing the Tx data.
2763 * @dev : device pointer.
2765 * This function is the Tx entry point of the driver. S2IO NIC supports
2766 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2767 * NOTE: when device cant queue the pkt,just the trans_start variable will
2770 * 0 on success & 1 on failure.
/*
 * hard_start_xmit: build a TxD list for the skb (linear part + page
 * fragments), hand ownership to the NIC, and kick the Tx FIFO.
 * Runs under sp->tx_lock with IRQs disabled.
 */
2773 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2775 nic_t *sp = dev->priv;
2776 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2779 TxFIFO_element_t __iomem *tx_fifo;
2780 unsigned long flags;
2784 mac_info_t *mac_control;
2785 struct config_param *config;
2786 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2788 mac_control = &sp->mac_control;
2789 config = &sp->config;
2791 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2792 spin_lock_irqsave(&sp->tx_lock, flags);
/* Refuse to queue while a reset is in progress. */
2793 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2794 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2796 spin_unlock_irqrestore(&sp->tx_lock, flags);
2803 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2804 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2805 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2808 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2809 /* Avoid "put" pointer going beyond "get" pointer */
/* Ring full when the next put slot would collide with get, or the
 * descriptor is still owned by an in-flight skb (Host_Control set). */
2810 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2811 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2812 netif_stop_queue(dev);
2814 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* TSO: program LSO enable + MSS into the first descriptor. */
2818 mss = skb_shinfo(skb)->tso_size;
2820 txdp->Control_1 |= TXD_TCP_LSO_EN;
2821 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2825 frg_cnt = skb_shinfo(skb)->nr_frags;
2826 frg_len = skb->len - skb->data_len;
/* DMA-map the linear part; skb pointer stashed in Host_Control so the
 * Tx-complete handler can free it. */
2828 txdp->Buffer_Pointer = pci_map_single
2829 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2830 txdp->Host_Control = (unsigned long) skb;
2831 if (skb->ip_summed == CHECKSUM_HW) {
2833 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2837 txdp->Control_2 |= config->tx_intr_type;
2839 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2840 TXD_GATHER_CODE_FIRST);
2841 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2843 /* For fragmented SKB. */
2844 for (i = 0; i < frg_cnt; i++) {
2845 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2847 txdp->Buffer_Pointer = (u64) pci_map_page
2848 (sp->pdev, frag->page, frag->page_offset,
2849 frag->size, PCI_DMA_TODEVICE);
2850 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2852 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Point the FIFO at the descriptor list and ring the doorbell. */
2854 tx_fifo = mac_control->tx_FIFO_start[queue];
2855 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2856 writeq(val64, &tx_fifo->TxDL_Pointer);
2858 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2863 val64 |= TX_FIFO_SPECIAL_FUNC;
2865 writeq(val64, &tx_fifo->List_Control);
2867 /* Perform a PCI read to flush previous writes */
2868 val64 = readq(&bar0->general_int_status);
2871 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2872 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2874 /* Avoid "put" pointer going beyond "get" pointer */
2875 if (((put_off + 1) % queue_len) == get_off) {
2877 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2879 netif_stop_queue(dev);
2882 dev->trans_start = jiffies;
2883 spin_unlock_irqrestore(&sp->tx_lock, flags);
2889 * s2io_isr - ISR handler of the device .
2890 * @irq: the irq of the device.
2891 * @dev_id: a void pointer to the dev structure of the NIC.
2892 * @pt_regs: pointer to the registers pushed on the stack.
2893 * Description: This function is the ISR handler of the device. It
2894 * identifies the reason for the interrupt and calls the relevant
2895 * service routines. As a contingency measure, this ISR allocates the
2896 * recv buffers, if their numbers are below the panic value which is
2897 * presently set to 25% of the original number of rcv buffers allocated.
2899 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2900 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * Shared-IRQ handler: reads general_int_status to classify the cause
 * (alarm, Rx traffic, Tx traffic) and dispatches.  Rx servicing is
 * either NAPI-scheduled or handled inline, per CONFIG_S2IO_NAPI.
 */
2902 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2904 struct net_device *dev = (struct net_device *) dev_id;
2905 nic_t *sp = dev->priv;
2906 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2909 mac_info_t *mac_control;
2910 struct config_param *config;
2912 mac_control = &sp->mac_control;
2913 config = &sp->config;
2916 * Identify the cause for interrupt and call the appropriate
2917 * interrupt handler. Causes for the interrupt could be;
2921 * 4. Error in any functional blocks of the NIC.
2923 reason = readq(&bar0->general_int_status);
2926 /* The interrupt was not raised by Xena. */
2930 if (reason & (GEN_ERROR_INTR))
2931 alarm_intr_handler(sp);
2933 #ifdef CONFIG_S2IO_NAPI
/* NAPI path: mask Rx interrupts and let the poll routine do the work. */
2934 if (reason & GEN_INTR_RXTRAFFIC) {
2935 if (netif_rx_schedule_prep(dev)) {
2936 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2938 __netif_rx_schedule(dev);
2942 /* If Intr is because of Rx Traffic */
2943 if (reason & GEN_INTR_RXTRAFFIC) {
2944 for (i = 0; i < config->rx_ring_num; i++) {
2945 rx_intr_handler(&mac_control->rings[i]);
2950 /* If Intr is because of Tx Traffic */
2951 if (reason & GEN_INTR_TXTRAFFIC) {
2952 for (i = 0; i < config->tx_fifo_num; i++)
2953 tx_intr_handler(&mac_control->fifos[i]);
2957 * If the Rx buffer count is below the panic threshold then
2958 * reallocate the buffers from the interrupt handler itself,
2959 * else schedule a tasklet to reallocate the buffers.
2961 #ifndef CONFIG_S2IO_NAPI
2962 for (i = 0; i < config->rx_ring_num; i++) {
2964 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2965 int level = rx_buffer_level(sp, rxb_size, i);
/* PANIC level: refill synchronously in the ISR, guarded by the
 * tasklet_status bit so the refill tasklet cannot run concurrently. */
2967 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2968 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2969 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2970 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2971 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2973 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2974 clear_bit(0, (&sp->tasklet_status));
2977 clear_bit(0, (&sp->tasklet_status));
2978 } else if (level == LOW) {
2979 tasklet_schedule(&sp->task);
2988 * s2io_get_stats - Updates the device statistics structure.
2989 * @dev : pointer to the device structure.
2991 * This function updates the device statistics structure in the s2io_nic
2992 * structure and returns a pointer to the same.
2994 * pointer to the updated net_device_stats structure.
/*
 * Refresh a few net_device_stats fields from the H/W statistics block
 * (which is little-endian, hence le32_to_cpu) and return the struct.
 */
2997 struct net_device_stats *s2io_get_stats(struct net_device *dev)
2999 nic_t *sp = dev->priv;
3000 mac_info_t *mac_control;
3001 struct config_param *config;
3004 mac_control = &sp->mac_control;
3005 config = &sp->config;
3007 sp->stats.tx_errors =
3008 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3009 sp->stats.rx_errors =
3010 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3011 sp->stats.multicast =
3012 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3013 sp->stats.rx_length_errors =
3014 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3016 return (&sp->stats);
3020 * s2io_set_multicast - entry point for multicast address enable/disable.
3021 * @dev : pointer to the device structure
3023 * This function is a driver entry point which gets called by the kernel
3024 * whenever multicast addresses must be enabled/disabled. This also gets
3025 * called to set/reset promiscuous mode. Depending on the device flag, we
3026 * determine, if multicast address must be enabled or if promiscuous mode
3027 * is to be disabled etc.
/*
 * Program the RMAC address-filter memory according to dev->flags:
 * all-multi on/off, promiscuous on/off, and the per-address multicast
 * list.  Each filter slot is written via the data0/data1 registers
 * followed by a strobed command, completed with wait_for_cmd_complete().
 */
3032 static void s2io_set_multicast(struct net_device *dev)
3035 struct dev_mc_list *mclist;
3036 nic_t *sp = dev->priv;
3037 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3038 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3040 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3043 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3044 /* Enable all Multicast addresses */
3045 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3046 &bar0->rmac_addr_data0_mem);
3047 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3048 &bar0->rmac_addr_data1_mem);
3049 val64 = RMAC_ADDR_CMD_MEM_WE |
3050 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3051 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3052 writeq(val64, &bar0->rmac_addr_cmd_mem);
3053 /* Wait till command completes */
3054 wait_for_cmd_complete(sp);
3057 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3058 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3059 /* Disable all Multicast addresses */
/* dis_addr (all-ones) written with a zero mask disables the slot. */
3060 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3061 &bar0->rmac_addr_data0_mem);
3062 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3063 &bar0->rmac_addr_data1_mem);
3064 val64 = RMAC_ADDR_CMD_MEM_WE |
3065 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3066 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3067 writeq(val64, &bar0->rmac_addr_cmd_mem);
3068 /* Wait till command completes */
3069 wait_for_cmd_complete(sp);
3072 sp->all_multi_pos = 0;
3075 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3076 /* Put the NIC into promiscuous mode */
/* mac_cfg is key-protected: each 32-bit half-write must be preceded by
 * writing the unlock key to rmac_cfg_key. */
3077 add = &bar0->mac_cfg;
3078 val64 = readq(&bar0->mac_cfg);
3079 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3081 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3082 writel((u32) val64, add);
3083 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3084 writel((u32) (val64 >> 32), (add + 4));
3086 val64 = readq(&bar0->mac_cfg);
3087 sp->promisc_flg = 1;
3088 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3090 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3091 /* Remove the NIC from promiscuous mode */
3092 add = &bar0->mac_cfg;
3093 val64 = readq(&bar0->mac_cfg);
3094 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3096 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3097 writel((u32) val64, add);
3098 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3099 writel((u32) (val64 >> 32), (add + 4));
3101 val64 = readq(&bar0->mac_cfg);
3102 sp->promisc_flg = 0;
3103 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3107 /* Update individual M_CAST address list */
3108 if ((!sp->m_cast_flg) && dev->mc_count) {
3110 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3111 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3113 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3114 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3118 prev_cnt = sp->mc_addr_count;
3119 sp->mc_addr_count = dev->mc_count;
3121 /* Clear out the previous list of Mc in the H/W. */
3122 for (i = 0; i < prev_cnt; i++) {
3123 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3124 &bar0->rmac_addr_data0_mem);
3125 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3126 &bar0->rmac_addr_data1_mem);
3127 val64 = RMAC_ADDR_CMD_MEM_WE |
3128 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3129 RMAC_ADDR_CMD_MEM_OFFSET
3130 (MAC_MC_ADDR_START_OFFSET + i);
3131 writeq(val64, &bar0->rmac_addr_cmd_mem);
3133 /* Wait for command completes */
3134 if (wait_for_cmd_complete(sp)) {
3135 DBG_PRINT(ERR_DBG, "%s: Adding ",
3137 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3142 /* Create the new Rx filter list and update the same in H/W. */
3143 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3144 i++, mclist = mclist->next) {
3145 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into a u64 for the data0 register. */
3147 for (j = 0; j < ETH_ALEN; j++) {
3148 mac_addr |= mclist->dmi_addr[j];
3152 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3153 &bar0->rmac_addr_data0_mem);
3154 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3155 &bar0->rmac_addr_data1_mem);
3156 val64 = RMAC_ADDR_CMD_MEM_WE |
3157 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3158 RMAC_ADDR_CMD_MEM_OFFSET
3159 (i + MAC_MC_ADDR_START_OFFSET);
3160 writeq(val64, &bar0->rmac_addr_cmd_mem);
3162 /* Wait for command completes */
3163 if (wait_for_cmd_complete(sp)) {
3164 DBG_PRINT(ERR_DBG, "%s: Adding ",
3166 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3174 * s2io_set_mac_addr - Programs the Xframe mac address
3175 * @dev : pointer to the device structure.
3176 * @addr: a uchar pointer to the new mac address which is to be set.
3177 * Description : This procedure will program the Xframe to receive
3178 * frames with new Mac Address
3179 * Return value: SUCCESS on success and an appropriate (-)ve integer
3180 * as defined in errno.h file on failure.
/*
 * Program the unicast filter (slot 0 of the RMAC address memory) with
 * the given MAC address.  Returns FAILURE if the strobed command does
 * not complete.
 */
3183 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3185 nic_t *sp = dev->priv;
3186 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3187 register u64 val64, mac_addr = 0;
3191 * Set the new MAC address as the new unicast filter and reflect this
3192 * change on the device address registered with the OS. It will be
/* Pack the 6 address bytes into a u64 (shift presumably happens on a
 * line missing from this extracted view). */
3195 for (i = 0; i < ETH_ALEN; i++) {
3197 mac_addr |= addr[i];
3200 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3201 &bar0->rmac_addr_data0_mem);
3204 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3205 RMAC_ADDR_CMD_MEM_OFFSET(0);
3206 writeq(val64, &bar0->rmac_addr_cmd_mem);
3207 /* Wait till command completes */
3208 if (wait_for_cmd_complete(sp)) {
3209 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3217 * s2io_ethtool_sset - Sets different link parameters.
3218 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3219 * @info: pointer to the structure with parameters given by ethtool to set
3222 * The function sets different link parameters provided by the user onto
/*
 * ethtool set_settings: this NIC only supports forced 10G full duplex,
 * so anything else (autoneg, other speed/duplex) is rejected; a valid
 * request bounces the interface (close, then presumably re-open on a
 * line missing from this extracted view).
 */
3228 static int s2io_ethtool_sset(struct net_device *dev,
3229 struct ethtool_cmd *info)
3231 nic_t *sp = dev->priv;
3232 if ((info->autoneg == AUTONEG_ENABLE) ||
3233 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3236 s2io_close(sp->dev);
3244 * s2io_ethtool_gset - Return link specific information.
3245 * @sp : private member of the device structure, pointer to the
3246 * s2io_nic structure.
3247 * @info : pointer to the structure with parameters given by ethtool
3248 * to return link information.
3250 * Returns link specific information like speed, duplex etc.. to ethtool.
3252 * return 0 on success.
/*
 * ethtool get_settings: report fixed 10GBase fibre capabilities and the
 * current carrier-dependent speed/duplex.
 */
3255 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3257 nic_t *sp = dev->priv;
3258 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): advertising is filled with SUPPORTED_* masks; ethtool
 * convention uses the ADVERTISED_* constants here — the bit values may
 * coincide, but confirm against linux/ethtool.h. */
3259 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3260 info->port = PORT_FIBRE;
3261 /* info->transceiver?? TODO */
3263 if (netif_carrier_ok(sp->dev)) {
3264 info->speed = 10000;
3265 info->duplex = DUPLEX_FULL;
3271 info->autoneg = AUTONEG_DISABLE;
3276 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3277 * @sp : private member of the device structure, which is a pointer to the
3278 * s2io_nic structure.
3279 * @info : pointer to the structure with parameters given by ethtool to
3280 * return driver information.
3282 * Returns driver specific information like name, version etc.. to ethtool.
/*
 * ethtool get_drvinfo: fill in driver name/version, bus info, and the
 * sizes of the register/EEPROM dumps and self-test/stats tables.
 */
3287 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3288 struct ethtool_drvinfo *info)
3290 nic_t *sp = dev->priv;
/* NOTE(review): strncpy does not guarantee NUL-termination when the
 * source fills the buffer — relies on the destination fields being
 * larger than the driver strings; verify the sizes. */
3292 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3293 strncpy(info->version, s2io_driver_version,
3294 sizeof(s2io_driver_version));
3295 strncpy(info->fw_version, "", 32);
3296 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3297 info->regdump_len = XENA_REG_SPACE;
3298 info->eedump_len = XENA_EEPROM_SPACE;
3299 info->testinfo_len = S2IO_TEST_LEN;
3300 info->n_stats = S2IO_STAT_LEN;
3304 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
3305 * @sp: private member of the device structure, which is a pointer to the
3306 * s2io_nic structure.
3307 * @regs : pointer to the structure with parameters given by ethtool for
3308 * dumping the registers.
3309 * @reg_space: The input argument into which all the registers are dumped.
3311 * Dumps the entire register space of xFrame NIC into the user given
3317 static void s2io_ethtool_gregs(struct net_device *dev,
3318 struct ethtool_regs *regs, void *space)
3322 u8 *reg_space = (u8 *) space;
3323 nic_t *sp = dev->priv;
3325 regs->len = XENA_REG_SPACE;
3326 regs->version = sp->pdev->subsystem_device;
3328 for (i = 0; i < regs->len; i += 8) {
3329 reg = readq(sp->bar0 + i);
3330 memcpy((reg_space + i), ®, 8);
3335 * s2io_phy_id - timer function that alternates adapter LED.
3336 * @data : address of the private member of the device structure, which
3337 * is a pointer to the s2io_nic structure, provided as an u32.
3338 * Description: This is actually the timer function that alternates the
3339 * adapter LED bit of the adapter control bit to set/reset every time on
3340 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3341 * once every second.
/*
 * Timer callback that toggles the identification LED (GPIO bit on newer
 * subsystem revisions, adapter_control LED bit otherwise) and re-arms
 * itself every half second.
 */
3343 static void s2io_phy_id(unsigned long data)
3345 nic_t *sp = (nic_t *) data;
3346 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3350 subid = sp->pdev->subsystem_device;
/* Same >= 0x07 subsystem-revision check used elsewhere in this file. */
3351 if ((subid & 0xFF) >= 0x07) {
3352 val64 = readq(&bar0->gpio_control);
3353 val64 ^= GPIO_CTRL_GPIO_0;
3354 writeq(val64, &bar0->gpio_control);
3356 val64 = readq(&bar0->adapter_control);
3357 val64 ^= ADAPTER_LED_ON;
3358 writeq(val64, &bar0->adapter_control);
3361 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3365 * s2io_ethtool_idnic - To physically identify the nic on the system.
3366 * @sp : private member of the device structure, which is a pointer to the
3367 * s2io_nic structure.
3368 * @id : pointer to the structure with identification parameters given by
3370 * Description: Used to physically identify the NIC on the system.
3371 * The Link LED will blink for a time specified by the user for
3373 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3374 * identification is possible only if it's link is up.
3376 * int , returns 0 on success
/*
 * ethtool phys_id: blink the identification LED for 'data' seconds
 * (or a default duration) using the s2io_phy_id timer, then restore
 * the previous GPIO state on cards with faulty link indicators.
 */
3379 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3381 u64 val64 = 0, last_gpio_ctrl_val;
3382 nic_t *sp = dev->priv;
3383 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3386 subid = sp->pdev->subsystem_device;
3387 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/* Older revisions can only blink via adapter_control, which requires
 * the adapter (link) to be enabled. */
3388 if ((subid & 0xFF) < 0x07) {
3389 val64 = readq(&bar0->adapter_control);
3390 if (!(val64 & ADAPTER_CNTL_EN)) {
3392 "Adapter Link down, cannot blink LED\n");
3396 if (sp->id_timer.function == NULL) {
3397 init_timer(&sp->id_timer);
3398 sp->id_timer.function = s2io_phy_id;
3399 sp->id_timer.data = (unsigned long) sp;
3401 mod_timer(&sp->id_timer, jiffies);
/* NOTE(review): msleep_interruptible takes milliseconds, but this
 * passes data * HZ (jiffies-style units) — on HZ != 1000 kernels the
 * blink duration is wrong; should likely be data * 1000. TODO confirm. */
3403 msleep_interruptible(data * HZ);
3405 msleep_interruptible(MAX_FLICKER_TIME);
3406 del_timer_sync(&sp->id_timer);
3408 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3409 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3410 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3417 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3418 * @sp : private member of the device structure, which is a pointer to the
3419 * s2io_nic structure.
3420 * @ep : pointer to the structure with pause parameters given by ethtool.
3422 * Returns the Pause frame generation and reception capability of the NIC.
/*
 * ethtool get_pauseparam: report pause-frame generation (tx) and
 * reception (rx) state from the rmac_pause_cfg register.
 */
3426 static void s2io_ethtool_getpause_data(struct net_device *dev,
3427 struct ethtool_pauseparam *ep)
3430 nic_t *sp = dev->priv;
3431 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3433 val64 = readq(&bar0->rmac_pause_cfg);
3434 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3435 ep->tx_pause = TRUE;
3436 if (val64 & RMAC_PAUSE_RX_ENABLE)
3437 ep->rx_pause = TRUE;
/* Autoneg is never used for pause on this NIC. */
3438 ep->autoneg = FALSE;
3442 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3443 * @sp : private member of the device structure, which is a pointer to the
3444 * s2io_nic structure.
3445 * @ep : pointer to the structure with pause parameters given by ethtool.
3447 * It can be used to set or reset Pause frame generation or reception
3448 * support of the NIC.
3450 * int, returns 0 on Success
/*
 * ethtool set_pauseparam: read-modify-write the pause generation and
 * reception enable bits in rmac_pause_cfg according to ep.
 */
3453 static int s2io_ethtool_setpause_data(struct net_device *dev,
3454 struct ethtool_pauseparam *ep)
3457 nic_t *sp = dev->priv;
3458 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3460 val64 = readq(&bar0->rmac_pause_cfg);
3462 val64 |= RMAC_PAUSE_GEN_ENABLE;
3464 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3466 val64 |= RMAC_PAUSE_RX_ENABLE;
3468 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3469 writeq(val64, &bar0->rmac_pause_cfg);
3474 * read_eeprom - reads 4 bytes of data from user given offset.
3475 * @sp : private member of the device structure, which is a pointer to the
3476 * s2io_nic structure.
3477 * @off : offset at which the data must be written
3478 * @data : Its an output parameter where the data read at the given
3481 * Will read 4 bytes of data from the user given offset and return the
3483 * NOTE: Will allow to read only part of the EEPROM visible through the
3486 * -1 on failure and 0 on success.
/* I2C device ID of the on-board EEPROM. */
3489 #define S2IO_DEV_ID 5
/*
 * Read 4 bytes from the EEPROM at 'off' via the i2c_control register:
 * issue a START'd read command, then poll (bounded by exit_cnt) for the
 * controller's END bit before extracting the data.
 */
3490 static int read_eeprom(nic_t * sp, int off, u32 * data)
3495 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3497 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3498 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3499 I2C_CONTROL_CNTL_START;
3500 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3502 while (exit_cnt < 5) {
3503 val64 = readq(&bar0->i2c_control);
3504 if (I2C_CONTROL_CNTL_END(val64)) {
3505 *data = I2C_CONTROL_GET_DATA(val64);
3517 * write_eeprom - actually writes the relevant part of the data value.
3518 * @sp : private member of the device structure, which is a pointer to the
3519 * s2io_nic structure.
3520 * @off : offset at which the data must be written
3521 * @data : The data that is to be written
3522 * @cnt : Number of bytes of the data that are actually to be written into
3523 * the Eeprom. (max of 3)
3525 * Actually writes the relevant part of the data value into the Eeprom
3526 * through the I2C bus.
3528 * 0 on success, -1 on failure.
/*
 * Write 'cnt' bytes (max 3) of 'data' to the EEPROM at 'off' via the
 * i2c_control register; polls (bounded by exit_cnt) for the END bit and
 * checks the NACK flag to decide success.
 */
3531 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3533 int exit_cnt = 0, ret = -1;
3535 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3537 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3538 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3539 I2C_CONTROL_CNTL_START;
3540 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3542 while (exit_cnt < 5) {
3543 val64 = readq(&bar0->i2c_control);
3544 if (I2C_CONTROL_CNTL_END(val64)) {
/* No NACK from the EEPROM means the write was accepted. */
3545 if (!(val64 & I2C_CONTROL_NACK))
3557 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3558 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
3559 * @eeprom : pointer to the user level structure provided by ethtool,
3560 * containing all relevant information.
3561 * @data_buf : user defined value to be written into Eeprom.
3562 * Description: Reads the values stored in the Eeprom at given offset
3563 * for a given length. Stores these values in the input argument data
3564 * buffer 'data_buf' and returns these to the caller (ethtool.)
/*
 * s2io_ethtool_geeprom - ethtool get_eeprom handler.  Clamps the
 * requested range to the visible EEPROM space, then reads it 4 bytes
 * at a time via read_eeprom() into the user buffer.
 * NOTE(review): the conversion from `data` to `valid` (byte-order
 * fixup) between L3582 and L3586 is elided in this view.
 */
3569 static int s2io_ethtool_geeprom(struct net_device *dev,
3570 			 struct ethtool_eeprom *eeprom, u8 * data_buf)
3573 	nic_t *sp = dev->priv;
/* Magic identifies the device the snapshot came from. */
3575 	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request so we never read past the EEPROM window. */
3577 	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3578 		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3580 	for (i = 0; i < eeprom->len; i += 4) {
3581 		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3582 			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3586 		memcpy((data_buf + i), &valid, 4);
3592 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3593 * @sp : private member of the device structure, which is a pointer to the
3594 * s2io_nic structure.
3595 * @eeprom : pointer to the user level structure provided by ethtool,
3596 * containing all relevant information.
3597 * @data_buf : user defined value to be written into Eeprom.
3599 * Tries to write the user provided value in the Eeprom, at the offset
3600 * given by the user.
3602 * 0 on success, -EFAULT on failure.
/*
 * s2io_ethtool_seeprom - ethtool set_eeprom handler.  Validates the
 * magic cookie against this device's vendor/device id, then writes the
 * user buffer into the EEPROM one byte per write_eeprom() call.
 * NOTE(review): the loop header over `len`/`cnt` and the shifting of
 * `data` into position are partially elided in this view.
 */
3605 static int s2io_ethtool_seeprom(struct net_device *dev,
3606 				 struct ethtool_eeprom *eeprom,
3609 	int len = eeprom->len, cnt = 0;
3610 	u32 valid = 0, data;
3611 	nic_t *sp = dev->priv;
/* Refuse writes whose magic does not match this exact device. */
3613 	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3615 			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3616 		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Move the byte into the position write_eeprom() expects. */
3622 		data = (u32) data_buf[cnt] & 0x000000FF;
3624 			valid = (u32) (data << 24);
3628 		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3630 				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3632 				  "write into the specified offset\n");
3643 * s2io_register_test - reads and writes into all clock domains.
3644 * @sp : private member of the device structure, which is a pointer to the
3645 * s2io_nic structure.
3646 * @data : variable that returns the result of each of the test conducted b
3649 * Read and write into all clock domains. The NIC has 3 clock domains,
3650 * see that registers in all the three regions are accessible.
/*
 * s2io_register_test - offline self-test of register access.  Reads
 * registers with known power-on values in each of the NIC's clock
 * domains (pif_rd_swapper_fb, rmac_pause_cfg, rx_queue_cfg,
 * xgxs_efifo_cfg) and then does two write/read-back patterns on
 * xmsi_data.  On each mismatch a diagnostic is printed.
 * NOTE(review): the lines that record the failure into *data and the
 * final return are elided in this view.
 */
3655 static int s2io_register_test(nic_t * sp, uint64_t * data)
3657 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Known-value reads — one per clock domain. */
3661 	val64 = readq(&bar0->pif_rd_swapper_fb);
3662 	if (val64 != 0x123456789abcdefULL) {
3664 		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3667 	val64 = readq(&bar0->rmac_pause_cfg);
3668 	if (val64 != 0xc000ffff00000000ULL) {
3670 		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3673 	val64 = readq(&bar0->rx_queue_cfg);
3674 	if (val64 != 0x0808080808080808ULL) {
3676 		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3679 	val64 = readq(&bar0->xgxs_efifo_cfg);
3680 	if (val64 != 0x000000001923141EULL) {
3682 		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write/read-back with complementary bit patterns. */
3685 	val64 = 0x5A5A5A5A5A5A5A5AULL;
3686 	writeq(val64, &bar0->xmsi_data);
3687 	val64 = readq(&bar0->xmsi_data);
3688 	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3690 		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3693 	val64 = 0xA5A5A5A5A5A5A5A5ULL;
3694 	writeq(val64, &bar0->xmsi_data);
3695 	val64 = readq(&bar0->xmsi_data);
3696 	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3698 		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3706 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3707 * @sp : private member of the device structure, which is a pointer to the
3708 * s2io_nic structure.
3709 * @data:variable that returns the result of each of the test conducted by
3712 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
/*
 * s2io_eeprom_test - offline EEPROM self-test.  Performs two kinds of
 * checks: (a) writes to write-protected offsets (0x0, 0x7c, 0x80,
 * 0xfc, 0x100, 0x4ec) are expected to FAIL — a successful write there
 * is an error; (b) writes to writable offsets (0x4f0, 0x7fc) are
 * expected to succeed, read back the same value, and are then restored
 * to 0xFFFFFFFF.
 * NOTE(review): the fail_cnt bookkeeping, *data assignment and return
 * are elided in this view.
 */
3718 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3723 	/* Test Write Error at offset 0: write must be rejected. */
3724 	if (!write_eeprom(sp, 0, 0, 3))
3727 	/* Test Write at offset 4f0: write must succeed and verify. */
3728 	if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3730 	if (read_eeprom(sp, 0x4F0, &ret_data))
3733 	if (ret_data != 0x01234567)
3736 	/* Restore the EEPROM data to 0xFFFFFFFF. */
3737 	write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3739 	/* Test Write Request Error at offset 0x7c */
3740 	if (!write_eeprom(sp, 0x07C, 0, 3))
3743 	/* Test Write Request at offset 0x7fc */
3744 	if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3746 	if (read_eeprom(sp, 0x7FC, &ret_data))
3749 	if (ret_data != 0x01234567)
3752 	/* Restore the EEPROM data to 0xFFFFFFFF. */
3753 	write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3755 	/* Test Write Error at offset 0x80 */
3756 	if (!write_eeprom(sp, 0x080, 0, 3))
3759 	/* Test Write Error at offset 0xfc */
3760 	if (!write_eeprom(sp, 0x0FC, 0, 3))
3763 	/* Test Write Error at offset 0x100 */
3764 	if (!write_eeprom(sp, 0x100, 0, 3))
3767 	/* Test Write Error at offset 4ec */
3768 	if (!write_eeprom(sp, 0x4EC, 0, 3))
3776 * s2io_bist_test - invokes the MemBist test of the card .
3777 * @sp : private member of the device structure, which is a pointer to the
3778 * s2io_nic structure.
3779 * @data:variable that returns the result of each of the test conducted by
3782 * This invokes the MemBist test of the card. We give around
3783 * 2 secs time for the Test to complete. If it's still not complete
3784 * within this period, we consider that the test failed.
3786 * 0 on success and -1 on failure.
3789 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3792 int cnt = 0, ret = -1;
3794 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3795 bist |= PCI_BIST_START;
3796 pci_write_config_word(sp->pdev, PCI_BIST, bist);
3799 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3800 if (!(bist & PCI_BIST_START)) {
3801 *data = (bist & PCI_BIST_CODE_MASK);
3813 * s2io_link_test - verifies the link state of the nic
3814 * @sp : private member of the device structure, which is a pointer to the
3815 * s2io_nic structure.
3816 * @data: variable that returns the result of each of the test conducted by
3819 * The function verifies the link state of the NIC and updates the input
3820 * argument 'data' appropriately.
/*
 * s2io_link_test - online self-test of link state.  Reads the adapter
 * status register and checks the RMAC local-fault bit.
 * NOTE(review): the *data assignment and return are elided in this view.
 */
3825 static int s2io_link_test(nic_t * sp, uint64_t * data)
3827 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
3830 	val64 = readq(&bar0->adapter_status);
/* A local fault on the RMAC means the link is down. */
3831 	if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3838 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3839 * @sp - private member of the device structure, which is a pointer to the
3840 * s2io_nic structure.
3841 * @data - variable that returns the result of each of the test
3842 * conducted by the driver.
3844 * This is one of the offline test that tests the read and write
3845 * access to the RldRam chip on the NIC.
/*
 * s2io_rldram_test - offline test of the on-board RLDRAM.  Disables
 * ECC, puts the memory controller into test mode, programs three
 * 64-bit data patterns plus an address range, then runs a write pass
 * followed by a read pass, polling for TEST_DONE and finally checking
 * TEST_PASS.  Two iterations are run; the second inverts the upper
 * 48 bits of each pattern.
 * NOTE(review): poll delays, failure handling after the polls, the
 * iteration increment, *data assignment and cleanup/return are elided
 * in this view.
 */
3850 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3852 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
3854 	int cnt, iteration = 0, test_pass = 0;
/* ECC must be off while exercising raw RLDRAM patterns. */
3856 	val64 = readq(&bar0->adapter_control);
3857 	val64 &= ~ADAPTER_ECC_EN;
3858 	writeq(val64, &bar0->adapter_control);
3860 	val64 = readq(&bar0->mc_rldram_test_ctrl);
3861 	val64 |= MC_RLDRAM_TEST_MODE;
3862 	writeq(val64, &bar0->mc_rldram_test_ctrl);
3864 	val64 = readq(&bar0->mc_rldram_mrs);
3865 	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3866 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3868 	val64 |= MC_RLDRAM_MRS_ENABLE;
3869 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3871 	while (iteration < 2) {
/* Pattern set; second iteration inverts the upper 48 bits. */
3872 		val64 = 0x55555555aaaa0000ULL;
3873 		if (iteration == 1) {
3874 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
3876 		writeq(val64, &bar0->mc_rldram_test_d0);
3878 		val64 = 0xaaaa5a5555550000ULL;
3879 		if (iteration == 1) {
3880 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
3882 		writeq(val64, &bar0->mc_rldram_test_d1);
3884 		val64 = 0x55aaaaaaaa5a0000ULL;
3885 		if (iteration == 1) {
3886 			val64 ^= 0xFFFFFFFFFFFF0000ULL;
3888 		writeq(val64, &bar0->mc_rldram_test_d2);
3890 		val64 = (u64) (0x0000003fffff0000ULL);
3891 		writeq(val64, &bar0->mc_rldram_test_add);
/* Kick the write pass and poll for completion. */
3894 		val64 = MC_RLDRAM_TEST_MODE;
3895 		writeq(val64, &bar0->mc_rldram_test_ctrl);
3898 		    MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3900 		writeq(val64, &bar0->mc_rldram_test_ctrl);
3902 		for (cnt = 0; cnt < 5; cnt++) {
3903 			val64 = readq(&bar0->mc_rldram_test_ctrl);
3904 			if (val64 & MC_RLDRAM_TEST_DONE)
/* Kick the read pass and poll for completion. */
3912 		val64 = MC_RLDRAM_TEST_MODE;
3913 		writeq(val64, &bar0->mc_rldram_test_ctrl);
3915 		val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3916 		writeq(val64, &bar0->mc_rldram_test_ctrl);
3918 		for (cnt = 0; cnt < 5; cnt++) {
3919 			val64 = readq(&bar0->mc_rldram_test_ctrl);
3920 			if (val64 & MC_RLDRAM_TEST_DONE)
3928 		val64 = readq(&bar0->mc_rldram_test_ctrl);
3929 		if (val64 & MC_RLDRAM_TEST_PASS)
3944 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
3945 * @sp : private member of the device structure, which is a pointer to the
3946 * s2io_nic structure.
3947 * @ethtest : pointer to a ethtool command specific structure that will be
3948 * returned to the user.
3949 * @data : variable that returns the result of each of the test
3950 * conducted by the driver.
3952 * This function conducts 6 tests ( 4 offline and 2 online) to determine
3953 * the health of the card.
/*
 * s2io_ethtool_test - ethtool self_test entry point.  When offline
 * testing is requested the interface is closed first (if running),
 * the offline tests (register, RLDRAM, EEPROM, BIST) are executed, and
 * any failure sets ETH_TEST_FL_FAILED.  Online tests (link) run only
 * while the interface is up.
 * NOTE(review): the re-open of the interface after offline tests and
 * the handling of the link test's data slot when offline are elided in
 * this view.
 */
3958 static void s2io_ethtool_test(struct net_device *dev,
3959 			      struct ethtool_test *ethtest,
3962 	nic_t *sp = dev->priv;
/* Remember whether the interface was up so it can be restored. */
3963 	int orig_state = netif_running(sp->dev);
3965 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3966 		/* Offline Tests. */
3968 			s2io_close(sp->dev);
3970 		if (s2io_register_test(sp, &data[0]))
3971 			ethtest->flags |= ETH_TEST_FL_FAILED;
3975 		if (s2io_rldram_test(sp, &data[3]))
3976 			ethtest->flags |= ETH_TEST_FL_FAILED;
3980 		if (s2io_eeprom_test(sp, &data[1]))
3981 			ethtest->flags |= ETH_TEST_FL_FAILED;
3983 		if (s2io_bist_test(sp, &data[4]))
3984 			ethtest->flags |= ETH_TEST_FL_FAILED;
3994 			  "%s: is not up, cannot run test\n",
4003 		if (s2io_link_test(sp, &data[2]))
4004 			ethtest->flags |= ETH_TEST_FL_FAILED;
/*
 * s2io_get_ethtool_stats - ethtool get_ethtool_stats handler.  Copies
 * the hardware MAC statistics block (kept little-endian by the NIC in
 * the shared stats area) into the u64 array ethtool hands us, in the
 * same order as ethtool_stats_keys.  Mixed le32/le64 conversions match
 * the width of each counter in the hardware layout.
 */
4013 static void s2io_get_ethtool_stats(struct net_device *dev,
4014 				   struct ethtool_stats *estats,
4018 	nic_t *sp = dev->priv;
4019 	StatInfo_t *stat_info = sp->mac_control.stats_info;
/* Transmit-side MAC counters. */
4021 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4022 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4023 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4024 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4025 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4026 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4027 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4028 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4029 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4030 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4031 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4032 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4033 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4034 	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
/* Receive-side MAC counters. */
4035 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4036 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4037 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4038 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4039 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4040 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4041 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4042 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4043 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4044 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4045 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4046 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4047 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4048 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4049 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4050 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4051 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4052 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4053 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4054 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4055 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4056 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4057 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4058 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4059 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* ethtool get_regs_len: size of the register dump is the BAR0 window. */
4062 int s2io_ethtool_get_regs_len(struct net_device *dev)
4064 	return (XENA_REG_SPACE);
/* ethtool get_rx_csum: report the driver's Rx checksum-offload flag. */
4068 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4070 	nic_t *sp = dev->priv;
4072 	return (sp->rx_csum);
/*
 * ethtool set_rx_csum: record the requested Rx checksum-offload state.
 * NOTE(review): the assignment to sp->rx_csum and the return are elided
 * in this view.
 */
4074 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4076 	nic_t *sp = dev->priv;
/* ethtool get_eeprom_len: size of the visible EEPROM window. */
4085 int s2io_get_eeprom_len(struct net_device *dev)
4087 	return (XENA_EEPROM_SPACE);
/* ethtool self_test_count: number of self-test result slots. */
4090 int s2io_ethtool_self_test_count(struct net_device *dev)
4092 	return (S2IO_TEST_LEN);
4094 void s2io_ethtool_get_strings(struct net_device *dev,
4095 u32 stringset, u8 * data)
4097 switch (stringset) {
4099 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4102 memcpy(data, ðtool_stats_keys,
4103 sizeof(ethtool_stats_keys));
/* ethtool get_stats_count: number of statistics entries exported. */
4106 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4108 	return (S2IO_STAT_LEN);
/* ethtool set_tx_csum: toggle the NETIF_F_IP_CSUM feature flag. */
4111 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4114 		dev->features |= NETIF_F_IP_CSUM;
4116 		dev->features &= ~NETIF_F_IP_CSUM;
/*
 * ethtool operations table wired into the net_device at probe time via
 * SET_ETHTOOL_OPS().  Generic ethtool_op_* helpers cover link/tx-csum/
 * sg/tso queries; everything else is driver-specific.
 */
4122 static struct ethtool_ops netdev_ethtool_ops = {
4123 	.get_settings = s2io_ethtool_gset,
4124 	.set_settings = s2io_ethtool_sset,
4125 	.get_drvinfo = s2io_ethtool_gdrvinfo,
4126 	.get_regs_len = s2io_ethtool_get_regs_len,
4127 	.get_regs = s2io_ethtool_gregs,
4128 	.get_link = ethtool_op_get_link,
4129 	.get_eeprom_len = s2io_get_eeprom_len,
4130 	.get_eeprom = s2io_ethtool_geeprom,
4131 	.set_eeprom = s2io_ethtool_seeprom,
4132 	.get_pauseparam = s2io_ethtool_getpause_data,
4133 	.set_pauseparam = s2io_ethtool_setpause_data,
4134 	.get_rx_csum = s2io_ethtool_get_rx_csum,
4135 	.set_rx_csum = s2io_ethtool_set_rx_csum,
4136 	.get_tx_csum = ethtool_op_get_tx_csum,
4137 	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
4138 	.get_sg = ethtool_op_get_sg,
4139 	.set_sg = ethtool_op_set_sg,
4141 	.get_tso = ethtool_op_get_tso,
4142 	.set_tso = ethtool_op_set_tso,
4144 	.self_test_count = s2io_ethtool_self_test_count,
4145 	.self_test = s2io_ethtool_test,
4146 	.get_strings = s2io_ethtool_get_strings,
4147 	.phys_id = s2io_ethtool_idnic,
4148 	.get_stats_count = s2io_ethtool_get_stats_count,
4149 	.get_ethtool_stats = s2io_get_ethtool_stats
4153 * s2io_ioctl - Entry point for the Ioctl
4154 * @dev : Device pointer.
4155 * @ifr : An IOCTL specefic structure, that can contain a pointer to
4156 * a proprietary structure used to pass information to the driver.
4157 * @cmd : This is used to distinguish between the different commands that
4158 * can be passed to the IOCTL functions.
4160 * Currently there are no special functionality supported in IOCTL, hence
4161 * the function always returns -EOPNOTSUPP.
4164 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4170 * s2io_change_mtu - entry point to change MTU size for the device.
4171 * @dev : device pointer.
4172 * @new_mtu : the new MTU size for the device.
4173 * Description: A driver entry point to change MTU size for the device.
4174 * Before changing the MTU the device must be stopped.
4176 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_change_mtu - change the device MTU.  Refuses while the interface
 * is running, validates the new size against [MIN_MTU, S2IO_JUMBO_SIZE],
 * then programs the RMAC maximum-payload-length register.
 * NOTE(review): the dev->mtu assignment, val64 setup for the PYLD write
 * and the return are elided in this view.
 */
4180 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4182 	nic_t *sp = dev->priv;
4183 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* The device must be down before the MTU can change. */
4186 	if (netif_running(dev)) {
4187 		DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4188 		DBG_PRINT(ERR_DBG, "change its MTU\n");
4192 	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4193 		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4198 	/* Set the new MTU into the PYLD register of the NIC */
4200 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4208 * s2io_tasklet - Bottom half of the ISR.
4209 * @dev_adr : address of the device structure in dma_addr_t format.
4211 * This is the tasklet or the bottom half of the ISR. This is
4212 * an extension of the ISR which is scheduled by the scheduler to be run
4213 * when the load on the CPU is low. All low priority tasks of the ISR can
4214 * be pushed into the tasklet. For now the tasklet is used only to
4215 * replenish the Rx buffers in the Rx buffer descriptors.
/*
 * s2io_tasklet - bottom half of the ISR; replenishes Rx buffers on all
 * rings when the tasklet is not already in use (guarded by the
 * TASKLET_IN_USE test-and-set on sp->tasklet_status).
 * NOTE(review): the loop-break on -ENOMEM and the closing of the guard
 * branch are partially elided in this view.
 */
4220 static void s2io_tasklet(unsigned long dev_addr)
4222 	struct net_device *dev = (struct net_device *) dev_addr;
4223 	nic_t *sp = dev->priv;
4225 	mac_info_t *mac_control;
4226 	struct config_param *config;
4228 	mac_control = &sp->mac_control;
4229 	config = &sp->config;
/* Only one instance of the tasklet may refill rings at a time. */
4231 	if (!TASKLET_IN_USE) {
4232 		for (i = 0; i < config->rx_ring_num; i++) {
4233 			ret = fill_rx_buffers(sp, i);
4234 			if (ret == -ENOMEM) {
4235 				DBG_PRINT(ERR_DBG, "%s: Out of ",
4237 				DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4239 			} else if (ret == -EFILL) {
4241 					  "%s: Rx Ring %d is full\n",
/* Release the in-use guard so the tasklet can run again. */
4246 		clear_bit(0, (&sp->tasklet_status));
4251 * s2io_set_link - Set the LInk status
4252 * @data: long pointer to device private structue
4253 * Description: Sets the link status for the adapter
/*
 * s2io_set_link - deferred link-state worker.  Guards against a
 * concurrent reset via the link_state bit, waits for the NIC's
 * self-initiated cleanup, then: if the adapter is quiescent and the
 * link is up, enables the adapter (plus GPIO/LED workarounds for cards
 * with faulty link indicators) and reports LINK_UP; if the link is
 * down it reverses the GPIO workaround and reports LINK_DOWN; if not
 * quiescent it stops the Tx queue.  Clears the guard bit on exit.
 * NOTE(review): several closing braces, the early-return path of the
 * guard, and the delay call are elided in this view.
 */
4256 static void s2io_set_link(unsigned long data)
4258 	nic_t *nic = (nic_t *) data;
4259 	struct net_device *dev = nic->dev;
4260 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* Skip the work entirely if a reset is in flight. */
4264 	if (test_and_set_bit(0, &(nic->link_state))) {
4265 		/* The card is being reset, no point doing anything */
4269 	subid = nic->pdev->subsystem_device;
4271 	 * Allow a small delay for the NICs self initiated
4272 	 * cleanup to complete.
4276 	val64 = readq(&bar0->adapter_status);
4277 	if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4278 		if (LINK_IS_UP(val64)) {
4279 			val64 = readq(&bar0->adapter_control);
4280 			val64 |= ADAPTER_CNTL_EN;
4281 			writeq(val64, &bar0->adapter_control);
/* GPIO_0 drives the link LED on cards with faulty indicators. */
4282 			if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4283 				val64 = readq(&bar0->gpio_control);
4284 				val64 |= GPIO_CTRL_GPIO_0;
4285 				writeq(val64, &bar0->gpio_control);
4286 				val64 = readq(&bar0->gpio_control);
4288 				val64 |= ADAPTER_LED_ON;
4289 				writeq(val64, &bar0->adapter_control);
/* Re-check: the link may have dropped while we enabled the device. */
4291 			val64 = readq(&bar0->adapter_status);
4292 			if (!LINK_IS_UP(val64)) {
4293 				DBG_PRINT(ERR_DBG, "%s:", dev->name);
4294 				DBG_PRINT(ERR_DBG, " Link down");
4295 				DBG_PRINT(ERR_DBG, "after ");
4296 				DBG_PRINT(ERR_DBG, "enabling ");
4297 				DBG_PRINT(ERR_DBG, "device \n");
4299 			if (nic->device_enabled_once == FALSE) {
4300 				nic->device_enabled_once = TRUE;
4302 			s2io_link(nic, LINK_UP);
4304 			if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4305 				val64 = readq(&bar0->gpio_control);
4306 				val64 &= ~GPIO_CTRL_GPIO_0;
4307 				writeq(val64, &bar0->gpio_control);
4308 				val64 = readq(&bar0->gpio_control);
4310 			s2io_link(nic, LINK_DOWN);
4312 	} else {		/* NIC is not Quiescent. */
4313 		DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4314 		DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4315 		netif_stop_queue(dev);
/* Drop the guard so s2io_card_down()/future workers can proceed. */
4317 	clear_bit(0, &(nic->link_state));
/*
 * s2io_card_down - bring the adapter down.  Waits for an executing
 * s2io_set_link worker, marks the card DOWN, stops traffic, kills the
 * Rx-refill tasklet, resets the NIC once it is quiescent, and frees
 * all outstanding Tx/Rx buffers under the tx_lock.
 * NOTE(review): the delay inside the wait loop, the traffic-disable
 * call, the quiescence wait/reset sequence and intermediate braces are
 * elided in this view.
 */
4320 static void s2io_card_down(nic_t * sp)
4323 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
4324 	unsigned long flags;
4325 	register u64 val64 = 0;
4327 	/* If s2io_set_link task is executing, wait till it completes. */
4328 	while (test_and_set_bit(0, &(sp->link_state))) {
4331 	atomic_set(&sp->card_state, CARD_DOWN);
4333 	/* disable Tx and Rx traffic on the NIC */
4337 	tasklet_kill(&sp->task);
4339 	/* Check if the device is Quiescent and then Reset the NIC */
4341 		val64 = readq(&bar0->adapter_status);
4342 		if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4350 				  "s2io_close:Device not Quiescent ");
4351 			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4352 				  (unsigned long long) val64);
4356 	spin_lock_irqsave(&sp->tx_lock, flags);
4359 	/* Free all unused Tx and Rx buffers */
4360 	free_tx_buffers(sp);
4361 	free_rx_buffers(sp);
4363 	spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Allow the link worker to run again. */
4364 	clear_bit(0, &(sp->link_state));
/*
 * s2io_card_up - bring the adapter up: program the H/W registers
 * (init_nic), pre-fill every Rx ring, set the multicast/receive mode,
 * arm the Rx-refill tasklet, and finally start the NIC (start_nic).
 * Unwinds (tasklet_kill, free_irq, free_rx_buffers) on start failure.
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): the error-return values and several closing braces are
 * elided in this view.
 */
4367 static int s2io_card_up(nic_t * sp)
4370 	mac_info_t *mac_control;
4371 	struct config_param *config;
4372 	struct net_device *dev = (struct net_device *) sp->dev;
4374 	/* Initialize the H/W I/O registers */
4375 	if (init_nic(sp) != 0) {
4376 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4382 	 * Initializing the Rx buffers. For now we are considering only 1
4383 	 * Rx ring and initializing buffers into 30 Rx blocks
4385 	mac_control = &sp->mac_control;
4386 	config = &sp->config;
4388 	for (i = 0; i < config->rx_ring_num; i++) {
4389 		if ((ret = fill_rx_buffers(sp, i))) {
4390 			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
/* Undo any rings already filled before bailing out. */
4393 			free_rx_buffers(sp);
4396 		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4397 			  atomic_read(&sp->rx_bufs_left[i]));
4400 	/* Setting its receive mode */
4401 	s2io_set_multicast(dev);
4403 	/* Enable tasklet for the device */
4404 	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4406 	/* Enable Rx Traffic and interrupts on the NIC */
4407 	if (start_nic(sp)) {
4408 		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4409 		tasklet_kill(&sp->task);
4411 		free_irq(dev->irq, dev);
4412 		free_rx_buffers(sp);
4416 	atomic_set(&sp->card_state, CARD_UP);
4421 * s2io_restart_nic - Resets the NIC.
4422 * @data : long pointer to the device private structure
4424 * This function is scheduled to be run by the s2io_tx_watchdog
4425 * function after 0.5 secs to reset the NIC. The idea is to reduce
4426 * the run time of the watch dog routine which is run holding a
/*
 * s2io_restart_nic - work function scheduled by the Tx watchdog; brings
 * the card back up and re-wakes the Tx queue after a watchdog reset.
 * NOTE(review): the s2io_card_down() call preceding s2io_card_up() is
 * elided in this view.
 */
4430 static void s2io_restart_nic(unsigned long data)
4432 	struct net_device *dev = (struct net_device *) data;
4433 	nic_t *sp = dev->priv;
4436 	if (s2io_card_up(sp)) {
4437 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4440 	netif_wake_queue(dev);
4441 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4447 * s2io_tx_watchdog - Watchdog for transmit side.
4448 * @dev : Pointer to net device structure
4450 * This function is triggered if the Tx Queue is stopped
4451 * for a pre-defined amount of time when the Interface is still up.
4452 * If the Interface is jammed in such a situation, the hardware is
4453 * reset (by s2io_close) and restarted again (by s2io_open) to
4454 * overcome any problem that might have been caused in the hardware.
/*
 * s2io_tx_watchdog - netdev tx_timeout hook.  If the carrier is still
 * up, defers the actual reset to the rst_timer_task workqueue so the
 * watchdog itself stays short (it runs holding the xmit lock).
 */
4459 static void s2io_tx_watchdog(struct net_device *dev)
4461 	nic_t *sp = dev->priv;
4463 	if (netif_carrier_ok(dev)) {
4464 		schedule_work(&sp->rst_timer_task);
4469 * rx_osm_handler - To perform some OS related operations on SKB.
4470 * @sp: private member of the device structure,pointer to s2io_nic structure.
4471 * @skb : the socket buffer pointer.
4472 * @len : length of the packet
4473 * @cksum : FCS checksum of the frame.
4474 * @ring_no : the ring from which this RxD was extracted.
4476 * This function is called by the Rx interrupt service routine to perform
4477 * some OS related operations on the SKB before passing it to the upper
4478 * layers. It mainly checks if the checksum is OK, if so adds it to the
4479 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4480 * to the upper layer. If the checksum is wrong, it increments the Rx
4481 * packet error count, frees the SKB and returns error.
4483 * SUCCESS on success and -1 on failure.
/*
 * rx_osm_handler - per-RxD receive completion handling.  Checks the
 * descriptor T_CODE for errors, updates rx_packets/rx_bytes, assembles
 * the skb (2-buffer mode copies buffer0 then appends buffer2), marks
 * checksum state from the L3/L4 result bits when Rx csum offload is on,
 * and hands the skb to the stack (NAPI or netif_rx path).
 * NOTE(review): the error-path statistics/free, the non-2-buffer
 * skb_put, and the #else/#endif pairings are elided in this view.
 */
4485 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4487 	nic_t *sp = ring_data->nic;
4488 	struct net_device *dev = (struct net_device *) sp->dev;
4489 	struct sk_buff *skb = (struct sk_buff *)
4490 		((unsigned long) rxdp->Host_Control);
4491 	int ring_no = ring_data->ring_no;
4492 	u16 l3_csum, l4_csum;
4493 #ifdef CONFIG_2BUFF_MODE
4494 	int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4495 	int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4496 	int get_block = ring_data->rx_curr_get_info.block_index;
4497 	int get_off = ring_data->rx_curr_get_info.offset;
4498 	buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4499 	unsigned char *buff;
/* NOTE(review): stray double semicolon at end of next line (harmless). */
4501 	u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
4504 	if (rxdp->Control_1 & RXD_T_CODE) {
4505 		unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4506 		DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4510 	/* Updating statistics */
4511 	rxdp->Host_Control = 0;
4513 	sp->stats.rx_packets++;
4514 #ifndef CONFIG_2BUFF_MODE
4515 	sp->stats.rx_bytes += len;
4517 	sp->stats.rx_bytes += buf0_len + buf2_len;
4520 #ifndef CONFIG_2BUFF_MODE
/* 2-buffer mode: prepend header buffer 0, then payload buffer 2. */
4523 	buff = skb_push(skb, buf0_len);
4524 	memcpy(buff, ba->ba_0, buf0_len);
4525 	skb_put(skb, buf2_len);
4528 	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4530 		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4531 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4532 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4534 			 * NIC verifies if the Checksum of the received
4535 			 * frame is Ok or not and accordingly returns
4536 			 * a flag in the RxD.
4538 			skb->ip_summed = CHECKSUM_UNNECESSARY;
4541 			 * Packet with erroneous checksum, let the
4542 			 * upper layers deal with it.
4544 			skb->ip_summed = CHECKSUM_NONE;
4547 		skb->ip_summed = CHECKSUM_NONE;
4550 	skb->protocol = eth_type_trans(skb, dev);
4551 #ifdef CONFIG_S2IO_NAPI
4552 	netif_receive_skb(skb);
4556 	dev->last_rx = jiffies;
/* One fewer buffer outstanding on this ring. */
4557 	atomic_dec(&sp->rx_bufs_left[ring_no]);
4562 * s2io_link - stops/starts the Tx queue.
4563 * @sp : private member of the device structure, which is a pointer to the
4564 * s2io_nic structure.
4565 * @link : inidicates whether link is UP/DOWN.
4567 * This function stops/starts the Tx queue depending on whether the link
4568 * status of the NIC is is down or up. This is called by the Alarm
4569 * interrupt handler whenever a link change interrupt comes up.
/*
 * s2io_link - report a link-state change to the stack, toggling the
 * carrier (and hence the Tx queue) only when the state actually
 * changed since the last call.
 */
4574 void s2io_link(nic_t * sp, int link)
4576 	struct net_device *dev = (struct net_device *) sp->dev;
/* Only act on genuine transitions, not repeated notifications. */
4578 	if (link != sp->last_link_state) {
4579 		if (link == LINK_DOWN) {
4580 			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4581 			netif_carrier_off(dev);
4583 			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4584 			netif_carrier_on(dev);
4587 	sp->last_link_state = link;
4591 * get_xena_rev_id - to identify revision ID of xena.
4592 * @pdev : PCI Dev structure
4594 * Function to identify the Revision ID of xena.
4596 * returns the revision ID of the device.
/* Read the PCI revision id of the Xena device; see kernel-doc above. */
4599 int get_xena_rev_id(struct pci_dev *pdev)
4603 	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4608 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4609 * @sp : private member of the device structure, which is a pointer to the
4610 * s2io_nic structure.
4612 * This function initializes a few of the PCI and PCI-X configuration registers
4613 * with recommended values.
/*
 * s2io_init_pci - program recommended PCI/PCI-X config values: enable
 * data-parity-error recovery in the PCI-X command register, set the
 * PErr response bit in the PCI command register, and force relaxed
 * ordering off.  Each write is followed by a read-back.
 * NOTE(review): the bit constants OR-ed into pcix_cmd on the write
 * lines are elided in this view.
 */
4618 static void s2io_init_pci(nic_t * sp)
4620 	u16 pci_cmd = 0, pcix_cmd = 0;
4622 	/* Enable Data Parity Error Recovery in PCI-X command register. */
4623 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4625 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4627 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4630 	/* Set the PErr Response bit in PCI command register. */
4631 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4632 	pci_write_config_word(sp->pdev, PCI_COMMAND,
4633 			      (pci_cmd | PCI_COMMAND_PARITY));
4634 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4636 	/* Forcibly disabling relaxed ordering capability of the card. */
4638 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4640 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Module metadata and loadable parameters (documented in the file
 * header).  All parameters use permission 0 (not visible in sysfs).
 * NOTE(review): use_continuous_tx_intrs passes perm=1, which is not a
 * valid sysfs mode (should be 0) — confirm against module_param docs. */
4644 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4645 MODULE_LICENSE("GPL");
4646 module_param(tx_fifo_num, int, 0);
4647 module_param(rx_ring_num, int, 0);
4648 module_param_array(tx_fifo_len, uint, NULL, 0);
4649 module_param_array(rx_ring_sz, uint, NULL, 0);
4650 module_param(Stats_refresh_time, int, 0);
4651 module_param_array(rts_frm_len, uint, NULL, 0);
4652 module_param(use_continuous_tx_intrs, int, 1);
4653 module_param(rmac_pause_time, int, 0);
4654 module_param(mc_pause_threshold_q0q3, int, 0);
4655 module_param(mc_pause_threshold_q4q7, int, 0);
4656 module_param(shared_splits, int, 0);
4657 module_param(tmac_util_period, int, 0);
4658 module_param(rmac_util_period, int, 0);
4659 #ifndef CONFIG_S2IO_NAPI
4660 module_param(indicate_max_pkts, int, 0);
4664 * s2io_init_nic - Initialization of the adapter .
4665 * @pdev : structure containing the PCI related information of the device.
4666 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4668 * The function initializes an adapter identified by the pci_dec structure.
4669 * All OS related initialization including memory and device structure and
4670 * initialization of the device private variable is done. Also the swapper
4671 * control register is initialized to enable read and write into the I/O
4672 * registers of the device.
4674 * returns 0 on success and negative on failure.
4677 static int __devinit
4678 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4681 struct net_device *dev;
4683 int dma_flag = FALSE;
4684 u32 mac_up, mac_down;
4685 u64 val64 = 0, tmp64 = 0;
4686 XENA_dev_config_t __iomem *bar0 = NULL;
4688 mac_info_t *mac_control;
4689 struct config_param *config;
4691 #ifdef CONFIG_S2IO_NAPI
4692 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4695 if ((ret = pci_enable_device(pdev))) {
4697 "s2io_init_nic: pci_enable_device failed\n");
4701 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4702 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4704 if (pci_set_consistent_dma_mask
4705 (pdev, DMA_64BIT_MASK)) {
4707 "Unable to obtain 64bit DMA for \
4708 consistent allocations\n");
4709 pci_disable_device(pdev);
4712 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
/*
 * NOTE(review): visible tail of s2io_init_nic(), the PCI probe routine.
 * Its opening (DMA-mask negotiation, local declarations) and many
 * intermediate lines are elided from this view; comments below describe
 * only what is visible here.
 */
4713 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
/* Presumably the "no usable DMA mask" error branch -- context elided. */
4715 pci_disable_device(pdev);
/* Claim the device's PCI I/O and memory regions for this driver. */
4719 if (pci_request_regions(pdev, s2io_driver_name)) {
/*
 * NOTE(review): the trailing comma (comma operator) chains this
 * DBG_PRINT with the next statement; behavior is unchanged, but a
 * semicolon would be the conventional terminator here.
 */
4720 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4721 pci_disable_device(pdev);
/* Allocate the net_device together with a nic_t-sized private area. */
4725 dev = alloc_etherdev(sizeof(nic_t));
4727 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4728 pci_disable_device(pdev);
4729 pci_release_regions(pdev);
/* Enable bus mastering and tie the net_device to the PCI device. */
4733 pci_set_master(pdev);
4734 pci_set_drvdata(pdev, dev);
4735 SET_MODULE_OWNER(dev);
4736 SET_NETDEV_DEV(dev, &pdev->dev);
4738 /* Private member variable initialized to s2io NIC structure */
4740 memset(sp, 0, sizeof(nic_t));
4743 sp->high_dma_flag = dma_flag;
4744 sp->device_enabled_once = FALSE;
4746 /* Initialize some PCI/PCI-X fields of the NIC. */
4750 * Setting the device configuration parameters.
4751 * Most of these parameters can be specified by the user during
4752 * module insertion as they are module loadable parameters. If
4753 * these parameters are not specified during load time, they
4754 * are initialized with default values.
4756 mac_control = &sp->mac_control;
4757 config = &sp->config;
4759 /* Tx side parameters. */
4760 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4761 config->tx_fifo_num = tx_fifo_num;
4762 for (i = 0; i < MAX_TX_FIFOS; i++) {
4763 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4764 config->tx_cfg[i].fifo_priority = i;
4767 /* mapping the QoS priority to the configured fifos */
4768 for (i = 0; i < MAX_TX_FIFOS; i++)
4769 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
/*
 * Utilization-based Tx interrupts by default; any FIFO shorter than
 * 65 descriptors forces the fall-back to per-list interrupts below.
 */
4771 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4772 for (i = 0; i < config->tx_fifo_num; i++) {
4773 config->tx_cfg[i].f_no_snoop =
4774 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4775 if (config->tx_cfg[i].fifo_len < 65) {
4776 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4780 config->max_txds = MAX_SKB_FRAGS;
4782 /* Rx side parameters. */
4783 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4784 config->rx_ring_num = rx_ring_num;
4785 for (i = 0; i < MAX_RX_RINGS; i++) {
4786 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4787 (MAX_RXDS_PER_BLOCK + 1);
4788 config->rx_cfg[i].ring_priority = i;
4791 for (i = 0; i < rx_ring_num; i++) {
4792 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4793 config->rx_cfg[i].f_no_snoop =
4794 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4797 /* Setting Mac Control parameters */
4798 mac_control->rmac_pause_time = rmac_pause_time;
4799 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4800 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4803 /* Initialize Ring buffer parameters. */
4804 for (i = 0; i < config->rx_ring_num; i++)
4805 atomic_set(&sp->rx_bufs_left[i], 0);
4807 /* initialize the shared memory used by the NIC and the host */
4808 if (init_shared_mem(sp)) {
4809 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4812 goto mem_alloc_failed;
/*
 * Map the register space (PCI resource 0) and the Tx FIFO window
 * (PCI resource 2) into kernel virtual address space.
 */
4815 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4816 pci_resource_len(pdev, 0));
4818 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4821 goto bar0_remap_failed;
4824 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4825 pci_resource_len(pdev, 2));
4827 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4830 goto bar1_remap_failed;
4833 dev->irq = pdev->irq;
4834 dev->base_addr = (unsigned long) sp->bar0;
4836 /* Initializing the BAR1 address as the start of the FIFO pointer. */
/* Each Tx FIFO gets a 128 KB (0x20000-byte) window within bar1. */
4837 for (j = 0; j < MAX_TX_FIFOS; j++) {
4838 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4839 (sp->bar1 + (j * 0x00020000));
4842 /* Driver entry points */
4843 dev->open = &s2io_open;
4844 dev->stop = &s2io_close;
4845 dev->hard_start_xmit = &s2io_xmit;
4846 dev->get_stats = &s2io_get_stats;
4847 dev->set_multicast_list = &s2io_set_multicast;
4848 dev->do_ioctl = &s2io_ioctl;
4849 dev->change_mtu = &s2io_change_mtu;
4850 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4853 * will use eth_mac_addr() for dev->set_mac_address
4854 * mac address will be set every time dev->open() is called
4856 #if defined(CONFIG_S2IO_NAPI)
4857 dev->poll = s2io_poll;
/*
 * Advertise scatter/gather and IP checksum offload; high-DMA and TSO
 * are added conditionally below.
 */
4861 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4862 if (sp->high_dma_flag == TRUE)
4863 dev->features |= NETIF_F_HIGHDMA;
4865 dev->features |= NETIF_F_TSO;
4868 dev->tx_timeout = &s2io_tx_watchdog;
4869 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
/*
 * Deferred-work handlers: NIC restart after a Tx watchdog, and link
 * state changes (old two-argument INIT_WORK with a cast callback).
 */
4870 INIT_WORK(&sp->rst_timer_task,
4871 (void (*)(void *)) s2io_restart_nic, dev);
4872 INIT_WORK(&sp->set_link_task,
4873 (void (*)(void *)) s2io_set_link, sp);
/* Snapshot PCI config space so it can be restored after a chip reset. */
4875 pci_save_state(sp->pdev);
4877 /* Setting swapper control on the NIC, for proper reset operation */
4878 if (s2io_set_swapper(sp)) {
4879 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4882 goto set_swap_failed;
4886 * Fix for all "FFs" MAC address problems observed on
4889 fix_mac_address(sp);
4893 * MAC address initialization.
4894 * For now only one mac address will be read and used.
/*
 * Issue a read-strobe for MAC address slot 0 and wait for the command
 * to complete before reading the data register.
 */
4897 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4898 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4899 writeq(val64, &bar0->rmac_addr_cmd_mem);
4900 wait_for_cmd_complete(sp);
4902 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4903 mac_down = (u32) tmp64;
4904 mac_up = (u32) (tmp64 >> 32);
/*
 * NOTE(review): sizeof(ETH_ALEN) is sizeof(int), i.e. 4, not the 6
 * bytes of a MAC address -- the third argument should likely be plain
 * ETH_ALEN.  Harmless here only because all six bytes are assigned
 * immediately below.
 */
4906 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
/* Unpack the 48-bit address from the two 32-bit register halves. */
4908 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4909 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4910 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4911 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4912 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4913 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4916 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4917 sp->def_mac_addr[0].mac_addr[0],
4918 sp->def_mac_addr[0].mac_addr[1],
4919 sp->def_mac_addr[0].mac_addr[2],
4920 sp->def_mac_addr[0].mac_addr[3],
4921 sp->def_mac_addr[0].mac_addr[4],
4922 sp->def_mac_addr[0].mac_addr[5]);
4924 /* Set the factory defined MAC address initially */
4925 dev->addr_len = ETH_ALEN;
4926 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4929 * Initialize the tasklet status and link state flags
4930 * and the card state parameter
4932 atomic_set(&(sp->card_state), 0);
4933 sp->tasklet_status = 0;
4936 /* Initialize spinlocks */
4937 spin_lock_init(&sp->tx_lock);
4938 #ifndef CONFIG_S2IO_NAPI
4939 spin_lock_init(&sp->put_lock);
4943 * SXE-002: Configure link and activity LED to init state
/* The SXE-002 LED workaround applies when the subsystem id's low byte
 * is >= 0x07. */
4946 subid = sp->pdev->subsystem_device;
4947 if ((subid & 0xFF) >= 0x07) {
4948 val64 = readq(&bar0->gpio_control);
4949 val64 |= 0x0000800000000000ULL;
4950 writeq(val64, &bar0->gpio_control);
4951 val64 = 0x0411040400000000ULL;
/*
 * NOTE(review): byte-offset arithmetic on a cast void __iomem *
 * (GCC extension); writes a register at bar0 + 0x2700 that is not in
 * the named register layout -- presumably part of the errata fix.
 */
4952 writeq(val64, (void __iomem *) bar0 + 0x2700);
4953 val64 = readq(&bar0->gpio_control);
4956 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
/*
 * Make the interface visible to userspace; from this point the net
 * core may invoke the entry points installed above.
 */
4958 if (register_netdev(dev)) {
4959 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4961 goto register_failed;
4965 * Make Link state as off at this point, when the Link change
4966 * interrupt comes the state will be automatically changed to
4969 netif_carrier_off(dev);
4970 sp->last_link_state = LINK_DOWN;
/* Error labels (elided above) unwind in reverse order of acquisition. */
4981 free_shared_mem(sp);
4982 pci_disable_device(pdev);
4983 pci_release_regions(pdev);
4984 pci_set_drvdata(pdev, NULL);
4991 * s2io_rem_nic - Free the PCI device
4992 * @pdev: structure containing the PCI related information of the device.
4993 * Description: This function is called by the Pci subsystem to release a
4994 * PCI device and free up all resource held up by the device. This could
4995 * be in response to a Hot plug event or when the driver is to be removed
4999 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
/* Recover the net_device stashed in the PCI driver data at probe time. */
5001 struct net_device *dev =
5002 (struct net_device *) pci_get_drvdata(pdev);
/* Presumably inside a NULL check on dev (guard elided from this view). */
5006 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Detach from the net core first so no new entry points are invoked. */
5011 unregister_netdev(dev);
/* NOTE(review): sp is presumably dev's private nic_t (assignment elided
 * from this view) -- confirm before relying on it. */
5013 free_shared_mem(sp);
/* Release PCI resources and clear the driver-data pointer last. */
5016 pci_disable_device(pdev);
5017 pci_release_regions(pdev);
5018 pci_set_drvdata(pdev, NULL);
5023 * s2io_starter - Entry point for the driver
5024 * Description: This function is the entry point for the driver. It verifies
5025 * the module loadable parameters and initializes PCI configuration space.
5028 int __init s2io_starter(void)
5030 return pci_module_init(&s2io_driver);
5034 * s2io_closer - Cleanup routine for the driver
5035 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
5038 void s2io_closer(void)
5040 pci_unregister_driver(&s2io_driver);
5041 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Register the module's entry (s2io_starter) and exit (s2io_closer)
 * points with the kernel. */
5044 module_init(s2io_starter);
5045 module_exit(s2io_closer);