1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watchdog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all the code parts that were
22  *                        deprecated and also for styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters supported by the driver, and a brief
28  * explanation of each variable, are listed below.
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_sz: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34  * tx_fifo_len: This too is an array of 8. Each element defines the number of
35  * Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
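/*
 * Illustrative example only: assuming the variables above are exported as
 * module parameters under the same names (the module_param declarations are
 * not shown in this portion of the file), a hypothetical invocation could be:
 *
 *      modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 \
 *              rx_ring_num=2 rx_ring_sz=16,16
 */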
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
59
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
62 #include <asm/io.h>
63
64 /* local include */
65 #include "s2io.h"
66 #include "s2io-regs.h"
67
68 /* S2io Driver name & version. */
69 static char s2io_driver_name[] = "Neterion";
70 static char s2io_driver_version[] = "Version 1.7.7";
71
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
73 {
74         int ret;
75
76         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
78
79         return ret;
80 }
81
82 /*
83  * Cards with the following subsystem_ids have a link state indication
84  * problem: 600B, 600C, 600D, 640B, 640C and 640D.
85  * The macro below identifies these cards given the subsystem_id.
86  */
87 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88         (dev_type == XFRAME_I_DEVICE) ?                 \
89                 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
91
92 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
94 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
95 #define PANIC   1
96 #define LOW     2
97 static inline int rx_buffer_level(nic_t *sp, int rxb_size, int ring)
98 {
99         int level = 0;
100         mac_info_t *mac_control;
101
102         mac_control = &sp->mac_control;
103         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
104                 level = LOW;
105                 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
106                         level = PANIC;
107                 }
108         }
109
110         return level;
111 }
112
113 /* Ethtool related variables and Macros. */
114 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115         "Register test\t(offline)",
116         "Eeprom test\t(offline)",
117         "Link test\t(online)",
118         "RLDRAM test\t(offline)",
119         "BIST Test\t(offline)"
120 };
121
122 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
123         {"tmac_frms"},
124         {"tmac_data_octets"},
125         {"tmac_drop_frms"},
126         {"tmac_mcst_frms"},
127         {"tmac_bcst_frms"},
128         {"tmac_pause_ctrl_frms"},
129         {"tmac_any_err_frms"},
130         {"tmac_vld_ip_octets"},
131         {"tmac_vld_ip"},
132         {"tmac_drop_ip"},
133         {"tmac_icmp"},
134         {"tmac_rst_tcp"},
135         {"tmac_tcp"},
136         {"tmac_udp"},
137         {"rmac_vld_frms"},
138         {"rmac_data_octets"},
139         {"rmac_fcs_err_frms"},
140         {"rmac_drop_frms"},
141         {"rmac_vld_mcst_frms"},
142         {"rmac_vld_bcst_frms"},
143         {"rmac_in_rng_len_err_frms"},
144         {"rmac_long_frms"},
145         {"rmac_pause_ctrl_frms"},
146         {"rmac_discarded_frms"},
147         {"rmac_usized_frms"},
148         {"rmac_osized_frms"},
149         {"rmac_frag_frms"},
150         {"rmac_jabber_frms"},
151         {"rmac_ip"},
152         {"rmac_ip_octets"},
153         {"rmac_hdr_err_ip"},
154         {"rmac_drop_ip"},
155         {"rmac_icmp"},
156         {"rmac_tcp"},
157         {"rmac_udp"},
158         {"rmac_err_drp_udp"},
159         {"rmac_pause_cnt"},
160         {"rmac_accepted_ip"},
161         {"rmac_err_tcp"},
162         {"\n DRIVER STATISTICS"},
163         {"single_bit_ecc_errs"},
164         {"double_bit_ecc_errs"},
165 };
166
167 #define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
168 #define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
169
170 #define S2IO_TEST_LEN   (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
171 #define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
172
173 #define S2IO_TIMER_CONF(timer, handle, arg, exp)                \
174                         init_timer(&timer);                     \
175                         timer.function = handle;                \
176                         timer.data = (unsigned long) arg;       \
177                         mod_timer(&timer, (jiffies + exp))
178
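/*
 * Sketch of how S2IO_TIMER_CONF() is meant to be invoked; the timer field
 * and handler named below are illustrative assumptions, not definitions
 * made in this file:
 *
 *      S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ / 2));
 */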
179 /* Add the vlan */
180 static void s2io_vlan_rx_register(struct net_device *dev,
181                                         struct vlan_group *grp)
182 {
183         nic_t *nic = dev->priv;
184         unsigned long flags;
185
186         spin_lock_irqsave(&nic->tx_lock, flags);
187         nic->vlgrp = grp;
188         spin_unlock_irqrestore(&nic->tx_lock, flags);
189 }
190
191 /* Unregister the vlan */
192 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
193 {
194         nic_t *nic = dev->priv;
195         unsigned long flags;
196
197         spin_lock_irqsave(&nic->tx_lock, flags);
198         if (nic->vlgrp)
199                 nic->vlgrp->vlan_devices[vid] = NULL;
200         spin_unlock_irqrestore(&nic->tx_lock, flags);
201 }
202
203 /*
204  * Constants to be programmed into the Xena's registers, to configure
205  * the XAUI.
206  */
207
208 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
209 #define END_SIGN        0x0
210
211 static u64 herc_act_dtx_cfg[] = {
212         /* Set address */
213         0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
214         /* Write data */
215         0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
216         /* Set address */
217         0x80010515003F0000ULL, 0x80010515003F00E0ULL,
218         /* Write data */
219         0x80010515003F0004ULL, 0x80010515003F00E4ULL,
220         /* Set address */
221         0x80020515F2100000ULL, 0x80020515F21000E0ULL,
222         /* Write data */
223         0x80020515F2100004ULL, 0x80020515F21000E4ULL,
224         /* Done */
225         END_SIGN
226 };
227
228 static u64 xena_mdio_cfg[] = {
229         /* Reset PMA PLL */
230         0xC001010000000000ULL, 0xC0010100000000E0ULL,
231         0xC0010100008000E4ULL,
232         /* Remove Reset from PMA PLL */
233         0xC001010000000000ULL, 0xC0010100000000E0ULL,
234         0xC0010100000000E4ULL,
235         END_SIGN
236 };
237
238 static u64 xena_dtx_cfg[] = {
239         0x8000051500000000ULL, 0x80000515000000E0ULL,
240         0x80000515D93500E4ULL, 0x8001051500000000ULL,
241         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
242         0x8002051500000000ULL, 0x80020515000000E0ULL,
243         0x80020515F21000E4ULL,
244         /* Set PADLOOPBACKN */
245         0x8002051500000000ULL, 0x80020515000000E0ULL,
246         0x80020515B20000E4ULL, 0x8003051500000000ULL,
247         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
248         0x8004051500000000ULL, 0x80040515000000E0ULL,
249         0x80040515B20000E4ULL, 0x8005051500000000ULL,
250         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
251         SWITCH_SIGN,
252         /* Remove PADLOOPBACKN */
253         0x8002051500000000ULL, 0x80020515000000E0ULL,
254         0x80020515F20000E4ULL, 0x8003051500000000ULL,
255         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
256         0x8004051500000000ULL, 0x80040515000000E0ULL,
257         0x80040515F20000E4ULL, 0x8005051500000000ULL,
258         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
259         END_SIGN
260 };
261
262 /*
263  * Constants for Fixing the MacAddress problem seen mostly on
264  * Alpha machines.
265  */
266 static u64 fix_mac[] = {
267         0x0060000000000000ULL, 0x0060600000000000ULL,
268         0x0040600000000000ULL, 0x0000600000000000ULL,
269         0x0020600000000000ULL, 0x0060600000000000ULL,
270         0x0020600000000000ULL, 0x0060600000000000ULL,
271         0x0020600000000000ULL, 0x0060600000000000ULL,
272         0x0020600000000000ULL, 0x0060600000000000ULL,
273         0x0020600000000000ULL, 0x0060600000000000ULL,
274         0x0020600000000000ULL, 0x0060600000000000ULL,
275         0x0020600000000000ULL, 0x0060600000000000ULL,
276         0x0020600000000000ULL, 0x0060600000000000ULL,
277         0x0020600000000000ULL, 0x0060600000000000ULL,
278         0x0020600000000000ULL, 0x0060600000000000ULL,
279         0x0020600000000000ULL, 0x0000600000000000ULL,
280         0x0040600000000000ULL, 0x0060600000000000ULL,
281         END_SIGN
282 };
283
284 /* Module Loadable parameters. */
285 static unsigned int tx_fifo_num = 1;
286 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
287     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
288 static unsigned int rx_ring_num = 1;
289 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
290     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
291 static unsigned int rts_frm_len[MAX_RX_RINGS] =
292     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
293 static unsigned int use_continuous_tx_intrs = 1;
294 static unsigned int rmac_pause_time = 65535;
295 static unsigned int mc_pause_threshold_q0q3 = 187;
296 static unsigned int mc_pause_threshold_q4q7 = 187;
297 static unsigned int shared_splits;
298 static unsigned int tmac_util_period = 5;
299 static unsigned int rmac_util_period = 5;
300 #ifndef CONFIG_S2IO_NAPI
301 static unsigned int indicate_max_pkts;
302 #endif
303
304 /*
305  * S2IO device table.
306  * This table lists all the devices that this driver supports.
307  */
308 static struct pci_device_id s2io_tbl[] __devinitdata = {
309         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
310          PCI_ANY_ID, PCI_ANY_ID},
311         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
312          PCI_ANY_ID, PCI_ANY_ID},
313         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
314          PCI_ANY_ID, PCI_ANY_ID},
315         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
316          PCI_ANY_ID, PCI_ANY_ID},
317         {0,}
318 };
319
320 MODULE_DEVICE_TABLE(pci, s2io_tbl);
321
322 static struct pci_driver s2io_driver = {
323       .name = "S2IO",
324       .id_table = s2io_tbl,
325       .probe = s2io_init_nic,
326       .remove = __devexit_p(s2io_rem_nic),
327 };
328
329 /* A simplifier macro used both by init and free shared_mem Fns(). */
330 #define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
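/*
 * TXD_MEM_PAGE_CNT() is a plain ceiling division: e.g. a FIFO of 100 TxD
 * lists with 32 lists fitting in a page needs (100 + 32 - 1) / 32 = 4 pages.
 */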
331
332 /**
333  * init_shared_mem - Allocation and Initialization of Memory
334  * @nic: Device private variable.
335  * Description: The function allocates all the memory areas shared
336  * between the NIC and the driver. This includes Tx descriptors,
337  * Rx descriptors and the statistics block.
338  */
339
340 static int init_shared_mem(struct s2io_nic *nic)
341 {
342         u32 size;
343         void *tmp_v_addr, *tmp_v_addr_next;
344         dma_addr_t tmp_p_addr, tmp_p_addr_next;
345         RxD_block_t *pre_rxd_blk = NULL;
346         int i, j, blk_cnt, rx_sz, tx_sz;
347         int lst_size, lst_per_page;
348         struct net_device *dev = nic->dev;
349 #ifdef CONFIG_2BUFF_MODE
350         u64 tmp;
351         buffAdd_t *ba;
352 #endif
353
354         mac_info_t *mac_control;
355         struct config_param *config;
356
357         mac_control = &nic->mac_control;
358         config = &nic->config;
359
360
361         /* Allocation and initialization of TXDLs in FIFOs */
362         size = 0;
363         for (i = 0; i < config->tx_fifo_num; i++) {
364                 size += config->tx_cfg[i].fifo_len;
365         }
366         if (size > MAX_AVAILABLE_TXDS) {
367                 DBG_PRINT(ERR_DBG, "%s: Total number of Tx descriptors ",
368                           dev->name);
369                 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
370                 DBG_PRINT(ERR_DBG, "that can be used\n");
371                 return FAILURE;
372         }
373
374         lst_size = (sizeof(TxD_t) * config->max_txds);
375         tx_sz = lst_size * size;
376         lst_per_page = PAGE_SIZE / lst_size;
377
378         for (i = 0; i < config->tx_fifo_num; i++) {
379                 int fifo_len = config->tx_cfg[i].fifo_len;
380                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
381                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
382                                                           GFP_KERNEL);
383                 if (!mac_control->fifos[i].list_info) {
384                         DBG_PRINT(ERR_DBG,
385                                   "Malloc failed for list_info\n");
386                         return -ENOMEM;
387                 }
388                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
389         }
390         for (i = 0; i < config->tx_fifo_num; i++) {
391                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
392                                                 lst_per_page);
393                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
394                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
395                     config->tx_cfg[i].fifo_len - 1;
396                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
397                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
398                     config->tx_cfg[i].fifo_len - 1;
399                 mac_control->fifos[i].fifo_no = i;
400                 mac_control->fifos[i].nic = nic;
401                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
402
403                 for (j = 0; j < page_num; j++) {
404                         int k = 0;
405                         dma_addr_t tmp_p;
406                         void *tmp_v;
407                         tmp_v = pci_alloc_consistent(nic->pdev,
408                                                      PAGE_SIZE, &tmp_p);
409                         if (!tmp_v) {
410                                 DBG_PRINT(ERR_DBG,
411                                           "pci_alloc_consistent ");
412                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
413                                 return -ENOMEM;
414                         }
415                         while (k < lst_per_page) {
416                                 int l = (j * lst_per_page) + k;
417                                 if (l == config->tx_cfg[i].fifo_len)
418                                         break;
419                                 mac_control->fifos[i].list_info[l].list_virt_addr =
420                                     tmp_v + (k * lst_size);
421                                 mac_control->fifos[i].list_info[l].list_phy_addr =
422                                     tmp_p + (k * lst_size);
423                                 k++;
424                         }
425                 }
426         }
427
428         /* Allocation and initialization of RXDs in Rings */
429         size = 0;
430         for (i = 0; i < config->rx_ring_num; i++) {
431                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
432                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
433                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
434                                   i);
435                         DBG_PRINT(ERR_DBG, "RxDs per Block");
436                         return FAILURE;
437                 }
438                 size += config->rx_cfg[i].num_rxd;
439                 mac_control->rings[i].block_count =
440                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
441                 mac_control->rings[i].pkt_cnt =
442                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
443         }
444         size = (size * (sizeof(RxD_t)));
445         rx_sz = size;
446
447         for (i = 0; i < config->rx_ring_num; i++) {
448                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
449                 mac_control->rings[i].rx_curr_get_info.offset = 0;
450                 mac_control->rings[i].rx_curr_get_info.ring_len =
451                     config->rx_cfg[i].num_rxd - 1;
452                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
453                 mac_control->rings[i].rx_curr_put_info.offset = 0;
454                 mac_control->rings[i].rx_curr_put_info.ring_len =
455                     config->rx_cfg[i].num_rxd - 1;
456                 mac_control->rings[i].nic = nic;
457                 mac_control->rings[i].ring_no = i;
458
459                 blk_cnt =
460                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
461                 /*  Allocating all the Rx blocks */
462                 for (j = 0; j < blk_cnt; j++) {
463 #ifndef CONFIG_2BUFF_MODE
464                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
465 #else
466                         size = SIZE_OF_BLOCK;
467 #endif
468                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
469                                                           &tmp_p_addr);
470                         if (tmp_v_addr == NULL) {
471                                 /*
472                                  * In case of failure, free_shared_mem()
473                                  * is called, which should free any
474                                  * memory that was alloced till the
475                                  * failure happened.
476                                  */
477                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
478                                     tmp_v_addr;
479                                 return -ENOMEM;
480                         }
481                         memset(tmp_v_addr, 0, size);
482                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
483                                 tmp_v_addr;
484                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
485                                 tmp_p_addr;
486                 }
487                 /* Interlinking all Rx Blocks */
488                 for (j = 0; j < blk_cnt; j++) {
489                         tmp_v_addr =
490                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
491                         tmp_v_addr_next =
492                                 mac_control->rings[i].rx_blocks[(j + 1) %
493                                               blk_cnt].block_virt_addr;
494                         tmp_p_addr =
495                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
496                         tmp_p_addr_next =
497                                 mac_control->rings[i].rx_blocks[(j + 1) %
498                                               blk_cnt].block_dma_addr;
499
500                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
501                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
502                                                                  * marker.
503                                                                  */
504 #ifndef CONFIG_2BUFF_MODE
505                         pre_rxd_blk->reserved_2_pNext_RxD_block =
506                             (unsigned long) tmp_v_addr_next;
507 #endif
508                         pre_rxd_blk->pNext_RxD_Blk_physical =
509                             (u64) tmp_p_addr_next;
510                 }
511         }
512
513 #ifdef CONFIG_2BUFF_MODE
514         /*
515          * Allocation of storage for the buffer addresses in 2BUFF mode,
516          * and of the buffers themselves.
517          */
518         for (i = 0; i < config->rx_ring_num; i++) {
519                 blk_cnt =
520                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
521                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
522                                      GFP_KERNEL);
523                 if (!mac_control->rings[i].ba)
524                         return -ENOMEM;
525                 for (j = 0; j < blk_cnt; j++) {
526                         int k = 0;
527                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
528                                                  (MAX_RXDS_PER_BLOCK + 1)),
529                                                 GFP_KERNEL);
530                         if (!mac_control->rings[i].ba[j])
531                                 return -ENOMEM;
532                         while (k != MAX_RXDS_PER_BLOCK) {
533                                 ba = &mac_control->rings[i].ba[j][k];
534
535                                 ba->ba_0_org = (void *) kmalloc
536                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
537                                 if (!ba->ba_0_org)
538                                         return -ENOMEM;
539                                 tmp = (u64) ba->ba_0_org;
540                                 tmp += ALIGN_SIZE;
541                                 tmp &= ~((u64) ALIGN_SIZE);
542                                 ba->ba_0 = (void *) tmp;
543
544                                 ba->ba_1_org = (void *) kmalloc
545                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
546                                 if (!ba->ba_1_org)
547                                         return -ENOMEM;
548                                 tmp = (u64) ba->ba_1_org;
549                                 tmp += ALIGN_SIZE;
550                                 tmp &= ~((u64) ALIGN_SIZE);
551                                 ba->ba_1 = (void *) tmp;
552                                 k++;
553                         }
554                 }
555         }
556 #endif
557
558         /* Allocation and initialization of Statistics block */
559         size = sizeof(StatInfo_t);
560         mac_control->stats_mem = pci_alloc_consistent
561             (nic->pdev, size, &mac_control->stats_mem_phy);
562
563         if (!mac_control->stats_mem) {
564                 /*
565                  * In case of failure, free_shared_mem() is called, which
566                  * should free any memory that was alloced till the
567                  * failure happened.
568                  */
569                 return -ENOMEM;
570         }
571         mac_control->stats_mem_sz = size;
572
573         tmp_v_addr = mac_control->stats_mem;
574         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
575         memset(tmp_v_addr, 0, size);
576         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
577                   (unsigned long long) tmp_p_addr);
578
579         return SUCCESS;
580 }
581
582 /**
583  * free_shared_mem - Free the allocated Memory
584  * @nic:  Device private variable.
585  * Description: This function frees all the memory allocated by the
586  * init_shared_mem() function and returns it to the kernel.
587  */
588
589 static void free_shared_mem(struct s2io_nic *nic)
590 {
591         int i, j, blk_cnt, size;
592         void *tmp_v_addr;
593         dma_addr_t tmp_p_addr;
594         mac_info_t *mac_control;
595         struct config_param *config;
596         int lst_size, lst_per_page;
597
598
599         if (!nic)
600                 return;
601
602         mac_control = &nic->mac_control;
603         config = &nic->config;
604
605         lst_size = (sizeof(TxD_t) * config->max_txds);
606         lst_per_page = PAGE_SIZE / lst_size;
607
608         for (i = 0; i < config->tx_fifo_num; i++) {
609                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
610                                                 lst_per_page);
611                 for (j = 0; j < page_num; j++) {
612                         int mem_blks = (j * lst_per_page);
613                         if (!mac_control->fifos[i].list_info[mem_blks].
614                             list_virt_addr)
615                                 break;
616                         pci_free_consistent(nic->pdev, PAGE_SIZE,
617                                             mac_control->fifos[i].
618                                             list_info[mem_blks].
619                                             list_virt_addr,
620                                             mac_control->fifos[i].
621                                             list_info[mem_blks].
622                                             list_phy_addr);
623                 }
624                 kfree(mac_control->fifos[i].list_info);
625         }
626
627 #ifndef CONFIG_2BUFF_MODE
628         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
629 #else
630         size = SIZE_OF_BLOCK;
631 #endif
632         for (i = 0; i < config->rx_ring_num; i++) {
633                 blk_cnt = mac_control->rings[i].block_count;
634                 for (j = 0; j < blk_cnt; j++) {
635                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
636                                 block_virt_addr;
637                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
638                                 block_dma_addr;
639                         if (tmp_v_addr == NULL)
640                                 break;
641                         pci_free_consistent(nic->pdev, size,
642                                             tmp_v_addr, tmp_p_addr);
643                 }
644         }
645
646 #ifdef CONFIG_2BUFF_MODE
647         /* Freeing buffer storage addresses in 2BUFF mode. */
648         for (i = 0; i < config->rx_ring_num; i++) {
649                 blk_cnt =
650                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
651                 for (j = 0; j < blk_cnt; j++) {
652                         int k = 0;
653                         if (!mac_control->rings[i].ba[j])
654                                 continue;
655                         while (k != MAX_RXDS_PER_BLOCK) {
656                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
657                                 kfree(ba->ba_0_org);
658                                 kfree(ba->ba_1_org);
659                                 k++;
660                         }
661                         kfree(mac_control->rings[i].ba[j]);
662                 }
663                 if (mac_control->rings[i].ba)
664                         kfree(mac_control->rings[i].ba);
665         }
666 #endif
667
668         if (mac_control->stats_mem) {
669                 pci_free_consistent(nic->pdev,
670                                     mac_control->stats_mem_sz,
671                                     mac_control->stats_mem,
672                                     mac_control->stats_mem_phy);
673         }
674 }
675
676 /**
677  * s2io_verify_pci_mode - Determine the PCI/PCI-X bus mode the adapter is operating in.
678  */
679
680 static int s2io_verify_pci_mode(nic_t *nic)
681 {
682         XENA_dev_config_t __iomem *bar0 = nic->bar0;
683         register u64 val64 = 0;
684         int     mode;
685
686         val64 = readq(&bar0->pci_mode);
687         mode = (u8)GET_PCI_MODE(val64);
688
689         if (val64 & PCI_MODE_UNKNOWN_MODE)
690                 return -1;      /* Unknown PCI mode */
691         return mode;
692 }
693
694
695 /**
696  * s2io_print_pci_mode - Print the bus width and speed and record the bus speed in the device configuration.
697  */
698 static int s2io_print_pci_mode(nic_t *nic)
699 {
700         XENA_dev_config_t __iomem *bar0 = nic->bar0;
701         register u64 val64 = 0;
702         int     mode;
703         struct config_param *config = &nic->config;
704
705         val64 = readq(&bar0->pci_mode);
706         mode = (u8)GET_PCI_MODE(val64);
707
708         if (val64 & PCI_MODE_UNKNOWN_MODE)
709                 return -1;      /* Unknown PCI mode */
710
711         if (val64 & PCI_MODE_32_BITS) {
712                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
713         } else {
714                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
715         }
716
717         switch(mode) {
718                 case PCI_MODE_PCI_33:
719                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
720                         config->bus_speed = 33;
721                         break;
722                 case PCI_MODE_PCI_66:
723                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
724                         config->bus_speed = 133;
725                         break;
726                 case PCI_MODE_PCIX_M1_66:
727                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
728                         config->bus_speed = 133; /* Herc doubles the clock rate */
729                         break;
730                 case PCI_MODE_PCIX_M1_100:
731                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
732                         config->bus_speed = 200;
733                         break;
734                 case PCI_MODE_PCIX_M1_133:
735                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
736                         config->bus_speed = 266;
737                         break;
738                 case PCI_MODE_PCIX_M2_66:
739                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
740                         config->bus_speed = 133;
741                         break;
742                 case PCI_MODE_PCIX_M2_100:
743                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
744                         config->bus_speed = 200;
745                         break;
746                 case PCI_MODE_PCIX_M2_133:
747                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
748                         config->bus_speed = 266;
749                         break;
750                 default:
751                         return -1;      /* Unsupported bus speed */
752         }
753
754         return mode;
755 }
756
757 /**
758  *  init_nic - Initialization of hardware
759  *  @nic: device private variable
760  *  Description: The function sequentially configures every block
761  *  of the H/W from their reset values.
762  *  Return Value:  SUCCESS on success and
763  *  '-1' on failure (endian settings incorrect).
764  */
765
766 static int init_nic(struct s2io_nic *nic)
767 {
768         XENA_dev_config_t __iomem *bar0 = nic->bar0;
769         struct net_device *dev = nic->dev;
770         register u64 val64 = 0;
771         void __iomem *add;
772         u32 time;
773         int i, j;
774         mac_info_t *mac_control;
775         struct config_param *config;
776         int mdio_cnt = 0, dtx_cnt = 0;
777         unsigned long long mem_share;
778         int mem_size;
779
780         mac_control = &nic->mac_control;
781         config = &nic->config;
782
783         /* Set the swapper control on the card */
784         if(s2io_set_swapper(nic)) {
785                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
786                 return -1;
787         }
788
789         /*
790          * Herc requires EOI to be removed from reset before XGXS, so..
791          */
792         if (nic->device_type & XFRAME_II_DEVICE) {
793                 val64 = 0xA500000000ULL;
794                 writeq(val64, &bar0->sw_reset);
795                 msleep(500);
796                 val64 = readq(&bar0->sw_reset);
797         }
798
799         /* Remove XGXS from reset state */
800         val64 = 0;
801         writeq(val64, &bar0->sw_reset);
802         msleep(500);
803         val64 = readq(&bar0->sw_reset);
804
805         /*  Enable Receiving broadcasts */
806         add = &bar0->mac_cfg;
807         val64 = readq(&bar0->mac_cfg);
808         val64 |= MAC_RMAC_BCAST_ENABLE;
809         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
810         writel((u32) val64, add);
811         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
812         writel((u32) (val64 >> 32), (add + 4));
813
814         /* Read registers in all blocks */
815         val64 = readq(&bar0->mac_int_mask);
816         val64 = readq(&bar0->mc_int_mask);
817         val64 = readq(&bar0->xgxs_int_mask);
818
819         /*  Set MTU */
820         val64 = dev->mtu;
821         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
822
823         /*
824          * Configuring the XAUI Interface of Xena.
825          * ***************************************
826          * To Configure the Xena's XAUI, one has to write a series
827          * of 64 bit values into two registers in a particular
828          * sequence. Hence a macro 'SWITCH_SIGN' has been defined
829          * which will be defined in the array of configuration values
830          * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
831  * to switch writing from one register to another. We continue
832  * writing these values until we encounter the 'END_SIGN' macro.
833  * For example, after making a series of 21 writes into
834          * dtx_control register the 'SWITCH_SIGN' appears and hence we
835          * start writing into mdio_control until we encounter END_SIGN.
836          */
837         if (nic->device_type & XFRAME_II_DEVICE) {
838                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
839                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
840                                           &bar0->dtx_control, UF);
841                         if (dtx_cnt & 0x1)
842                                 msleep(1); /* Necessary!! */
843                         dtx_cnt++;
844                 }
845         } else {
846                 while (1) {
847                       dtx_cfg:
848                         while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
849                                 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
850                                         dtx_cnt++;
851                                         goto mdio_cfg;
852                                 }
853                                 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
854                                                   &bar0->dtx_control, UF);
855                                 val64 = readq(&bar0->dtx_control);
856                                 dtx_cnt++;
857                         }
858                       mdio_cfg:
859                         while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
860                                 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
861                                         mdio_cnt++;
862                                         goto dtx_cfg;
863                                 }
864                                 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
865                                                   &bar0->mdio_control, UF);
866                                 val64 = readq(&bar0->mdio_control);
867                                 mdio_cnt++;
868                         }
869                         if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
870                             (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
871                                 break;
872                         } else {
873                                 goto dtx_cfg;
874                         }
875                 }
876         }
877
878         /*  Tx DMA Initialization */
879         val64 = 0;
880         writeq(val64, &bar0->tx_fifo_partition_0);
881         writeq(val64, &bar0->tx_fifo_partition_1);
882         writeq(val64, &bar0->tx_fifo_partition_2);
883         writeq(val64, &bar0->tx_fifo_partition_3);
884
885
886         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
887                 val64 |=
888                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
889                          13) | vBIT(config->tx_cfg[i].fifo_priority,
890                                     ((i * 32) + 5), 3);
891
892                 if (i == (config->tx_fifo_num - 1)) {
893                         if (i % 2 == 0)
894                                 i++;
895                 }
896
897                 switch (i) {
898                 case 1:
899                         writeq(val64, &bar0->tx_fifo_partition_0);
900                         val64 = 0;
901                         break;
902                 case 3:
903                         writeq(val64, &bar0->tx_fifo_partition_1);
904                         val64 = 0;
905                         break;
906                 case 5:
907                         writeq(val64, &bar0->tx_fifo_partition_2);
908                         val64 = 0;
909                         break;
910                 case 7:
911                         writeq(val64, &bar0->tx_fifo_partition_3);
912                         break;
913                 }
914         }
915
916         /* Enable Tx FIFO partition 0. */
917         val64 = readq(&bar0->tx_fifo_partition_0);
918         val64 |= BIT(0);        /* To enable the FIFO partition. */
919         writeq(val64, &bar0->tx_fifo_partition_0);
920
921         /*
922          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
923          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
924          */
925         if ((nic->device_type == XFRAME_I_DEVICE) &&
926                 (get_xena_rev_id(nic->pdev) < 4))
927                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
928
929         val64 = readq(&bar0->tx_fifo_partition_0);
930         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
931                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
932
933         /*
934          * Initialization of Tx_PA_CONFIG register to ignore packet
935          * integrity checking.
936          */
937         val64 = readq(&bar0->tx_pa_cfg);
938         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
939             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
940         writeq(val64, &bar0->tx_pa_cfg);
941
942         /* Rx DMA initialization. */
943         val64 = 0;
944         for (i = 0; i < config->rx_ring_num; i++) {
945                 val64 |=
946                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
947                          3);
948         }
949         writeq(val64, &bar0->rx_queue_priority);
950
951         /*
952          * Allocating equal share of memory to all the
953          * configured Rings.
954          */
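        /*
         * Worked example: with mem_size = 64 and three rings, Q0 is given
         * 64/3 + 64%3 = 22 of the 64 units while Q1 and Q2 get 64/3 = 21
         * each, i.e. the remainder of the division always goes to queue 0.
         */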
955         val64 = 0;
956         if (nic->device_type & XFRAME_II_DEVICE)
957                 mem_size = 32;
958         else
959                 mem_size = 64;
960
961         for (i = 0; i < config->rx_ring_num; i++) {
962                 switch (i) {
963                 case 0:
964                         mem_share = (mem_size / config->rx_ring_num +
965                                      mem_size % config->rx_ring_num);
966                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
967                         continue;
968                 case 1:
969                         mem_share = (mem_size / config->rx_ring_num);
970                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
971                         continue;
972                 case 2:
973                         mem_share = (mem_size / config->rx_ring_num);
974                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
975                         continue;
976                 case 3:
977                         mem_share = (mem_size / config->rx_ring_num);
978                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
979                         continue;
980                 case 4:
981                         mem_share = (mem_size / config->rx_ring_num);
982                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
983                         continue;
984                 case 5:
985                         mem_share = (mem_size / config->rx_ring_num);
986                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
987                         continue;
988                 case 6:
989                         mem_share = (mem_size / config->rx_ring_num);
990                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
991                         continue;
992                 case 7:
993                         mem_share = (mem_size / config->rx_ring_num);
994                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
995                         continue;
996                 }
997         }
998         writeq(val64, &bar0->rx_queue_cfg);
999
1000         /*
1001          * Filling Tx round robin registers
1002          * as per the number of FIFOs
1003          */
1004         switch (config->tx_fifo_num) {
1005         case 1:
1006                 val64 = 0x0000000000000000ULL;
1007                 writeq(val64, &bar0->tx_w_round_robin_0);
1008                 writeq(val64, &bar0->tx_w_round_robin_1);
1009                 writeq(val64, &bar0->tx_w_round_robin_2);
1010                 writeq(val64, &bar0->tx_w_round_robin_3);
1011                 writeq(val64, &bar0->tx_w_round_robin_4);
1012                 break;
1013         case 2:
1014                 val64 = 0x0000010000010000ULL;
1015                 writeq(val64, &bar0->tx_w_round_robin_0);
1016                 val64 = 0x0100000100000100ULL;
1017                 writeq(val64, &bar0->tx_w_round_robin_1);
1018                 val64 = 0x0001000001000001ULL;
1019                 writeq(val64, &bar0->tx_w_round_robin_2);
1020                 val64 = 0x0000010000010000ULL;
1021                 writeq(val64, &bar0->tx_w_round_robin_3);
1022                 val64 = 0x0100000000000000ULL;
1023                 writeq(val64, &bar0->tx_w_round_robin_4);
1024                 break;
1025         case 3:
1026                 val64 = 0x0001000102000001ULL;
1027                 writeq(val64, &bar0->tx_w_round_robin_0);
1028                 val64 = 0x0001020000010001ULL;
1029                 writeq(val64, &bar0->tx_w_round_robin_1);
1030                 val64 = 0x0200000100010200ULL;
1031                 writeq(val64, &bar0->tx_w_round_robin_2);
1032                 val64 = 0x0001000102000001ULL;
1033                 writeq(val64, &bar0->tx_w_round_robin_3);
1034                 val64 = 0x0001020000000000ULL;
1035                 writeq(val64, &bar0->tx_w_round_robin_4);
1036                 break;
1037         case 4:
1038                 val64 = 0x0001020300010200ULL;
1039                 writeq(val64, &bar0->tx_w_round_robin_0);
1040                 val64 = 0x0100000102030001ULL;
1041                 writeq(val64, &bar0->tx_w_round_robin_1);
1042                 val64 = 0x0200010000010203ULL;
1043                 writeq(val64, &bar0->tx_w_round_robin_2);
1044                 val64 = 0x0001020001000001ULL;
1045                 writeq(val64, &bar0->tx_w_round_robin_3);
1046                 val64 = 0x0203000100000000ULL;
1047                 writeq(val64, &bar0->tx_w_round_robin_4);
1048                 break;
1049         case 5:
1050                 val64 = 0x0001000203000102ULL;
1051                 writeq(val64, &bar0->tx_w_round_robin_0);
1052                 val64 = 0x0001020001030004ULL;
1053                 writeq(val64, &bar0->tx_w_round_robin_1);
1054                 val64 = 0x0001000203000102ULL;
1055                 writeq(val64, &bar0->tx_w_round_robin_2);
1056                 val64 = 0x0001020001030004ULL;
1057                 writeq(val64, &bar0->tx_w_round_robin_3);
1058                 val64 = 0x0001000000000000ULL;
1059                 writeq(val64, &bar0->tx_w_round_robin_4);
1060                 break;
1061         case 6:
1062                 val64 = 0x0001020304000102ULL;
1063                 writeq(val64, &bar0->tx_w_round_robin_0);
1064                 val64 = 0x0304050001020001ULL;
1065                 writeq(val64, &bar0->tx_w_round_robin_1);
1066                 val64 = 0x0203000100000102ULL;
1067                 writeq(val64, &bar0->tx_w_round_robin_2);
1068                 val64 = 0x0304000102030405ULL;
1069                 writeq(val64, &bar0->tx_w_round_robin_3);
1070                 val64 = 0x0001000200000000ULL;
1071                 writeq(val64, &bar0->tx_w_round_robin_4);
1072                 break;
1073         case 7:
1074                 val64 = 0x0001020001020300ULL;
1075                 writeq(val64, &bar0->tx_w_round_robin_0);
1076                 val64 = 0x0102030400010203ULL;
1077                 writeq(val64, &bar0->tx_w_round_robin_1);
1078                 val64 = 0x0405060001020001ULL;
1079                 writeq(val64, &bar0->tx_w_round_robin_2);
1080                 val64 = 0x0304050000010200ULL;
1081                 writeq(val64, &bar0->tx_w_round_robin_3);
1082                 val64 = 0x0102030000000000ULL;
1083                 writeq(val64, &bar0->tx_w_round_robin_4);
1084                 break;
1085         case 8:
1086                 val64 = 0x0001020300040105ULL;
1087                 writeq(val64, &bar0->tx_w_round_robin_0);
1088                 val64 = 0x0200030106000204ULL;
1089                 writeq(val64, &bar0->tx_w_round_robin_1);
1090                 val64 = 0x0103000502010007ULL;
1091                 writeq(val64, &bar0->tx_w_round_robin_2);
1092                 val64 = 0x0304010002060500ULL;
1093                 writeq(val64, &bar0->tx_w_round_robin_3);
1094                 val64 = 0x0103020400000000ULL;
1095                 writeq(val64, &bar0->tx_w_round_robin_4);
1096                 break;
1097         }
1098
1099         /* Filling the Rx round robin registers as per the
1100          * number of Rings and steering based on QoS.
1101          */
1102         switch (config->rx_ring_num) {
1103         case 1:
1104                 val64 = 0x8080808080808080ULL;
1105                 writeq(val64, &bar0->rts_qos_steering);
1106                 break;
1107         case 2:
1108                 val64 = 0x0000010000010000ULL;
1109                 writeq(val64, &bar0->rx_w_round_robin_0);
1110                 val64 = 0x0100000100000100ULL;
1111                 writeq(val64, &bar0->rx_w_round_robin_1);
1112                 val64 = 0x0001000001000001ULL;
1113                 writeq(val64, &bar0->rx_w_round_robin_2);
1114                 val64 = 0x0000010000010000ULL;
1115                 writeq(val64, &bar0->rx_w_round_robin_3);
1116                 val64 = 0x0100000000000000ULL;
1117                 writeq(val64, &bar0->rx_w_round_robin_4);
1118
1119                 val64 = 0x8080808040404040ULL;
1120                 writeq(val64, &bar0->rts_qos_steering);
1121                 break;
1122         case 3:
1123                 val64 = 0x0001000102000001ULL;
1124                 writeq(val64, &bar0->rx_w_round_robin_0);
1125                 val64 = 0x0001020000010001ULL;
1126                 writeq(val64, &bar0->rx_w_round_robin_1);
1127                 val64 = 0x0200000100010200ULL;
1128                 writeq(val64, &bar0->rx_w_round_robin_2);
1129                 val64 = 0x0001000102000001ULL;
1130                 writeq(val64, &bar0->rx_w_round_robin_3);
1131                 val64 = 0x0001020000000000ULL;
1132                 writeq(val64, &bar0->rx_w_round_robin_4);
1133
1134                 val64 = 0x8080804040402020ULL;
1135                 writeq(val64, &bar0->rts_qos_steering);
1136                 break;
1137         case 4:
1138                 val64 = 0x0001020300010200ULL;
1139                 writeq(val64, &bar0->rx_w_round_robin_0);
1140                 val64 = 0x0100000102030001ULL;
1141                 writeq(val64, &bar0->rx_w_round_robin_1);
1142                 val64 = 0x0200010000010203ULL;
1143                 writeq(val64, &bar0->rx_w_round_robin_2);
1144                 val64 = 0x0001020001000001ULL;
1145                 writeq(val64, &bar0->rx_w_round_robin_3);
1146                 val64 = 0x0203000100000000ULL;
1147                 writeq(val64, &bar0->rx_w_round_robin_4);
1148
1149                 val64 = 0x8080404020201010ULL;
1150                 writeq(val64, &bar0->rts_qos_steering);
1151                 break;
1152         case 5:
1153                 val64 = 0x0001000203000102ULL;
1154                 writeq(val64, &bar0->rx_w_round_robin_0);
1155                 val64 = 0x0001020001030004ULL;
1156                 writeq(val64, &bar0->rx_w_round_robin_1);
1157                 val64 = 0x0001000203000102ULL;
1158                 writeq(val64, &bar0->rx_w_round_robin_2);
1159                 val64 = 0x0001020001030004ULL;
1160                 writeq(val64, &bar0->rx_w_round_robin_3);
1161                 val64 = 0x0001000000000000ULL;
1162                 writeq(val64, &bar0->rx_w_round_robin_4);
1163
1164                 val64 = 0x8080404020201008ULL;
1165                 writeq(val64, &bar0->rts_qos_steering);
1166                 break;
1167         case 6:
1168                 val64 = 0x0001020304000102ULL;
1169                 writeq(val64, &bar0->rx_w_round_robin_0);
1170                 val64 = 0x0304050001020001ULL;
1171                 writeq(val64, &bar0->rx_w_round_robin_1);
1172                 val64 = 0x0203000100000102ULL;
1173                 writeq(val64, &bar0->rx_w_round_robin_2);
1174                 val64 = 0x0304000102030405ULL;
1175                 writeq(val64, &bar0->rx_w_round_robin_3);
1176                 val64 = 0x0001000200000000ULL;
1177                 writeq(val64, &bar0->rx_w_round_robin_4);
1178
1179                 val64 = 0x8080404020100804ULL;
1180                 writeq(val64, &bar0->rts_qos_steering);
1181                 break;
1182         case 7:
1183                 val64 = 0x0001020001020300ULL;
1184                 writeq(val64, &bar0->rx_w_round_robin_0);
1185                 val64 = 0x0102030400010203ULL;
1186                 writeq(val64, &bar0->rx_w_round_robin_1);
1187                 val64 = 0x0405060001020001ULL;
1188                 writeq(val64, &bar0->rx_w_round_robin_2);
1189                 val64 = 0x0304050000010200ULL;
1190                 writeq(val64, &bar0->rx_w_round_robin_3);
1191                 val64 = 0x0102030000000000ULL;
1192                 writeq(val64, &bar0->rx_w_round_robin_4);
1193
1194                 val64 = 0x8080402010080402ULL;
1195                 writeq(val64, &bar0->rts_qos_steering);
1196                 break;
1197         case 8:
1198                 val64 = 0x0001020300040105ULL;
1199                 writeq(val64, &bar0->rx_w_round_robin_0);
1200                 val64 = 0x0200030106000204ULL;
1201                 writeq(val64, &bar0->rx_w_round_robin_1);
1202                 val64 = 0x0103000502010007ULL;
1203                 writeq(val64, &bar0->rx_w_round_robin_2);
1204                 val64 = 0x0304010002060500ULL;
1205                 writeq(val64, &bar0->rx_w_round_robin_3);
1206                 val64 = 0x0103020400000000ULL;
1207                 writeq(val64, &bar0->rx_w_round_robin_4);
1208
1209                 val64 = 0x8040201008040201ULL;
1210                 writeq(val64, &bar0->rts_qos_steering);
1211                 break;
1212         }
1213
1214         /* UDP Fix */
1215         val64 = 0;
1216         for (i = 0; i < 8; i++)
1217                 writeq(val64, &bar0->rts_frm_len_n[i]);
1218
1219         /* Set the default rts frame length for the rings configured */
1220         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1221         for (i = 0 ; i < config->rx_ring_num ; i++)
1222                 writeq(val64, &bar0->rts_frm_len_n[i]);
1223
1224         /* Set the frame length for the configured rings
1225          * desired by the user
1226          */
1227         for (i = 0; i < config->rx_ring_num; i++) {
1228                 /* If rts_frm_len[i] == 0 then it is assumed that the user
1229                  * has not specified frame length steering.
1230                  * If the user provides the frame length then program
1231                  * the rts_frm_len register for those values or else
1232                  * leave it as it is.
1233                  */
1234                 if (rts_frm_len[i] != 0) {
1235                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1236                                 &bar0->rts_frm_len_n[i]);
1237                 }
1238         }
1239
1240         /* Program statistics memory */
1241         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1242
1243         if (nic->device_type == XFRAME_II_DEVICE) {
1244                 val64 = STAT_BC(0x320);
1245                 writeq(val64, &bar0->stat_byte_cnt);
1246         }
1247
1248         /*
1249          * Initializing the sampling rate for the device to calculate the
1250          * bandwidth utilization.
1251          */
1252         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1253             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1254         writeq(val64, &bar0->mac_link_util);
1255
1256
1257         /*
1258          * Initializing the Transmit and Receive Traffic Interrupt
1259          * Scheme.
1260          */
1261         /*
1262          * TTI Initialization. Default Tx timer gets us about
1263          * 250 interrupts per sec. Continuous interrupts are enabled
1264          * by default.
1265          */
1266         if (nic->device_type == XFRAME_II_DEVICE) {
1267                 int count = (nic->config.bus_speed * 125)/2;
1268                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1269         } else {
1270
1271                 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1272         }
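        /*
         * Illustrative arithmetic for the Xframe II branch above: with
         * bus_speed = 266 (PCI-X M2 133, see s2io_print_pci_mode()) the
         * timer value works out to 266 * 125 / 2 = 16625.
         */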
1273         val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1274             TTI_DATA1_MEM_TX_URNG_B(0x10) |
1275             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1276         if (use_continuous_tx_intrs)
1277                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1278         writeq(val64, &bar0->tti_data1_mem);
1279
1280         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1281             TTI_DATA2_MEM_TX_UFC_B(0x20) |
1282             TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1283         writeq(val64, &bar0->tti_data2_mem);
1284
1285         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1286         writeq(val64, &bar0->tti_command_mem);
1287
1288         /*
1289          * Once the operation completes, the Strobe bit of the command
1290          * register will be reset. We poll for this particular condition.
1291          * We wait for a maximum of 500ms for the operation to complete,
1292          * if it's not complete by then we return error.
1293          */
1294         time = 0;
1295         while (TRUE) {
1296                 val64 = readq(&bar0->tti_command_mem);
1297                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1298                         break;
1299                 }
1300                 if (time > 10) {
1301                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1302                                   dev->name);
1303                         return -1;
1304                 }
1305                 msleep(50);
1306                 time++;
1307         }
1308
1309
1310         /* RTI Initialization */
1311         if (nic->device_type == XFRAME_II_DEVICE) {
1312                 /*
1313                  * Programmed to generate approximately 500
1314                  * interrupts per second.
1315                  */
1316                 int count = (nic->config.bus_speed * 125)/4;
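                     /*
                      * Same scaling as the TTI timer above, but divided by 4
                      * instead of 2, e.g. (133 * 125) / 4 = 4156 ticks on a
                      * 133 MHz bus (again assuming bus_speed is in MHz).
                      */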
1317                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1318         } else {
1319                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1320         }
1321         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1322             RTI_DATA1_MEM_RX_URNG_B(0x10) |
1323             RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1324
1325         writeq(val64, &bar0->rti_data1_mem);
1326
1327         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1328             RTI_DATA2_MEM_RX_UFC_B(0x2) |
1329             RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1330         writeq(val64, &bar0->rti_data2_mem);
1331
1332         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1333         writeq(val64, &bar0->rti_command_mem);
1334
1335         /*
1336          * Once the operation completes, the Strobe bit of the
1337          * command register will be reset. We poll for this
1338          * particular condition. We wait for a maximum of 500ms
1339          * for the operation to complete; if it has not completed
1340          * by then, we return an error.
1341          */
1342         time = 0;
1343         while (TRUE) {
1344                 val64 = readq(&bar0->rti_command_mem);
1345                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1346                         break;
1347                 }
1348                 if (time > 10) {
1349                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1350                                   dev->name);
1351                         return -1;
1352                 }
1353                 time++;
1354                 msleep(50);
1355         }
1356
1357         /*
1358          * Initialize the pause threshold values for all
1359          * 8 queues on the Rx side.
1360          */
1361         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1362         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1363
1364         /* Disable RMAC PAD STRIPPING */
1365         add = (void *) &bar0->mac_cfg;
1366         val64 = readq(&bar0->mac_cfg);
1367         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
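             /*
              * mac_cfg appears to be a key protected register: RMAC_CFG_KEY
              * is written to rmac_cfg_key before each 32 bit half of the new
              * value is written, which is why the key write appears twice
              * below.
              */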
1368         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1369         writel((u32) (val64), add);
1370         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1371         writel((u32) (val64 >> 32), (add + 4));
1372         val64 = readq(&bar0->mac_cfg);
1373
1374         /*
1375          * Set the time value to be inserted in the pause frame
1376          * generated by xena.
1377          */
1378         val64 = readq(&bar0->rmac_pause_cfg);
1379         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1380         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1381         writeq(val64, &bar0->rmac_pause_cfg);
1382
1383         /*
1384          * Set the threshold limit for generating pause frames.
1385          * If the amount of data in any queue exceeds the ratio
1386          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256,
1387          * a pause frame is generated.
1388          */
1389         val64 = 0;
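             /*
              * Each per queue threshold is a 16 bit quantity (0xFF00 | value);
              * this loop and the q4q7 loop below replicate it into the four
              * 16 bit fields of the 64 bit register, one field per queue.
              */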
1390         for (i = 0; i < 4; i++) {
1391                 val64 |=
1392                     (((u64) 0xFF00 | nic->mac_control.
1393                       mc_pause_threshold_q0q3)
1394                      << (i * 2 * 8));
1395         }
1396         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1397
1398         val64 = 0;
1399         for (i = 0; i < 4; i++) {
1400                 val64 |=
1401                     (((u64) 0xFF00 | nic->mac_control.
1402                       mc_pause_threshold_q4q7)
1403                      << (i * 2 * 8));
1404         }
1405         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1406
1407         /*
1408          * TxDMA will stop issuing read requests if the number of read
1409          * splits exceeds the limit specified by shared_splits.
1410          */
1411         val64 = readq(&bar0->pic_control);
1412         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1413         writeq(val64, &bar0->pic_control);
1414
1415         /*
1416          * Programming the Herc to split every write transaction
1417          * that does not start on an ADB to reduce disconnects.
1418          */
1419         if (nic->device_type == XFRAME_II_DEVICE) {
1420                 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1421                 writeq(val64, &bar0->wreq_split_mask);
1422         }
1423
1424         return SUCCESS;
1425 }
1426
1427 /**
1428  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1429  *  @nic: device private variable.
1430  *  @mask: A mask indicating which Intr blocks must be modified.
1431  *  @flag: A flag indicating whether to enable or disable the Intrs.
1432  *  Description: This function will either disable or enable the interrupts
1433  *  depending on the flag argument. The mask argument can be used to
1434  *  enable/disable any Intr block.
1435  *  Return Value: NONE.
1436  */
1437
1438 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1439 {
1440         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1441         register u64 val64 = 0, temp64 = 0;
1442
1443         /*  Top level interrupt classification */
1444         /*  PIC Interrupts */
1445         if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1446                 /*  Enable PIC Intrs in the general intr mask register */
1447                 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1448                 if (flag == ENABLE_INTRS) {
1449                         temp64 = readq(&bar0->general_int_mask);
1450                         temp64 &= ~((u64) val64);
1451                         writeq(temp64, &bar0->general_int_mask);
1452                         /*
1453                          * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1454                          * interrupts for now.
1455                          * TODO
1456                          */
1457                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1458                         /*
1459                          * No MSI Support is available presently, so TTI and
1460                          * RTI interrupts are also disabled.
1461                          */
1462                 } else if (flag == DISABLE_INTRS) {
1463                         /*
1464                          * Disable PIC Intrs in the general
1465                          * intr mask register
1466                          */
1467                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1468                         temp64 = readq(&bar0->general_int_mask);
1469                         val64 |= temp64;
1470                         writeq(val64, &bar0->general_int_mask);
1471                 }
1472         }
1473
1474         /*  DMA Interrupts */
1475         /*  Enabling/Disabling Tx DMA interrupts */
1476         if (mask & TX_DMA_INTR) {
1477                 /* Enable TxDMA Intrs in the general intr mask register */
1478                 val64 = TXDMA_INT_M;
1479                 if (flag == ENABLE_INTRS) {
1480                         temp64 = readq(&bar0->general_int_mask);
1481                         temp64 &= ~((u64) val64);
1482                         writeq(temp64, &bar0->general_int_mask);
1483                         /*
1484                          * Keep all interrupts other than PFC interrupt
1485                          * and PCC interrupt disabled in DMA level.
1486                          */
1487                         val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1488                                                       TXDMA_PCC_INT_M);
1489                         writeq(val64, &bar0->txdma_int_mask);
1490                         /*
1491                          * Enable only the MISC error 1 interrupt in PFC block
1492                          */
1493                         val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1494                         writeq(val64, &bar0->pfc_err_mask);
1495                         /*
1496                          * Enable only the FB_ECC error interrupt in PCC block
1497                          */
1498                         val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1499                         writeq(val64, &bar0->pcc_err_mask);
1500                 } else if (flag == DISABLE_INTRS) {
1501                         /*
1502                          * Disable TxDMA Intrs in the general intr mask
1503                          * register
1504                          */
1505                         writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1506                         writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1507                         temp64 = readq(&bar0->general_int_mask);
1508                         val64 |= temp64;
1509                         writeq(val64, &bar0->general_int_mask);
1510                 }
1511         }
1512
1513         /*  Enabling/Disabling Rx DMA interrupts */
1514         if (mask & RX_DMA_INTR) {
1515                 /*  Enable RxDMA Intrs in the general intr mask register */
1516                 val64 = RXDMA_INT_M;
1517                 if (flag == ENABLE_INTRS) {
1518                         temp64 = readq(&bar0->general_int_mask);
1519                         temp64 &= ~((u64) val64);
1520                         writeq(temp64, &bar0->general_int_mask);
1521                         /*
1522                          * All RxDMA block interrupts are disabled for now
1523                          * TODO
1524                          */
1525                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1526                 } else if (flag == DISABLE_INTRS) {
1527                         /*
1528                          * Disable RxDMA Intrs in the general intr mask
1529                          * register
1530                          */
1531                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1532                         temp64 = readq(&bar0->general_int_mask);
1533                         val64 |= temp64;
1534                         writeq(val64, &bar0->general_int_mask);
1535                 }
1536         }
1537
1538         /*  MAC Interrupts */
1539         /*  Enabling/Disabling MAC interrupts */
1540         if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1541                 val64 = TXMAC_INT_M | RXMAC_INT_M;
1542                 if (flag == ENABLE_INTRS) {
1543                         temp64 = readq(&bar0->general_int_mask);
1544                         temp64 &= ~((u64) val64);
1545                         writeq(temp64, &bar0->general_int_mask);
1546                         /*
1547                          * All MAC block error interrupts are disabled for now
1548                          * except the link status change interrupt.
1549                          * TODO
1550                          */
1551                         val64 = MAC_INT_STATUS_RMAC_INT;
1552                         temp64 = readq(&bar0->mac_int_mask);
1553                         temp64 &= ~((u64) val64);
1554                         writeq(temp64, &bar0->mac_int_mask);
1555
1556                         val64 = readq(&bar0->mac_rmac_err_mask);
1557                         val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1558                         writeq(val64, &bar0->mac_rmac_err_mask);
1559                 } else if (flag == DISABLE_INTRS) {
1560                         /*
1561                          * Disable MAC Intrs in the general intr mask register
1562                          */
1563                         writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1564                         writeq(DISABLE_ALL_INTRS,
1565                                &bar0->mac_rmac_err_mask);
1566
1567                         temp64 = readq(&bar0->general_int_mask);
1568                         val64 |= temp64;
1569                         writeq(val64, &bar0->general_int_mask);
1570                 }
1571         }
1572
1573         /*  XGXS Interrupts */
1574         if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1575                 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1576                 if (flag == ENABLE_INTRS) {
1577                         temp64 = readq(&bar0->general_int_mask);
1578                         temp64 &= ~((u64) val64);
1579                         writeq(temp64, &bar0->general_int_mask);
1580                         /*
1581                          * All XGXS block error interrupts are disabled for now
1582                          * TODO
1583                          */
1584                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1585                 } else if (flag == DISABLE_INTRS) {
1586                         /*
1587                          * Disable XGXS Intrs in the general intr mask register
1588                          */
1589                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1590                         temp64 = readq(&bar0->general_int_mask);
1591                         val64 |= temp64;
1592                         writeq(val64, &bar0->general_int_mask);
1593                 }
1594         }
1595
1596         /*  Memory Controller (MC) interrupts */
1597         if (mask & MC_INTR) {
1598                 val64 = MC_INT_M;
1599                 if (flag == ENABLE_INTRS) {
1600                         temp64 = readq(&bar0->general_int_mask);
1601                         temp64 &= ~((u64) val64);
1602                         writeq(temp64, &bar0->general_int_mask);
1603                         /*
1604                          * Enable all MC Intrs.
1605                          */
1606                         writeq(0x0, &bar0->mc_int_mask);
1607                         writeq(0x0, &bar0->mc_err_mask);
1608                 } else if (flag == DISABLE_INTRS) {
1609                         /*
1610                          * Disable MC Intrs in the general intr mask register
1611                          */
1612                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1613                         temp64 = readq(&bar0->general_int_mask);
1614                         val64 |= temp64;
1615                         writeq(val64, &bar0->general_int_mask);
1616                 }
1617         }
1618
1619
1620         /*  Tx traffic interrupts */
1621         if (mask & TX_TRAFFIC_INTR) {
1622                 val64 = TXTRAFFIC_INT_M;
1623                 if (flag == ENABLE_INTRS) {
1624                         temp64 = readq(&bar0->general_int_mask);
1625                         temp64 &= ~((u64) val64);
1626                         writeq(temp64, &bar0->general_int_mask);
1627                         /*
1628                          * Enable all the Tx side interrupts
1629                          * writing 0 Enables all 64 TX interrupt levels
1630                          */
1631                         writeq(0x0, &bar0->tx_traffic_mask);
1632                 } else if (flag == DISABLE_INTRS) {
1633                         /*
1634                          * Disable Tx Traffic Intrs in the general intr mask
1635                          * register.
1636                          */
1637                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1638                         temp64 = readq(&bar0->general_int_mask);
1639                         val64 |= temp64;
1640                         writeq(val64, &bar0->general_int_mask);
1641                 }
1642         }
1643
1644         /*  Rx traffic interrupts */
1645         if (mask & RX_TRAFFIC_INTR) {
1646                 val64 = RXTRAFFIC_INT_M;
1647                 if (flag == ENABLE_INTRS) {
1648                         temp64 = readq(&bar0->general_int_mask);
1649                         temp64 &= ~((u64) val64);
1650                         writeq(temp64, &bar0->general_int_mask);
1651                         /* writing 0 Enables all 8 RX interrupt levels */
1652                         writeq(0x0, &bar0->rx_traffic_mask);
1653                 } else if (flag == DISABLE_INTRS) {
1654                         /*
1655                          * Disable Rx Traffic Intrs in the general intr mask
1656                          * register.
1657                          */
1658                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1659                         temp64 = readq(&bar0->general_int_mask);
1660                         val64 |= temp64;
1661                         writeq(val64, &bar0->general_int_mask);
1662                 }
1663         }
1664 }
1665
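     /*
      * check_prc_pcc_state - helper for verify_xena_quiescence() below.
      * Inspects the RMAC PCC idle and RC PRC quiescent bits of the adapter
      * status word. Herc (Xframe II) cards and Xena cards of revision 4 or
      * later use the RMAC_PCC_IDLE bit, while older Xena revisions use
      * RMAC_PCC_FOUR_IDLE; the expected state also differs depending on
      * whether the adapter enable bit has been written before (flag).
      * Returns 1 if the expected state is seen, 0 otherwise.
      */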
1666 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1667 {
1668         int ret = 0;
1669
1670         if (flag == FALSE) {
1671                 if ((!herc && (rev_id >= 4)) || herc) {
1672                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1673                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1674                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1675                                 ret = 1;
1676                         }
1677                 } else {
1678                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1679                             ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1680                              ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1681                                 ret = 1;
1682                         }
1683                 }
1684         } else {
1685                 if ((!herc && (rev_id >= 4)) || herc) {
1686                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1687                              ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1688                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1689                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1690                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1691                                 ret = 1;
1692                         }
1693                 } else {
1694                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1695                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1696                             (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1697                              ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1698                               ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1699                                 ret = 1;
1700                         }
1701                 }
1702         }
1703
1704         return ret;
1705 }
1706 /**
1707  *  verify_xena_quiescence - Checks whether the H/W is ready
1708  *  @val64 : Value read from the adapter status register.
1709  *  @flag : Indicates whether the adapter enable bit has been written
1710  *  once before.
1711  *  Description: Returns whether the H/W is ready to go or not. The
1712  *  comparison differs depending on whether the adapter enable bit has
1713  *  been written, which the calling function indicates through the flag
1714  *  argument.
1715  *  Return: 1 if Xena is quiescent
1716  *          0 if Xena is not quiescent
1717  */
1718
1719 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1720 {
1721         int ret = 0, herc;
1722         u64 tmp64 = ~((u64) val64);
1723         int rev_id = get_xena_rev_id(sp->pdev);
1724
1725         herc = (sp->device_type == XFRAME_II_DEVICE);
1726         if (!
1727             (tmp64 &
1728              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1729               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1730               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1731               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1732               ADAPTER_STATUS_P_PLL_LOCK))) {
1733                 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1734         }
1735
1736         return ret;
1737 }
1738
1739 /**
1740  * fix_mac_address - Fix for MAC address problem on Alpha platforms
1741  * @sp: Pointer to the device specific structure
1742  * Description :
1743  * New procedure to clear MAC address reading problems on Alpha platforms.
1744  *
1745  */
1746
1747 void fix_mac_address(nic_t * sp)
1748 {
1749         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1750         u64 val64;
1751         int i = 0;
1752
1753         while (fix_mac[i] != END_SIGN) {
1754                 writeq(fix_mac[i++], &bar0->gpio_control);
1755                 udelay(10);
1756                 val64 = readq(&bar0->gpio_control);
1757         }
1758 }
1759
1760 /**
1761  *  start_nic - Turns the device on
1762  *  @nic : device private variable.
1763  *  Description:
1764  *  This function actually turns the device on. Before this function is
1765  *  called, all registers are configured from their reset states
1766  *  and shared memory is allocated, but the NIC is still quiescent. On
1767  *  calling this function, the device interrupts are cleared and the NIC is
1768  *  literally switched on by writing into the adapter control register.
1769  *  Return Value:
1770  *  SUCCESS on success and -1 on failure.
1771  */
1772
1773 static int start_nic(struct s2io_nic *nic)
1774 {
1775         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1776         struct net_device *dev = nic->dev;
1777         register u64 val64 = 0;
1778         u16 interruptible;
1779         u16 subid, i;
1780         mac_info_t *mac_control;
1781         struct config_param *config;
1782
1783         mac_control = &nic->mac_control;
1784         config = &nic->config;
1785
1786         /*  PRC Initialization and configuration */
1787         for (i = 0; i < config->rx_ring_num; i++) {
1788                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1789                        &bar0->prc_rxd0_n[i]);
1790
1791                 val64 = readq(&bar0->prc_ctrl_n[i]);
1792 #ifndef CONFIG_2BUFF_MODE
1793                 val64 |= PRC_CTRL_RC_ENABLED;
1794 #else
1795                 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1796 #endif
1797                 writeq(val64, &bar0->prc_ctrl_n[i]);
1798         }
1799
1800 #ifdef CONFIG_2BUFF_MODE
1801         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1802         val64 = readq(&bar0->rx_pa_cfg);
1803         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1804         writeq(val64, &bar0->rx_pa_cfg);
1805 #endif
1806
1807         /*
1808          * Enabling MC-RLDRAM. After enabling the block, we wait
1809          * for around 100ms, which is approximately the time required
1810          * for the device to be ready for operation.
1811          */
1812         val64 = readq(&bar0->mc_rldram_mrs);
1813         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1814         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1815         val64 = readq(&bar0->mc_rldram_mrs);
1816
1817         msleep(100);    /* Delay by around 100 ms. */
1818
1819         /* Enabling ECC Protection. */
1820         val64 = readq(&bar0->adapter_control);
1821         val64 &= ~ADAPTER_ECC_EN;
1822         writeq(val64, &bar0->adapter_control);
1823
1824         /*
1825          * Clearing any possible Link state change interrupts that
1826          * could have popped up just before Enabling the card.
1827          */
1828         val64 = readq(&bar0->mac_rmac_err_reg);
1829         if (val64)
1830                 writeq(val64, &bar0->mac_rmac_err_reg);
1831
1832         /*
1833          * Verify if the device is ready to be enabled, if so enable
1834          * it.
1835          */
1836         val64 = readq(&bar0->adapter_status);
1837         if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1838                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1839                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1840                           (unsigned long long) val64);
1841                 return FAILURE;
1842         }
1843
1844         /*  Enable select interrupts */
1845         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1846             RX_MAC_INTR | MC_INTR;
1847         en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1848
1849         /*
1850          * With some switches, link might be already up at this point.
1851          * Because of this weird behavior, when we enable laser,
1852          * we may not get link. We need to handle this. We cannot
1853          * figure out which switch is misbehaving. So we are forced to
1854          * make a global change.
1855          */
1856
1857         /* Enabling Laser. */
1858         val64 = readq(&bar0->adapter_control);
1859         val64 |= ADAPTER_EOI_TX_ON;
1860         writeq(val64, &bar0->adapter_control);
1861
1862         /* SXE-002: Initialize link and activity LED */
1863         subid = nic->pdev->subsystem_device;
1864         if (((subid & 0xFF) >= 0x07) &&
1865             (nic->device_type == XFRAME_I_DEVICE)) {
1866                 val64 = readq(&bar0->gpio_control);
1867                 val64 |= 0x0000800000000000ULL;
1868                 writeq(val64, &bar0->gpio_control);
1869                 val64 = 0x0411040400000000ULL;
1870                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1871         }
1872
1873         /*
1874          * We don't see link state interrupts with certain switches,
1875          * so schedule the link state task directly from here.
1876          */
1877         schedule_work(&nic->set_link_task);
1878
1879         return SUCCESS;
1880 }
1881
1882 /**
1883  *  free_tx_buffers - Free all queued Tx buffers
1884  *  @nic : device private variable.
1885  *  Description:
1886  *  Free all queued Tx buffers.
1887  *  Return Value: void
1888 */
1889
1890 static void free_tx_buffers(struct s2io_nic *nic)
1891 {
1892         struct net_device *dev = nic->dev;
1893         struct sk_buff *skb;
1894         TxD_t *txdp;
1895         int i, j, k;
1896         mac_info_t *mac_control;
1897         struct config_param *config;
1898         int cnt = 0, frg_cnt;
1899
1900         mac_control = &nic->mac_control;
1901         config = &nic->config;
1902
1903         for (i = 0; i < config->tx_fifo_num; i++) {
1904                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1905                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1906                             list_virt_addr;
1907                         skb =
1908                             (struct sk_buff *) ((unsigned long) txdp->
1909                                                 Host_Control);
1910                         if (skb == NULL) {
1911                                 memset(txdp, 0, sizeof(TxD_t) *
1912                                        config->max_txds);
1913                                 continue;
1914                         }
1915                         frg_cnt = skb_shinfo(skb)->nr_frags;
1916                         pci_unmap_single(nic->pdev, (dma_addr_t)
1917                                          txdp->Buffer_Pointer,
1918                                          skb->len - skb->data_len,
1919                                          PCI_DMA_TODEVICE);
1920                         if (frg_cnt) {
1921                                 TxD_t *temp;
1922                                 temp = txdp;
1923                                 txdp++;
1924                                 for (k = 0; k < frg_cnt; k++, txdp++) {
1925                                         skb_frag_t *frag =
1926                                             &skb_shinfo(skb)->frags[k];
1927                                         pci_unmap_page(nic->pdev,
1928                                                        (dma_addr_t)
1929                                                        txdp->
1930                                                        Buffer_Pointer,
1931                                                        frag->size,
1932                                                        PCI_DMA_TODEVICE);
1933                                 }
1934                                 txdp = temp;
1935                         }
1936                         dev_kfree_skb(skb);
1937                         memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1938                         cnt++;
1939                 }
1940                 DBG_PRINT(INTR_DBG,
1941                           "%s:forcibly freeing %d skbs on FIFO%d\n",
1942                           dev->name, cnt, i);
1943                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1944                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1945         }
1946 }
1947
1948 /**
1949  *   stop_nic -  To stop the nic
1950  *   @nic : device private variable.
1951  *   Description:
1952  *   This function does exactly the opposite of what the start_nic()
1953  *   function does. This function is called to stop the device.
1954  *   Return Value:
1955  *   void.
1956  */
1957
1958 static void stop_nic(struct s2io_nic *nic)
1959 {
1960         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1961         register u64 val64 = 0;
1962         u16 interruptible, i;
1963         mac_info_t *mac_control;
1964         struct config_param *config;
1965
1966         mac_control = &nic->mac_control;
1967         config = &nic->config;
1968
1969         /*  Disable all interrupts */
1970         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1971             RX_MAC_INTR | MC_INTR;
1972         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1973
1974         /*  Disable PRCs */
1975         for (i = 0; i < config->rx_ring_num; i++) {
1976                 val64 = readq(&bar0->prc_ctrl_n[i]);
1977                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1978                 writeq(val64, &bar0->prc_ctrl_n[i]);
1979         }
1980 }
1981
1982 /**
1983  *  fill_rx_buffers - Allocates the Rx side skbs
1984  *  @nic:  device private variable
1985  *  @ring_no: ring number
1986  *  Description:
1987  *  The function allocates Rx side skbs and puts the physical
1988  *  address of these buffers into the RxD buffer pointers, so that the NIC
1989  *  can DMA the received frame into these locations.
1990  *  The NIC supports 3 receive modes:
1991  *  1. single buffer,
1992  *  2. three buffer and
1993  *  3. five buffer modes.
1994  *  Each mode defines how many fragments the received frame will be split
1995  *  up into by the NIC. In three buffer mode the frame is split into L3
1996  *  header, L4 header and L4 payload; in five buffer mode the L4 payload
1997  *  itself is split into 3 fragments. As of now only single buffer mode is
1998  *  supported.
1999  *   Return Value:
2000  *  SUCCESS on success or an appropriate -ve value on failure.
2001  */
2002
2003 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2004 {
2005         struct net_device *dev = nic->dev;
2006         struct sk_buff *skb;
2007         RxD_t *rxdp;
2008         int off, off1, size, block_no, block_no1;
2009         int offset, offset1;
2010         u32 alloc_tab = 0;
2011         u32 alloc_cnt;
2012         mac_info_t *mac_control;
2013         struct config_param *config;
2014 #ifdef CONFIG_2BUFF_MODE
2015         RxD_t *rxdpnext;
2016         int nextblk;
2017         u64 tmp;
2018         buffAdd_t *ba;
2019         dma_addr_t rxdpphys;
2020 #endif
2021 #ifndef CONFIG_S2IO_NAPI
2022         unsigned long flags;
2023 #endif
2024
2025         mac_control = &nic->mac_control;
2026         config = &nic->config;
2027         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2028             atomic_read(&nic->rx_bufs_left[ring_no]);
2029         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2030             HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
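             /*
              * In single buffer mode the whole frame lands in one skb, so
              * the buffer is sized for the MTU plus the worst case link
              * level overhead (Ethernet II/802.3, 802.2 LLC and SNAP
              * headers).
              */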
2031
2032         while (alloc_tab < alloc_cnt) {
2033                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2034                     block_index;
2035                 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2036                     block_index;
2037                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2038                 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2039 #ifndef CONFIG_2BUFF_MODE
2040                 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2041                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2042 #else
2043                 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2044                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2045 #endif
2046
2047                 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2048                     block_virt_addr + off;
2049                 if ((offset == offset1) && (rxdp->Host_Control)) {
2050                         DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2051                         DBG_PRINT(INTR_DBG, " info equated\n");
2052                         goto end;
2053                 }
2054 #ifndef CONFIG_2BUFF_MODE
2055                 if (rxdp->Control_1 == END_OF_BLOCK) {
2056                         mac_control->rings[ring_no].rx_curr_put_info.
2057                             block_index++;
2058                         mac_control->rings[ring_no].rx_curr_put_info.
2059                             block_index %= mac_control->rings[ring_no].block_count;
2060                         block_no = mac_control->rings[ring_no].rx_curr_put_info.
2061                                 block_index;
2062                         off++;
2063                         off %= (MAX_RXDS_PER_BLOCK + 1);
2064                         mac_control->rings[ring_no].rx_curr_put_info.offset =
2065                             off;
2066                         rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2067                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2068                                   dev->name, rxdp);
2069                 }
2070 #ifndef CONFIG_S2IO_NAPI
2071                 spin_lock_irqsave(&nic->put_lock, flags);
2072                 mac_control->rings[ring_no].put_pos =
2073                     (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2074                 spin_unlock_irqrestore(&nic->put_lock, flags);
2075 #endif
2076 #else
2077                 if (rxdp->Host_Control == END_OF_BLOCK) {
2078                         mac_control->rings[ring_no].rx_curr_put_info.
2079                             block_index++;
2080                         mac_control->rings[ring_no].rx_curr_put_info.block_index
2081                             %= mac_control->rings[ring_no].block_count;
2082                         block_no = mac_control->rings[ring_no].rx_curr_put_info
2083                             .block_index;
2084                         off = 0;
2085                         DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2086                                   dev->name, block_no,
2087                                   (unsigned long long) rxdp->Control_1);
2088                         mac_control->rings[ring_no].rx_curr_put_info.offset =
2089                             off;
2090                         rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2091                             block_virt_addr;
2092                 }
2093 #ifndef CONFIG_S2IO_NAPI
2094                 spin_lock_irqsave(&nic->put_lock, flags);
2095                 mac_control->rings[ring_no].put_pos = (block_no *
2096                                          (MAX_RXDS_PER_BLOCK + 1)) + off;
2097                 spin_unlock_irqrestore(&nic->put_lock, flags);
2098 #endif
2099 #endif
2100
2101 #ifndef CONFIG_2BUFF_MODE
2102                 if (rxdp->Control_1 & RXD_OWN_XENA)
2103 #else
2104                 if (rxdp->Control_2 & BIT(0))
2105 #endif
2106                 {
2107                         mac_control->rings[ring_no].rx_curr_put_info.
2108                             offset = off;
2109                         goto end;
2110                 }
2111 #ifdef  CONFIG_2BUFF_MODE
2112                 /*
2113                  * RxDs spanning cache lines will be replenished only
2114                  * if the succeeding RxD is also owned by the host. For
2115                  * the 48 byte descriptor these are always the ((8*i)+3)
2116                  * and ((8*i)+6) descriptors. The offending descriptor
2117                  * is of course the 3rd descriptor.
2118                  */
2119                 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2120                     block_dma_addr + (off * sizeof(RxD_t));
2121                 if (((u64) (rxdpphys)) % 128 > 80) {
2122                         rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2123                             block_virt_addr + (off + 1);
2124                         if (rxdpnext->Host_Control == END_OF_BLOCK) {
2125                                 nextblk = (block_no + 1) %
2126                                     (mac_control->rings[ring_no].block_count);
2127                                 rxdpnext = mac_control->rings[ring_no].rx_blocks
2128                                     [nextblk].block_virt_addr;
2129                         }
2130                         if (rxdpnext->Control_2 & BIT(0))
2131                                 goto end;
2132                 }
2133 #endif
2134
2135 #ifndef CONFIG_2BUFF_MODE
2136                 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2137 #else
2138                 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
2139 #endif
2140                 if (!skb) {
2141                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2142                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2143                         return -ENOMEM;
2144                 }
2145 #ifndef CONFIG_2BUFF_MODE
2146                 skb_reserve(skb, NET_IP_ALIGN);
2147                 memset(rxdp, 0, sizeof(RxD_t));
2148                 rxdp->Buffer0_ptr = pci_map_single
2149                     (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2150                 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2151                 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
2152                 rxdp->Host_Control = (unsigned long) (skb);
2153                 rxdp->Control_1 |= RXD_OWN_XENA;
2154                 off++;
2155                 off %= (MAX_RXDS_PER_BLOCK + 1);
2156                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2157 #else
2158                 ba = &mac_control->rings[ring_no].ba[block_no][off];
2159                 skb_reserve(skb, BUF0_LEN);
2160                 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2161                 if (tmp)
2162                         skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2163
2164                 memset(rxdp, 0, sizeof(RxD_t));
2165                 rxdp->Buffer2_ptr = pci_map_single
2166                     (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2167                      PCI_DMA_FROMDEVICE);
2168                 rxdp->Buffer0_ptr =
2169                     pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2170                                    PCI_DMA_FROMDEVICE);
2171                 rxdp->Buffer1_ptr =
2172                     pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2173                                    PCI_DMA_FROMDEVICE);
2174
2175                 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2176                 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2177                 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2178                 rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
2179                 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2180                 rxdp->Control_1 |= RXD_OWN_XENA;
2181                 off++;
2182                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2183 #endif
2184                 rxdp->Control_2 |= SET_RXD_MARKER;
2185
2186                 atomic_inc(&nic->rx_bufs_left[ring_no]);
2187                 alloc_tab++;
2188         }
2189
2190       end:
2191         return SUCCESS;
2192 }
2193
2194 /**
2195  *  free_rx_buffers - Frees all Rx buffers
2196  *  @sp: device private variable.
2197  *  Description:
2198  *  This function will free all Rx buffers allocated by host.
2199  *  Return Value:
2200  *  NONE.
2201  */
2202
2203 static void free_rx_buffers(struct s2io_nic *sp)
2204 {
2205         struct net_device *dev = sp->dev;
2206         int i, j, blk = 0, off, buf_cnt = 0;
2207         RxD_t *rxdp;
2208         struct sk_buff *skb;
2209         mac_info_t *mac_control;
2210         struct config_param *config;
2211 #ifdef CONFIG_2BUFF_MODE
2212         buffAdd_t *ba;
2213 #endif
2214
2215         mac_control = &sp->mac_control;
2216         config = &sp->config;
2217
2218         for (i = 0; i < config->rx_ring_num; i++) {
2219                 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2220                         off = j % (MAX_RXDS_PER_BLOCK + 1);
2221                         rxdp = mac_control->rings[i].rx_blocks[blk].
2222                                 block_virt_addr + off;
2223
2224 #ifndef CONFIG_2BUFF_MODE
2225                         if (rxdp->Control_1 == END_OF_BLOCK) {
2226                                 rxdp =
2227                                     (RxD_t *) ((unsigned long) rxdp->
2228                                                Control_2);
2229                                 j++;
2230                                 blk++;
2231                         }
2232 #else
2233                         if (rxdp->Host_Control == END_OF_BLOCK) {
2234                                 blk++;
2235                                 continue;
2236                         }
2237 #endif
2238
2239                         if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2240                                 memset(rxdp, 0, sizeof(RxD_t));
2241                                 continue;
2242                         }
2243
2244                         skb =
2245                             (struct sk_buff *) ((unsigned long) rxdp->
2246                                                 Host_Control);
2247                         if (skb) {
2248 #ifndef CONFIG_2BUFF_MODE
2249                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2250                                                  rxdp->Buffer0_ptr,
2251                                                  dev->mtu +
2252                                                  HEADER_ETHERNET_II_802_3_SIZE
2253                                                  + HEADER_802_2_SIZE +
2254                                                  HEADER_SNAP_SIZE,
2255                                                  PCI_DMA_FROMDEVICE);
2256 #else
2257                                 ba = &mac_control->rings[i].ba[blk][off];
2258                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2259                                                  rxdp->Buffer0_ptr,
2260                                                  BUF0_LEN,
2261                                                  PCI_DMA_FROMDEVICE);
2262                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2263                                                  rxdp->Buffer1_ptr,
2264                                                  BUF1_LEN,
2265                                                  PCI_DMA_FROMDEVICE);
2266                                 pci_unmap_single(sp->pdev, (dma_addr_t)
2267                                                  rxdp->Buffer2_ptr,
2268                                                  dev->mtu + BUF0_LEN + 4,
2269                                                  PCI_DMA_FROMDEVICE);
2270 #endif
2271                                 dev_kfree_skb(skb);
2272                                 atomic_dec(&sp->rx_bufs_left[i]);
2273                                 buf_cnt++;
2274                         }
2275                         memset(rxdp, 0, sizeof(RxD_t));
2276                 }
2277                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2278                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2279                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2280                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2281                 atomic_set(&sp->rx_bufs_left[i], 0);
2282                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2283                           dev->name, buf_cnt, i);
2284         }
2285 }
2286
2287 /**
2288  * s2io_poll - Rx interrupt handler for NAPI support
2289  * @dev : pointer to the device structure.
2290  * @budget : The number of packets that were budgeted to be processed
2291  * during one pass through the "Poll" function.
2292  * Description:
2293  * Comes into the picture only if NAPI support has been incorporated. It
2294  * does the same thing that rx_intr_handler does, but not in an interrupt
2295  * context, and it will process only a given number of packets.
2296  * Return value:
2297  * 0 on success and 1 if there are no Rx packets to be processed.
2298  */
2299
2300 #if defined(CONFIG_S2IO_NAPI)
2301 static int s2io_poll(struct net_device *dev, int *budget)
2302 {
2303         nic_t *nic = dev->priv;
2304         int pkt_cnt = 0, org_pkts_to_process;
2305         mac_info_t *mac_control;
2306         struct config_param *config;
2307         XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2308         u64 val64;
2309         int i;
2310
2311         atomic_inc(&nic->isr_cnt);
2312         mac_control = &nic->mac_control;
2313         config = &nic->config;
2314
2315         nic->pkts_to_process = *budget;
2316         if (nic->pkts_to_process > dev->quota)
2317                 nic->pkts_to_process = dev->quota;
2318         org_pkts_to_process = nic->pkts_to_process;
2319
2320         val64 = readq(&bar0->rx_traffic_int);
2321         writeq(val64, &bar0->rx_traffic_int);
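             /*
              * Reading the Rx traffic interrupt register and writing the
              * value back acknowledges (clears) the pending interrupt bits,
              * the same write to clear pattern used for the error registers
              * in alarm_intr_handler() below.
              */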
2322
2323         for (i = 0; i < config->rx_ring_num; i++) {
2324                 rx_intr_handler(&mac_control->rings[i]);
2325                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2326                 if (!nic->pkts_to_process) {
2327                         /* Quota for the current iteration has been met */
2328                         goto no_rx;
2329                 }
2330         }
2331         if (!pkt_cnt)
2332                 pkt_cnt = 1;
2333
2334         dev->quota -= pkt_cnt;
2335         *budget -= pkt_cnt;
2336         netif_rx_complete(dev);
2337
2338         for (i = 0; i < config->rx_ring_num; i++) {
2339                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2340                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2341                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2342                         break;
2343                 }
2344         }
2345         /* Re enable the Rx interrupts. */
2346         en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2347         atomic_dec(&nic->isr_cnt);
2348         return 0;
2349
2350 no_rx:
2351         dev->quota -= pkt_cnt;
2352         *budget -= pkt_cnt;
2353
2354         for (i = 0; i < config->rx_ring_num; i++) {
2355                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2356                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2357                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2358                         break;
2359                 }
2360         }
2361         atomic_dec(&nic->isr_cnt);
2362         return 1;
2363 }
2364 #endif
2365
2366 /**
2367  *  rx_intr_handler - Rx interrupt handler
2368  *  @nic: device private variable.
2369  *  Description:
2370  *  If the interrupt is because of a received frame, or if the
2371  *  receive ring contains fresh, as yet un-processed frames, this function
2372  *  is called. It picks out the RxD at which the last Rx processing had
2373  *  stopped, sends the skb to the OSM's Rx handler and then increments
2374  *  the offset.
2375  *  Return Value:
2376  *  NONE.
2377  */
2378 static void rx_intr_handler(ring_info_t *ring_data)
2379 {
2380         nic_t *nic = ring_data->nic;
2381         struct net_device *dev = (struct net_device *) nic->dev;
2382         int get_block, get_offset, put_block, put_offset, ring_bufs;
2383         rx_curr_get_info_t get_info, put_info;
2384         RxD_t *rxdp;
2385         struct sk_buff *skb;
2386 #ifndef CONFIG_S2IO_NAPI
2387         int pkt_cnt = 0;
2388 #endif
2389         spin_lock(&nic->rx_lock);
2390         if (atomic_read(&nic->card_state) == CARD_DOWN) {
2391                 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2392                           __FUNCTION__, dev->name);
2393                 spin_unlock(&nic->rx_lock);
                     return;
2394         }
2395
2396         get_info = ring_data->rx_curr_get_info;
2397         get_block = get_info.block_index;
2398         put_info = ring_data->rx_curr_put_info;
2399         put_block = put_info.block_index;
2400         ring_bufs = get_info.ring_len+1;
2401         rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2402                     get_info.offset;
2403         get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2404                 get_info.offset;
2405 #ifndef CONFIG_S2IO_NAPI
2406         spin_lock(&nic->put_lock);
2407         put_offset = ring_data->put_pos;
2408         spin_unlock(&nic->put_lock);
2409 #else
2410         put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2411                 put_info.offset;
2412 #endif
2413         while (RXD_IS_UP2DT(rxdp) &&
2414                (((get_offset + 1) % ring_bufs) != put_offset)) {
2415                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2416                 if (skb == NULL) {
2417                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
2418                                   dev->name);
2419                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2420                         spin_unlock(&nic->rx_lock);
2421                         return;
2422                 }
2423 #ifndef CONFIG_2BUFF_MODE
2424                 pci_unmap_single(nic->pdev, (dma_addr_t)
2425                                  rxdp->Buffer0_ptr,
2426                                  dev->mtu +
2427                                  HEADER_ETHERNET_II_802_3_SIZE +
2428                                  HEADER_802_2_SIZE +
2429                                  HEADER_SNAP_SIZE,
2430                                  PCI_DMA_FROMDEVICE);
2431 #else
2432                 pci_unmap_single(nic->pdev, (dma_addr_t)
2433                                  rxdp->Buffer0_ptr,
2434                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
2435                 pci_unmap_single(nic->pdev, (dma_addr_t)
2436                                  rxdp->Buffer1_ptr,
2437                                  BUF1_LEN, PCI_DMA_FROMDEVICE);
2438                 pci_unmap_single(nic->pdev, (dma_addr_t)
2439                                  rxdp->Buffer2_ptr,
2440                                  dev->mtu + BUF0_LEN + 4,
2441                                  PCI_DMA_FROMDEVICE);
2442 #endif
2443                 rx_osm_handler(ring_data, rxdp);
2444                 get_info.offset++;
2445                 ring_data->rx_curr_get_info.offset =
2446                     get_info.offset;
2447                 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2448                     get_info.offset;
2449                 if (get_info.offset &&
2450                     (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2451                         get_info.offset = 0;
2452                         ring_data->rx_curr_get_info.offset
2453                             = get_info.offset;
2454                         get_block++;
2455                         get_block %= ring_data->block_count;
2456                         ring_data->rx_curr_get_info.block_index
2457                             = get_block;
2458                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2459                 }
2460
2461                 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2462                             get_info.offset;
2463 #ifdef CONFIG_S2IO_NAPI
2464                 nic->pkts_to_process -= 1;
2465                 if (!nic->pkts_to_process)
2466                         break;
2467 #else
2468                 pkt_cnt++;
2469                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2470                         break;
2471 #endif
2472         }
2473         spin_unlock(&nic->rx_lock);
2474 }
2475
2476 /**
2477  *  tx_intr_handler - Transmit interrupt handler
2478  *  @nic : device private variable
2479  *  Description:
2480  *  If an interrupt was raised to indicate DMA complete of the
2481  *  Tx packet, this function is called. It identifies the last TxD
2482  *  whose buffer was freed and frees all skbs whose data have already
2483  *  been DMA'ed into the NIC's internal memory.
2484  *  Return Value:
2485  *  NONE
2486  */
2487
2488 static void tx_intr_handler(fifo_info_t *fifo_data)
2489 {
2490         nic_t *nic = fifo_data->nic;
2491         struct net_device *dev = (struct net_device *) nic->dev;
2492         tx_curr_get_info_t get_info, put_info;
2493         struct sk_buff *skb;
2494         TxD_t *txdlp;
2495         u16 j, frg_cnt;
2496
2497         get_info = fifo_data->tx_curr_get_info;
2498         put_info = fifo_data->tx_curr_put_info;
2499         txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2500             list_virt_addr;
2501         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2502                (get_info.offset != put_info.offset) &&
2503                (txdlp->Host_Control)) {
2504                 /* Check for TxD errors */
2505                 if (txdlp->Control_1 & TXD_T_CODE) {
2506                         unsigned long long err;
2507                         err = txdlp->Control_1 & TXD_T_CODE;
2508                         DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2509                                   err);
2510                 }
2511
2512                 skb = (struct sk_buff *) ((unsigned long)
2513                                 txdlp->Host_Control);
2514                 if (skb == NULL) {
2515                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2516                         __FUNCTION__);
2517                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2518                         return;
2519                 }
2520
2521                 frg_cnt = skb_shinfo(skb)->nr_frags;
2522                 nic->tx_pkt_count++;
2523
2524                 pci_unmap_single(nic->pdev, (dma_addr_t)
2525                                  txdlp->Buffer_Pointer,
2526                                  skb->len - skb->data_len,
2527                                  PCI_DMA_TODEVICE);
2528                 if (frg_cnt) {
2529                         TxD_t *temp;
2530                         temp = txdlp;
2531                         txdlp++;
2532                         for (j = 0; j < frg_cnt; j++, txdlp++) {
2533                                 skb_frag_t *frag =
2534                                     &skb_shinfo(skb)->frags[j];
2535                                 pci_unmap_page(nic->pdev,
2536                                                (dma_addr_t)
2537                                                txdlp->
2538                                                Buffer_Pointer,
2539                                                frag->size,
2540                                                PCI_DMA_TODEVICE);
2541                         }
2542                         txdlp = temp;
2543                 }
2544                 memset(txdlp, 0,
2545                        (sizeof(TxD_t) * fifo_data->max_txds));
2546
2547                 /* Updating the statistics block */
2548                 nic->stats.tx_bytes += skb->len;
2549                 dev_kfree_skb_irq(skb);
2550
2551                 get_info.offset++;
2552                 get_info.offset %= get_info.fifo_len + 1;
2553                 txdlp = (TxD_t *) fifo_data->list_info
2554                     [get_info.offset].list_virt_addr;
2555                 fifo_data->tx_curr_get_info.offset =
2556                     get_info.offset;
2557         }
2558
2559         spin_lock(&nic->tx_lock);
2560         if (netif_queue_stopped(dev))
2561                 netif_wake_queue(dev);
2562         spin_unlock(&nic->tx_lock);
2563 }
2564
2565 /**
2566  *  alarm_intr_handler - Alarm Interrupt handler
2567  *  @nic: device private variable
2568  *  Description: If the interrupt was raised neither for an Rx packet nor
2569  *  for a Tx completion, this function is called. If the interrupt indicates
2570  *  a loss of link, the OSM link status handler is invoked; for any other
2571  *  alarm interrupt, the block that raised the interrupt is displayed
2572  *  and a H/W reset is issued.
2573  *  Return Value:
2574  *  NONE
2575 */
2576
2577 static void alarm_intr_handler(struct s2io_nic *nic)
2578 {
2579         struct net_device *dev = (struct net_device *) nic->dev;
2580         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2581         register u64 val64 = 0, err_reg = 0;
2582
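        /*
         * The alarm registers below are read and then written back with the
         * same value; writing the set bits back acknowledges (clears) them.
         */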
2583         /* Handling link status change error Intr */
2584         err_reg = readq(&bar0->mac_rmac_err_reg);
2585         writeq(err_reg, &bar0->mac_rmac_err_reg);
2586         if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2587                 schedule_work(&nic->set_link_task);
2588         }
2589
2590         /* Handling Ecc errors */
2591         val64 = readq(&bar0->mc_err_reg);
2592         writeq(val64, &bar0->mc_err_reg);
2593         if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2594                 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2595                         nic->mac_control.stats_info->sw_stat.
2596                                 double_ecc_errs++;
2597                         DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2598                                   dev->name);
2599                         DBG_PRINT(ERR_DBG, "double ECC error!!\n");
2600                         netif_stop_queue(dev);
2601                         schedule_work(&nic->rst_timer_task);
2602                 } else {
2603                         nic->mac_control.stats_info->sw_stat.
2604                                 single_ecc_errs++;
2605                 }
2606         }
2607
2608         /* In case of a serious error, the device will be Reset. */
2609         val64 = readq(&bar0->serr_source);
2610         if (val64 & SERR_SOURCE_ANY) {
2611                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2612                 DBG_PRINT(ERR_DBG, "serious error!!\n");
2613                 netif_stop_queue(dev);
2614                 schedule_work(&nic->rst_timer_task);
2615         }
2616
2617         /*
2618          * Also, as mentioned in the latest Errata sheets, if the PCC_FB_ECC
2619          * Error occurs, the adapter will be recycled by disabling the
2620          * adapter enable bit and enabling it again after the device
2621          * becomes Quiescent.
2622          */
2623         val64 = readq(&bar0->pcc_err_reg);
2624         writeq(val64, &bar0->pcc_err_reg);
2625         if (val64 & PCC_FB_ECC_DB_ERR) {
2626                 u64 ac = readq(&bar0->adapter_control);
2627                 ac &= ~(ADAPTER_CNTL_EN);
2628                 writeq(ac, &bar0->adapter_control);
2629                 ac = readq(&bar0->adapter_control);
2630                 schedule_work(&nic->set_link_task);
2631         }
2632
2633         /* Other type of interrupts are not being handled now,  TODO */
2634 }
2635
2636 /**
2637  *  wait_for_cmd_complete - waits for a command to complete.
2638  *  @sp : private member of the device structure, which is a pointer to the
2639  *  s2io_nic structure.
2640  *  Description: Waits for a command written into the RMAC ADDR/DATA
2641  *  registers to be completed and returns either success or
2642  *  error depending on whether the command completed or not.
2643  *  Return value:
2644  *   SUCCESS on success and FAILURE on failure.
2645  */
2646
2647 int wait_for_cmd_complete(nic_t * sp)
2648 {
2649         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2650         int ret = FAILURE, cnt = 0;
2651         u64 val64;
2652
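        /*
         * Poll until the strobe bit clears, sleeping 50ms per iteration and
         * giving up after roughly half a second.
         */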
2653         while (TRUE) {
2654                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2655                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2656                         ret = SUCCESS;
2657                         break;
2658                 }
2659                 msleep(50);
2660                 if (cnt++ > 10)
2661                         break;
2662         }
2663
2664         return ret;
2665 }
2666
2667 /**
2668  *  s2io_reset - Resets the card.
2669  *  @sp : private member of the device structure.
2670  *  Description: Function to Reset the card. This function then also
2671  *  restores the previously saved PCI configuration space registers as
2672  *  the card reset also resets the configuration space.
2673  *  Return value:
2674  *  void.
2675  */
2676
2677 void s2io_reset(nic_t * sp)
2678 {
2679         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2680         u64 val64;
2681         u16 subid, pci_cmd;
2682
2683         val64 = SW_RESET_ALL;
2684         writeq(val64, &bar0->sw_reset);
2685
2686         /*
2687          * At this stage, if the PCI write is indeed completed, the
2688          * card is reset and so is the PCI Config space of the device.
2689          * So a read cannot be issued at this stage on any of the
2690          * registers to ensure the write into "sw_reset" register
2691          * has gone through.
2692          * Question: Is there any system call that will explicitly force
2693          * all the write commands still pending on the bus to be pushed
2694          * through?
2695          * As of now I am just giving a 250ms delay and hoping that the
2696          * PCI write to sw_reset register is done by this time.
2697          */
2698         msleep(250);
2699
2700         if (!(sp->device_type & XFRAME_II_DEVICE)) {
2701         /* Restore the PCI state saved during initialization. */
2702                 pci_restore_state(sp->pdev);
2703         } else {
2704                 pci_set_master(sp->pdev);
2705         }
2706         s2io_init_pci(sp);
2707
2708         msleep(250);
2709
2710         /* Set swapper to enable I/O register access */
2711         s2io_set_swapper(sp);
2712
2713         /* Clear certain PCI/PCI-X fields after reset */
2714         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2715         pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2716         pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2717
2718         val64 = readq(&bar0->txpic_int_reg);
2719         val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2720         writeq(val64, &bar0->txpic_int_reg);
2721
2722         /* Clearing PCIX Ecc status register */
2723         pci_write_config_dword(sp->pdev, 0x68, 0);
2724
2725         /* Reset device statistics maintained by OS */
2726         memset(&sp->stats, 0, sizeof (struct net_device_stats));
2727
2728         /* SXE-002: Configure link and activity LED to turn it off */
2729         subid = sp->pdev->subsystem_device;
2730         if (((subid & 0xFF) >= 0x07) &&
2731             (sp->device_type == XFRAME_I_DEVICE)) {
2732                 val64 = readq(&bar0->gpio_control);
2733                 val64 |= 0x0000800000000000ULL;
2734                 writeq(val64, &bar0->gpio_control);
2735                 val64 = 0x0411040400000000ULL;
2736                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2737         }
2738
2739         /*
2740          * Clear spurious ECC interrupts that would have occurred on
2741          * XFRAME II cards after reset.
2742          */
2743         if (sp->device_type == XFRAME_II_DEVICE) {
2744                 val64 = readq(&bar0->pcc_err_reg);
2745                 writeq(val64, &bar0->pcc_err_reg);
2746         }
2747
2748         sp->device_enabled_once = FALSE;
2749 }
2750
2751 /**
2752  *  s2io_set_swapper - to set the swapper control on the card
2753  *  @sp : private member of the device structure,
2754  *  pointer to the s2io_nic structure.
2755  *  Description: Function to set the swapper control on the card
2756  *  correctly depending on the 'endianness' of the system.
2757  *  Return value:
2758  *  SUCCESS on success and FAILURE on failure.
2759  */
2760
2761 int s2io_set_swapper(nic_t * sp)
2762 {
2763         struct net_device *dev = sp->dev;
2764         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2765         u64 val64, valt, valr;
2766
2767         /*
2768          * Set proper endian settings and verify the same by reading
2769          * the PIF Feed-back register.
2770          */
2771
2772         val64 = readq(&bar0->pif_rd_swapper_fb);
2773         if (val64 != 0x0123456789ABCDEFULL) {
2774                 int i = 0;
2775                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2776                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2777                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2778                                 0};                     /* FE=0, SE=0 */
2779
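                /*
                 * Try each candidate swapper setting until the feedback
                 * register returns the expected 0x0123456789ABCDEF pattern.
                 */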
2780                 while(i<4) {
2781                         writeq(value[i], &bar0->swapper_ctrl);
2782                         val64 = readq(&bar0->pif_rd_swapper_fb);
2783                         if (val64 == 0x0123456789ABCDEFULL)
2784                                 break;
2785                         i++;
2786                 }
2787                 if (i == 4) {
2788                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2789                                 dev->name);
2790                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2791                                 (unsigned long long) val64);
2792                         return FAILURE;
2793                 }
2794                 valr = value[i];
2795         } else {
2796                 valr = readq(&bar0->swapper_ctrl);
2797         }
2798
2799         valt = 0x0123456789ABCDEFULL;
2800         writeq(valt, &bar0->xmsi_address);
2801         val64 = readq(&bar0->xmsi_address);
2802
2803         if(val64 != valt) {
2804                 int i = 0;
2805                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2806                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2807                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2808                                 0};                     /* FE=0, SE=0 */
2809
2810                 while(i<4) {
2811                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2812                         writeq(valt, &bar0->xmsi_address);
2813                         val64 = readq(&bar0->xmsi_address);
2814                         if(val64 == valt)
2815                                 break;
2816                         i++;
2817                 }
2818                 if(i == 4) {
2819                         unsigned long long x = val64;
2820                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2821                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2822                         return FAILURE;
2823                 }
2824         }
2825         val64 = readq(&bar0->swapper_ctrl);
2826         val64 &= 0xFFFF000000000000ULL;
2827
2828 #ifdef  __BIG_ENDIAN
2829         /*
2830          * The device is set to a big endian format by default, so a
2831          * big endian driver need not set anything.
2832          */
2833         val64 |= (SWAPPER_CTRL_TXP_FE |
2834                  SWAPPER_CTRL_TXP_SE |
2835                  SWAPPER_CTRL_TXD_R_FE |
2836                  SWAPPER_CTRL_TXD_W_FE |
2837                  SWAPPER_CTRL_TXF_R_FE |
2838                  SWAPPER_CTRL_RXD_R_FE |
2839                  SWAPPER_CTRL_RXD_W_FE |
2840                  SWAPPER_CTRL_RXF_W_FE |
2841                  SWAPPER_CTRL_XMSI_FE |
2842                  SWAPPER_CTRL_XMSI_SE |
2843                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2844         writeq(val64, &bar0->swapper_ctrl);
2845 #else
2846         /*
2847          * Initially we enable all bits to make it accessible by the
2848          * driver, then we selectively enable only those bits that
2849          * we want to set.
2850          */
2851         val64 |= (SWAPPER_CTRL_TXP_FE |
2852                  SWAPPER_CTRL_TXP_SE |
2853                  SWAPPER_CTRL_TXD_R_FE |
2854                  SWAPPER_CTRL_TXD_R_SE |
2855                  SWAPPER_CTRL_TXD_W_FE |
2856                  SWAPPER_CTRL_TXD_W_SE |
2857                  SWAPPER_CTRL_TXF_R_FE |
2858                  SWAPPER_CTRL_RXD_R_FE |
2859                  SWAPPER_CTRL_RXD_R_SE |
2860                  SWAPPER_CTRL_RXD_W_FE |
2861                  SWAPPER_CTRL_RXD_W_SE |
2862                  SWAPPER_CTRL_RXF_W_FE |
2863                  SWAPPER_CTRL_XMSI_FE |
2864                  SWAPPER_CTRL_XMSI_SE |
2865                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2866         writeq(val64, &bar0->swapper_ctrl);
2867 #endif
2868         val64 = readq(&bar0->swapper_ctrl);
2869
2870         /*
2871          * Verifying if endian settings are accurate by reading a
2872          * feedback register.
2873          */
2874         val64 = readq(&bar0->pif_rd_swapper_fb);
2875         if (val64 != 0x0123456789ABCDEFULL) {
2876                 /* Endian settings are incorrect, needs another look. */
2877                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2878                           dev->name);
2879                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2880                           (unsigned long long) val64);
2881                 return FAILURE;
2882         }
2883
2884         return SUCCESS;
2885 }
2886
2887 /* ********************************************************* *
2888  * Functions defined below concern the OS part of the driver *
2889  * ********************************************************* */
2890
2891 /**
2892  *  s2io_open - open entry point of the driver
2893  *  @dev : pointer to the device structure.
2894  *  Description:
2895  *  This function is the open entry point of the driver. It mainly calls a
2896  *  function to allocate Rx buffers and inserts them into the buffer
2897  *  descriptors and then enables the Rx part of the NIC.
2898  *  Return value:
2899  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2900  *   file on failure.
2901  */
2902
2903 int s2io_open(struct net_device *dev)
2904 {
2905         nic_t *sp = dev->priv;
2906         int err = 0;
2907
2908         /*
2909          * Make sure the link is off by default every time the
2910          * NIC is initialized
2911          */
2912         netif_carrier_off(dev);
2913         sp->last_link_state = 0; /* Unknown link state */
2914
2915         /* Initialize H/W and enable interrupts */
2916         if (s2io_card_up(sp)) {
2917                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2918                           dev->name);
2919                 err = -ENODEV;
2920                 goto hw_init_failed;
2921         }
2922
2923         /* After proper initialization of H/W, register ISR */
2924         err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2925                           sp->name, dev);
2926         if (err) {
2927                 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2928                           dev->name);
2929                 goto isr_registration_failed;
2930         }
2931
2932         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2933                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2934                 err = -ENODEV;
2935                 goto setting_mac_address_failed;
2936         }
2937
2938         netif_start_queue(dev);
2939         return 0;
2940
2941 setting_mac_address_failed:
2942         free_irq(sp->pdev->irq, dev);
2943 isr_registration_failed:
2944         del_timer_sync(&sp->alarm_timer);
2945         s2io_reset(sp);
2946 hw_init_failed:
2947         return err;
2948 }
2949
2950 /**
2951  *  s2io_close - close entry point of the driver
2952  *  @dev : device pointer.
2953  *  Description:
2954  *  This is the stop entry point of the driver. It needs to undo exactly
2955  *  whatever was done by the open entry point, thus it's usually referred to
2956  *  as the close function. Among other things this function mainly stops the
2957  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2958  *  Return value:
2959  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2960  *  file on failure.
2961  */
2962
2963 int s2io_close(struct net_device *dev)
2964 {
2965         nic_t *sp = dev->priv;
2966         flush_scheduled_work();
2967         netif_stop_queue(dev);
2968         /* Reset card, kill tasklet and free Tx and Rx buffers. */
2969         s2io_card_down(sp);
2970
2971         free_irq(sp->pdev->irq, dev);
2972         sp->device_close_flag = TRUE;   /* Device is shut down. */
2973         return 0;
2974 }
2975
2976 /**
2977  *  s2io_xmit - Tx entry point of the driver
2978  *  @skb : the socket buffer containing the Tx data.
2979  *  @dev : device pointer.
2980  *  Description :
2981  *  This function is the Tx entry point of the driver. S2IO NIC supports
2982  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
2983  *  NOTE: when the device can't queue the pkt, just the trans_start variable
2984  *  will not be updated.
2985  *  Return value:
2986  *  0 on success & 1 on failure.
2987  */
2988
2989 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2990 {
2991         nic_t *sp = dev->priv;
2992         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2993         register u64 val64;
2994         TxD_t *txdp;
2995         TxFIFO_element_t __iomem *tx_fifo;
2996         unsigned long flags;
2997 #ifdef NETIF_F_TSO
2998         int mss;
2999 #endif
3000         u16 vlan_tag = 0;
3001         int vlan_priority = 0;
3002         mac_info_t *mac_control;
3003         struct config_param *config;
3004
3005         mac_control = &sp->mac_control;
3006         config = &sp->config;
3007
3008         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3009         spin_lock_irqsave(&sp->tx_lock, flags);
3010         if (atomic_read(&sp->card_state) == CARD_DOWN) {
3011                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3012                           dev->name);
3013                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3014                 dev_kfree_skb(skb);
3015                 return 0;
3016         }
3017
3018         queue = 0;
3019
3020         /* Get Fifo number to Transmit based on vlan priority */
3021         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3022                 vlan_tag = vlan_tx_tag_get(skb);
3023                 vlan_priority = vlan_tag >> 13;
3024                 queue = config->fifo_mapping[vlan_priority];
3025         }
3026
3027         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3028         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3029         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3030                 list_virt_addr;
3031
3032         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3033         /* Avoid "put" pointer going beyond "get" pointer */
3034         if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
3035                 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
3036                 netif_stop_queue(dev);
3037                 dev_kfree_skb(skb);
3038                 spin_unlock_irqrestore(&sp->tx_lock, flags);
3039                 return 0;
3040         }
3041 #ifdef NETIF_F_TSO
3042         mss = skb_shinfo(skb)->tso_size;
3043         if (mss) {
3044                 txdp->Control_1 |= TXD_TCP_LSO_EN;
3045                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
3046         }
3047 #endif
3048
3049         frg_cnt = skb_shinfo(skb)->nr_frags;
3050         frg_len = skb->len - skb->data_len;
3051
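        /*
         * DMA-map the linear part of the skb; any page fragments are mapped
         * in the loop further below.
         */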
3052         txdp->Buffer_Pointer = pci_map_single
3053             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3054         txdp->Host_Control = (unsigned long) skb;
3055         if (skb->ip_summed == CHECKSUM_HW) {
3056                 txdp->Control_2 |=
3057                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3058                      TXD_TX_CKO_UDP_EN);
3059         }
3060
3061         txdp->Control_2 |= config->tx_intr_type;
3062
3063         if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3064                 txdp->Control_2 |= TXD_VLAN_ENABLE;
3065                 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
3066         }
3067
3068         txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
3069                             TXD_GATHER_CODE_FIRST);
3070         txdp->Control_1 |= TXD_LIST_OWN_XENA;
3071
3072         /* For fragmented SKB. */
3073         for (i = 0; i < frg_cnt; i++) {
3074                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3075                 txdp++;
3076                 txdp->Buffer_Pointer = (u64) pci_map_page
3077                     (sp->pdev, frag->page, frag->page_offset,
3078                      frag->size, PCI_DMA_TODEVICE);
3079                 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
3080         }
3081         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3082
3083         tx_fifo = mac_control->tx_FIFO_start[queue];
3084         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3085         writeq(val64, &tx_fifo->TxDL_Pointer);
3086
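        /*
         * Order the TxDL pointer write before the doorbell write to the
         * List_Control register below.
         */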
3087         wmb();
3088
3089         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3090                  TX_FIFO_LAST_LIST);
3091
3092 #ifdef NETIF_F_TSO
3093         if (mss)
3094                 val64 |= TX_FIFO_SPECIAL_FUNC;
3095 #endif
3096         writeq(val64, &tx_fifo->List_Control);
3097
3098         put_off++;
3099         put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3100         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3101
3102         /* Avoid "put" pointer going beyond "get" pointer */
3103         if (((put_off + 1) % queue_len) == get_off) {
3104                 DBG_PRINT(TX_DBG,
3105                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3106                           put_off, get_off);
3107                 netif_stop_queue(dev);
3108         }
3109
3110         dev->trans_start = jiffies;
3111         spin_unlock_irqrestore(&sp->tx_lock, flags);
3112
3113         return 0;
3114 }
3115
3116 static void
3117 s2io_alarm_handle(unsigned long data)
3118 {
3119         nic_t *sp = (nic_t *)data;
3120
3121         alarm_intr_handler(sp);
3122         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
3123 }
3124
3125 /**
3126  *  s2io_isr - ISR handler of the device.
3127  *  @irq: the irq of the device.
3128  *  @dev_id: a void pointer to the dev structure of the NIC.
3129  *  @pt_regs: pointer to the registers pushed on the stack.
3130  *  Description:  This function is the ISR handler of the device. It
3131  *  identifies the reason for the interrupt and calls the relevant
3132  *  service routines. As a contingency measure, this ISR allocates the
3133  *  recv buffers, if their number is below the panic value which is
3134  *  presently set to 25% of the original number of rcv buffers allocated.
3135  *  Return value:
3136  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
3137  *   IRQ_NONE: will be returned if interrupt is not from our device
3138  */
3139 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
3140 {
3141         struct net_device *dev = (struct net_device *) dev_id;
3142         nic_t *sp = dev->priv;
3143         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3144         int i;
3145         u64 reason = 0, val64;
3146         mac_info_t *mac_control;
3147         struct config_param *config;
3148
3149         atomic_inc(&sp->isr_cnt);
3150         mac_control = &sp->mac_control;
3151         config = &sp->config;
3152
3153         /*
3154          * Identify the cause for interrupt and call the appropriate
3155          * interrupt handler. Causes for the interrupt could be;
3156          * 1. Rx of packet.
3157          * 2. Tx complete.
3158          * 3. Link down.
3159          * 4. Error in any functional blocks of the NIC.
3160          */
3161         reason = readq(&bar0->general_int_status);
3162
3163         if (!reason) {
3164                 /* The interrupt was not raised by Xena. */
3165                 atomic_dec(&sp->isr_cnt);
3166                 return IRQ_NONE;
3167         }
3168
3169 #ifdef CONFIG_S2IO_NAPI
3170         if (reason & GEN_INTR_RXTRAFFIC) {
3171                 if (netif_rx_schedule_prep(dev)) {
3172                         en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
3173                                               DISABLE_INTRS);
3174                         __netif_rx_schedule(dev);
3175                 }
3176         }
3177 #else
3178         /* If Intr is because of Rx Traffic */
3179         if (reason & GEN_INTR_RXTRAFFIC) {
3180                 /*
3181                  * rx_traffic_int reg is an R1 register, writing all 1's
3182                  * will ensure that the actual interrupt causing bit gets
3183                  * cleared and hence a read can be avoided.
3184                  */
3185                 val64 = 0xFFFFFFFFFFFFFFFFULL;
3186                 writeq(val64, &bar0->rx_traffic_int);
3187                 for (i = 0; i < config->rx_ring_num; i++) {
3188                         rx_intr_handler(&mac_control->rings[i]);
3189                 }
3190         }
3191 #endif
3192
3193         /* If Intr is because of Tx Traffic */
3194         if (reason & GEN_INTR_TXTRAFFIC) {
3195                 /*
3196                  * tx_traffic_int reg is an R1 register, writing all 1's
3197                  * will ensure that the actual interrupt causing bit gets
3198                  * cleared and hence a read can be avoided.
3199                  */
3200                 val64 = 0xFFFFFFFFFFFFFFFFULL;
3201                 writeq(val64, &bar0->tx_traffic_int);
3202
3203                 for (i = 0; i < config->tx_fifo_num; i++)
3204                         tx_intr_handler(&mac_control->fifos[i]);
3205         }
3206
3207         /*
3208          * If the Rx buffer count is below the panic threshold then
3209          * reallocate the buffers from the interrupt handler itself,
3210          * else schedule a tasklet to reallocate the buffers.
3211          */
3212 #ifndef CONFIG_S2IO_NAPI
3213         for (i = 0; i < config->rx_ring_num; i++) {
3214                 int ret;
3215                 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3216                 int level = rx_buffer_level(sp, rxb_size, i);
3217
3218                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3219                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3220                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
3221                         if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3222                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3223                                           dev->name);
3224                                 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3225                                 clear_bit(0, (&sp->tasklet_status));
3226                                 atomic_dec(&sp->isr_cnt);
3227                                 return IRQ_HANDLED;
3228                         }
3229                         clear_bit(0, (&sp->tasklet_status));
3230                 } else if (level == LOW) {
3231                         tasklet_schedule(&sp->task);
3232                 }
3233         }
3234 #endif
3235
3236         atomic_dec(&sp->isr_cnt);
3237         return IRQ_HANDLED;
3238 }
3239
3240 /**
3241  * s2io_updt_stats - triggers an immediate update of the device statistics
3242  */
3243 static void s2io_updt_stats(nic_t *sp)
3244 {
3245         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3246         u64 val64;
3247         int cnt = 0;
3248
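        /*
         * Kick off a one-shot statistics DMA and poll stat_cfg until the
         * command bit clears, giving up after five attempts.
         */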
3249         if (atomic_read(&sp->card_state) == CARD_UP) {
3250                 /* Apprx 30us on a 133 MHz bus */
3251                 val64 = SET_UPDT_CLICKS(10) |
3252                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3253                 writeq(val64, &bar0->stat_cfg);
3254                 do {
3255                         udelay(100);
3256                         val64 = readq(&bar0->stat_cfg);
3257                         if (!(val64 & BIT(0)))
3258                                 break;
3259                         cnt++;
3260                         if (cnt == 5)
3261                                 break; /* Updt failed */
3262                 } while(1);
3263         }
3264 }
3265
3266 /**
3267  *  s2io_get_stats - Updates the device statistics structure.
3268  *  @dev : pointer to the device structure.
3269  *  Description:
3270  *  This function updates the device statistics structure in the s2io_nic
3271  *  structure and returns a pointer to the same.
3272  *  Return value:
3273  *  pointer to the updated net_device_stats structure.
3274  */
3275
3276 struct net_device_stats *s2io_get_stats(struct net_device *dev)
3277 {
3278         nic_t *sp = dev->priv;
3279         mac_info_t *mac_control;
3280         struct config_param *config;
3281
3282
3283         mac_control = &sp->mac_control;
3284         config = &sp->config;
3285
3286         /* Configure Stats for immediate updt */
3287         s2io_updt_stats(sp);
3288
3289         sp->stats.tx_packets =
3290                 le32_to_cpu(mac_control->stats_info->tmac_frms);
3291         sp->stats.tx_errors =
3292                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3293         sp->stats.rx_errors =
3294                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3295         sp->stats.multicast =
3296                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3297         sp->stats.rx_length_errors =
3298                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3299
3300         return (&sp->stats);
3301 }
3302
3303 /**
3304  *  s2io_set_multicast - entry point for multicast address enable/disable.
3305  *  @dev : pointer to the device structure
3306  *  Description:
3307  *  This function is a driver entry point which gets called by the kernel
3308  *  whenever multicast addresses must be enabled/disabled. This also gets
3309  *  called to set/reset promiscuous mode. Depending on the device flags, we
3310  *  determine whether multicast addresses must be enabled or promiscuous mode
3311  *  is to be disabled, etc.
3312  *  Return value:
3313  *  void.
3314  */
3315
3316 static void s2io_set_multicast(struct net_device *dev)
3317 {
3318         int i, j, prev_cnt;
3319         struct dev_mc_list *mclist;
3320         nic_t *sp = dev->priv;
3321         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3322         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3323             0xfeffffffffffULL;
3324         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3325         void __iomem *add;
3326
3327         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3328                 /*  Enable all Multicast addresses */
3329                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3330                        &bar0->rmac_addr_data0_mem);
3331                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3332                        &bar0->rmac_addr_data1_mem);
3333                 val64 = RMAC_ADDR_CMD_MEM_WE |
3334                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3335                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3336                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3337                 /* Wait till command completes */
3338                 wait_for_cmd_complete(sp);
3339
3340                 sp->m_cast_flg = 1;
3341                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3342         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3343                 /*  Disable all Multicast addresses */
3344                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3345                        &bar0->rmac_addr_data0_mem);
3346                 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3347                        &bar0->rmac_addr_data1_mem);
3348                 val64 = RMAC_ADDR_CMD_MEM_WE |
3349                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3350                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3351                 writeq(val64, &bar0->rmac_addr_cmd_mem);
3352                 /* Wait till command completes */
3353                 wait_for_cmd_complete(sp);
3354
3355                 sp->m_cast_flg = 0;
3356                 sp->all_multi_pos = 0;
3357         }
3358
3359         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3360                 /*  Put the NIC into promiscuous mode */
3361                 add = &bar0->mac_cfg;
3362                 val64 = readq(&bar0->mac_cfg);
3363                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
3364
3365                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3366                 writel((u32) val64, add);
3367                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3368                 writel((u32) (val64 >> 32), (add + 4));
3369
3370                 val64 = readq(&bar0->mac_cfg);
3371                 sp->promisc_flg = 1;
3372                 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3373                           dev->name);
3374         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3375                 /*  Remove the NIC from promiscuous mode */
3376                 add = &bar0->mac_cfg;
3377                 val64 = readq(&bar0->mac_cfg);
3378                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3379
3380                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3381                 writel((u32) val64, add);
3382                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3383                 writel((u32) (val64 >> 32), (add + 4));
3384
3385                 val64 = readq(&bar0->mac_cfg);
3386                 sp->promisc_flg = 0;
3387                 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3388                           dev->name);
3389         }
3390
3391         /*  Update individual M_CAST address list */
3392         if ((!sp->m_cast_flg) && dev->mc_count) {
3393                 if (dev->mc_count >
3394                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3395                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3396                                   dev->name);
3397                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
3398                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3399                         return;
3400                 }
3401
3402                 prev_cnt = sp->mc_addr_count;
3403                 sp->mc_addr_count = dev->mc_count;
3404
3405                 /* Clear out the previous list of Mc in the H/W. */
3406                 for (i = 0; i < prev_cnt; i++) {
3407                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3408                                &bar0->rmac_addr_data0_mem);
3409                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3410                                 &bar0->rmac_addr_data1_mem);
3411                         val64 = RMAC_ADDR_CMD_MEM_WE |
3412                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3413                             RMAC_ADDR_CMD_MEM_OFFSET
3414                             (MAC_MC_ADDR_START_OFFSET + i);
3415                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3416
3417                         /* Wait till command completes */
3418                         if (wait_for_cmd_complete(sp)) {
3419                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3420                                           dev->name);
3421                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3422                                 return;
3423                         }
3424                 }
3425
3426                 /* Create the new Rx filter list and update the same in H/W. */
3427                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3428                      i++, mclist = mclist->next) {
3429                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
3430                                ETH_ALEN);
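                        /*
                         * Pack the 6-byte multicast address into a 48-bit
                         * value, most significant byte first.
                         */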
3431                         for (j = 0; j < ETH_ALEN; j++) {
3432                                 mac_addr |= mclist->dmi_addr[j];
3433                                 mac_addr <<= 8;
3434                         }
3435                         mac_addr >>= 8;
3436                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3437                                &bar0->rmac_addr_data0_mem);
3438                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3439                                 &bar0->rmac_addr_data1_mem);
3440                         val64 = RMAC_ADDR_CMD_MEM_WE |
3441                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3442                             RMAC_ADDR_CMD_MEM_OFFSET
3443                             (i + MAC_MC_ADDR_START_OFFSET);
3444                         writeq(val64, &bar0->rmac_addr_cmd_mem);
3445
3446                         /* Wait till command completes */
3447                         if (wait_for_cmd_complete(sp)) {
3448                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
3449                                           dev->name);
3450                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3451                                 return;
3452                         }
3453                 }
3454         }
3455 }
3456
3457 /**
3458  *  s2io_set_mac_addr - Programs the Xframe mac address
3459  *  @dev : pointer to the device structure.
3460  *  @addr: a uchar pointer to the new mac address which is to be set.
3461  *  Description : This procedure will program the Xframe to receive
3462  *  frames with new Mac Address
3463  *  Return value: SUCCESS on success and an appropriate (-)ve integer
3464  *  as defined in errno.h file on failure.
3465  */
3466
3467 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3468 {
3469         nic_t *sp = dev->priv;
3470         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3471         register u64 val64, mac_addr = 0;
3472         int i;
3473
3474         /*
3475          * Set the new MAC address as the new unicast filter and reflect this
3476          * change on the device address registered with the OS. It will be
3477          * at offset 0.
3478          */
3479         for (i = 0; i < ETH_ALEN; i++) {
3480                 mac_addr <<= 8;
3481                 mac_addr |= addr[i];
3482         }
3483
3484         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3485                &bar0->rmac_addr_data0_mem);
3486
3487         val64 =
3488             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3489             RMAC_ADDR_CMD_MEM_OFFSET(0);
3490         writeq(val64, &bar0->rmac_addr_cmd_mem);
3491         /* Wait till command completes */
3492         if (wait_for_cmd_complete(sp)) {
3493                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3494                 return FAILURE;
3495         }
3496
3497         return SUCCESS;
3498 }
3499
3500 /**
3501  * s2io_ethtool_sset - Sets different link parameters.
3502  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3503  * @info: pointer to the structure with parameters given by ethtool to set
3504  * link information.
3505  * Description:
3506  * The function sets different link parameters provided by the user onto
3507  * the NIC.
3508  * Return value:
3509  * 0 on success.
3510 */
3511
3512 static int s2io_ethtool_sset(struct net_device *dev,
3513                              struct ethtool_cmd *info)
3514 {
3515         nic_t *sp = dev->priv;
3516         if ((info->autoneg == AUTONEG_ENABLE) ||
3517             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3518                 return -EINVAL;
3519         else {
3520                 s2io_close(sp->dev);
3521                 s2io_open(sp->dev);
3522         }
3523
3524         return 0;
3525 }
3526
3527 /**
3528  * s2io_ethtool_gset - Return link specific information.
3529  * @sp : private member of the device structure, pointer to the
3530  *      s2io_nic structure.
3531  * @info : pointer to the structure with parameters given by ethtool
3532  * to return link information.
3533  * Description:
3534  * Returns link specific information like speed, duplex etc.. to ethtool.
3535  * Return value :
3536  * return 0 on success.
3537  */
3538
3539 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3540 {
3541         nic_t *sp = dev->priv;
3542         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3543         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3544         info->port = PORT_FIBRE;
3545         /* info->transceiver?? TODO */
3546
3547         if (netif_carrier_ok(sp->dev)) {
3548                 info->speed = 10000;
3549                 info->duplex = DUPLEX_FULL;
3550         } else {
3551                 info->speed = -1;
3552                 info->duplex = -1;
3553         }
3554
3555         info->autoneg = AUTONEG_DISABLE;
3556         return 0;
3557 }
3558
3559 /**
3560  * s2io_ethtool_gdrvinfo - Returns driver specific information.
3561  * @sp : private member of the device structure, which is a pointer to the
3562  * s2io_nic structure.
3563  * @info : pointer to the structure with parameters given by ethtool to
3564  * return driver information.
3565  * Description:
3566  * Returns driver specific information like name, version etc. to ethtool.
3567  * Return value:
3568  *  void
3569  */
3570
3571 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3572                                   struct ethtool_drvinfo *info)
3573 {
3574         nic_t *sp = dev->priv;
3575
3576         strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3577         strncpy(info->version, s2io_driver_version,
3578                 sizeof(s2io_driver_version));
3579         strncpy(info->fw_version, "", 32);
3580         strncpy(info->bus_info, pci_name(sp->pdev), 32);
3581         info->regdump_len = XENA_REG_SPACE;
3582         info->eedump_len = XENA_EEPROM_SPACE;
3583         info->testinfo_len = S2IO_TEST_LEN;
3584         info->n_stats = S2IO_STAT_LEN;
3585 }
3586
3587 /**
3588  *  s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3589  *  @sp: private member of the device structure, which is a pointer to the
3590  *  s2io_nic structure.
3591  *  @regs : pointer to the structure with parameters given by ethtool for
3592  *  dumping the registers.
3593  *  @reg_space: The input argument into which all the registers are dumped.
3594  *  Description:
3595  *  Dumps the entire register space of xFrame NIC into the user given
3596  *  buffer area.
3597  * Return value :
3598  * void .
3599 */
3600
3601 static void s2io_ethtool_gregs(struct net_device *dev,
3602                                struct ethtool_regs *regs, void *space)
3603 {
3604         int i;
3605         u64 reg;
3606         u8 *reg_space = (u8 *) space;
3607         nic_t *sp = dev->priv;
3608
3609         regs->len = XENA_REG_SPACE;
3610         regs->version = sp->pdev->subsystem_device;
3611
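        /* Copy the register space out 8 bytes (one 64-bit register) at a time. */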
3612         for (i = 0; i < regs->len; i += 8) {
3613                 reg = readq(sp->bar0 + i);
3614                 memcpy((reg_space + i), &reg, 8);
3615         }
3616 }
3617
3618 /**
3619  *  s2io_phy_id  - timer function that alternates adapter LED.
3620  *  @data : address of the private member of the device structure, which
3621  *  is a pointer to the s2io_nic structure, provided as an u32.
3622  * Description: This is actually the timer function that toggles the
3623  * adapter LED bit of the adapter control register on every
3624  * invocation. The timer is set for 1/2 a second, hence the NIC LED blinks
3625  * once every second.
3626 */
3627 static void s2io_phy_id(unsigned long data)
3628 {
3629         nic_t *sp = (nic_t *) data;
3630         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3631         u64 val64 = 0;
3632         u16 subid;
3633
3634         subid = sp->pdev->subsystem_device;
3635         if ((sp->device_type == XFRAME_II_DEVICE) ||
3636                    ((subid & 0xFF) >= 0x07)) {
3637                 val64 = readq(&bar0->gpio_control);
3638                 val64 ^= GPIO_CTRL_GPIO_0;
3639                 writeq(val64, &bar0->gpio_control);
3640         } else {
3641                 val64 = readq(&bar0->adapter_control);
3642                 val64 ^= ADAPTER_LED_ON;
3643                 writeq(val64, &bar0->adapter_control);
3644         }
3645
3646         mod_timer(&sp->id_timer, jiffies + HZ / 2);
3647 }
3648
3649 /**
3650  * s2io_ethtool_idnic - To physically identify the nic on the system.
3651  * @sp : private member of the device structure, which is a pointer to the
3652  * s2io_nic structure.
3653  * @id : pointer to the structure with identification parameters given by
3654  * ethtool.
3655  * Description: Used to physically identify the NIC on the system.
3656  * The Link LED will blink for a time specified by the user for
3657  * identification.
3658  * NOTE: The Link has to be Up to be able to blink the LED. Hence
3659  * identification is possible only if its link is up.
3660  * Return value:
3661  * int , returns 0 on success
3662  */
3663
3664 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3665 {
3666         u64 val64 = 0, last_gpio_ctrl_val;
3667         nic_t *sp = dev->priv;
3668         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3669         u16 subid;
3670
3671         subid = sp->pdev->subsystem_device;
3672         last_gpio_ctrl_val = readq(&bar0->gpio_control);
3673         if ((sp->device_type == XFRAME_I_DEVICE) &&
3674                 ((subid & 0xFF) < 0x07)) {
3675                 val64 = readq(&bar0->adapter_control);
3676                 if (!(val64 & ADAPTER_CNTL_EN)) {
3677                         printk(KERN_ERR
3678                                "Adapter Link down, cannot blink LED\n");
3679                         return -EFAULT;
3680                 }
3681         }
3682         if (sp->id_timer.function == NULL) {
3683                 init_timer(&sp->id_timer);
3684                 sp->id_timer.function = s2io_phy_id;
3685                 sp->id_timer.data = (unsigned long) sp;
3686         }
3687         mod_timer(&sp->id_timer, jiffies);
3688         if (data)
3689                 msleep_interruptible(data * HZ);
3690         else
3691                 msleep_interruptible(MAX_FLICKER_TIME);
3692         del_timer_sync(&sp->id_timer);
3693
3694         if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
3695                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3696                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3697         }
3698
3699         return 0;
3700 }
3701
3702 /**
3703  * s2io_ethtool_getpause_data - Pause frame generation and reception.
3704  * @sp : private member of the device structure, which is a pointer to the
3705  *      s2io_nic structure.
3706  * @ep : pointer to the structure with pause parameters given by ethtool.
3707  * Description:
3708  * Returns the Pause frame generation and reception capability of the NIC.
3709  * Return value:
3710  *  void
3711  */
3712 static void s2io_ethtool_getpause_data(struct net_device *dev,
3713                                        struct ethtool_pauseparam *ep)
3714 {
3715         u64 val64;
3716         nic_t *sp = dev->priv;
3717         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3718
3719         val64 = readq(&bar0->rmac_pause_cfg);
3720         if (val64 & RMAC_PAUSE_GEN_ENABLE)
3721                 ep->tx_pause = TRUE;
3722         if (val64 & RMAC_PAUSE_RX_ENABLE)
3723                 ep->rx_pause = TRUE;
3724         ep->autoneg = FALSE;
3725 }
3726
3727 /**
3728  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
3729  * @sp : private member of the device structure, which is a pointer to the
3730  *      s2io_nic structure.
3731  * @ep : pointer to the structure with pause parameters given by ethtool.
3732  * Description:
3733  * It can be used to set or reset Pause frame generation or reception
3734  * support of the NIC.
3735  * Return value:
3736  * int, returns 0 on Success
3737  */
3738
3739 static int s2io_ethtool_setpause_data(struct net_device *dev,
3740                                struct ethtool_pauseparam *ep)
3741 {
3742         u64 val64;
3743         nic_t *sp = dev->priv;
3744         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3745
3746         val64 = readq(&bar0->rmac_pause_cfg);
3747         if (ep->tx_pause)
3748                 val64 |= RMAC_PAUSE_GEN_ENABLE;
3749         else
3750                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3751         if (ep->rx_pause)
3752                 val64 |= RMAC_PAUSE_RX_ENABLE;
3753         else
3754                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3755         writeq(val64, &bar0->rmac_pause_cfg);
3756         return 0;
3757 }
3758
3759 /**
3760  * read_eeprom - reads 4 bytes of data from user given offset.
3761  * @sp : private member of the device structure, which is a pointer to the
3762  *      s2io_nic structure.
3763  * @off : offset from which the data must be read
3764  * @data : It's an output parameter where the data read at the given
3765  *      offset is stored.
3766  * Description:
3767  * Will read 4 bytes of data from the user given offset and return the
3768  * read data.
3769  * NOTE: Will allow reading only the part of the EEPROM visible through the
3770  *   I2C bus.
3771  * Return value:
3772  *  -1 on failure and 0 on success.
3773  */
3774
3775 #define S2IO_DEV_ID             5
3776 static int read_eeprom(nic_t * sp, int off, u32 * data)
3777 {
3778         int ret = -1;
3779         u32 exit_cnt = 0;
3780         u64 val64;
3781         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3782
3783         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3784             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3785             I2C_CONTROL_CNTL_START;
3786         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3787
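        /*
         * Poll for I2C transaction completion, sleeping 50ms per iteration
         * for at most five attempts.
         */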
3788         while (exit_cnt < 5) {
3789                 val64 = readq(&bar0->i2c_control);
3790                 if (I2C_CONTROL_CNTL_END(val64)) {
3791                         *data = I2C_CONTROL_GET_DATA(val64);
3792                         ret = 0;
3793                         break;
3794                 }
3795                 msleep(50);
3796                 exit_cnt++;
3797         }
3798
3799         return ret;
3800 }
3801
3802 /**
3803  *  write_eeprom - actually writes the relevant part of the data value.
3804  *  @sp : private member of the device structure, which is a pointer to the
3805  *       s2io_nic structure.
3806  *  @off : offset at which the data must be written
3807  *  @data : The data that is to be written
3808  *  @cnt : Number of bytes of the data that are actually to be written into
3809  *  the Eeprom. (max of 3)
3810  * Description:
3811  *  Actually writes the relevant part of the data value into the Eeprom
3812  *  through the I2C bus.
3813  * Return value:
3814  *  0 on success, -1 on failure.
3815  */
3816
3817 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3818 {
3819         int exit_cnt = 0, ret = -1;
3820         u64 val64;
3821         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3822
3823         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3824             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3825             I2C_CONTROL_CNTL_START;
3826         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3827
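        /*
         * Poll for I2C completion; the write is treated as successful only
         * if the device did not NACK the transaction.
         */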
3828         while (exit_cnt < 5) {
3829                 val64 = readq(&bar0->i2c_control);
3830                 if (I2C_CONTROL_CNTL_END(val64)) {
3831                         if (!(val64 & I2C_CONTROL_NACK))
3832                                 ret = 0;
3833                         break;
3834                 }
3835                 msleep(50);
3836                 exit_cnt++;
3837         }
3838
3839         return ret;
3840 }
3841
3842 /**
3843  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
3844  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3845  *  @eeprom : pointer to the user level structure provided by ethtool,
3846  *  containing all relevant information.
3847  *  @data_buf : user defined buffer into which the Eeprom data is read.
3848  *  Description: Reads the values stored in the Eeprom at given offset
3849  *  for a given length. Stores these values in the input argument data
3850  *  buffer 'data_buf' and returns these to the caller (ethtool.)
3851  *  Return value:
3852  *  int  0 on success
3853  */
3854
3855 static int s2io_ethtool_geeprom(struct net_device *dev,
3856                          struct ethtool_eeprom *eeprom, u8 * data_buf)
3857 {
3858         u32 data, i, valid;
3859         nic_t *sp = dev->priv;
3860
3861         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3862
3863         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3864                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3865
3866         for (i = 0; i < eeprom->len; i += 4) {
3867                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3868                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3869                         return -EFAULT;
3870                 }
3871                 valid = INV(data);
3872                 memcpy((data_buf + i), &valid, 4);
3873         }
3874         return 0;
3875 }
3876
3877 /**
3878  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3879  *  @sp : private member of the device structure, which is a pointer to the
3880  *  s2io_nic structure.
3881  *  @eeprom : pointer to the user level structure provided by ethtool,
3882  *  containing all relevant information.
3883  *  @data_buf : user defined value to be written into Eeprom.
3884  *  Description:
3885  *  Tries to write the user provided value in the Eeprom, at the offset
3886  *  given by the user.
3887  *  Return value:
3888  *  0 on success, -EFAULT on failure.
3889  */
3890
3891 static int s2io_ethtool_seeprom(struct net_device *dev,
3892                                 struct ethtool_eeprom *eeprom,
3893                                 u8 * data_buf)
3894 {
3895         int len = eeprom->len, cnt = 0;
3896         u32 valid = 0, data;
3897         nic_t *sp = dev->priv;
3898
3899         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3900                 DBG_PRINT(ERR_DBG,
3901                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3902                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3903                           eeprom->magic);
3904                 return -EFAULT;
3905         }
3906
3907         while (len) {
3908                 data = (u32) data_buf[cnt] & 0x000000FF;
3909                 if (data) {
3910                         valid = (u32) (data << 24);
3911                 } else
3912                         valid = data;
3913
3914                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3915                         DBG_PRINT(ERR_DBG,
3916                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3917                         DBG_PRINT(ERR_DBG,
3918                                   "write into the specified offset\n");
3919                         return -EFAULT;
3920                 }
3921                 cnt++;
3922                 len--;
3923         }
3924
3925         return 0;
3926 }
3927
3928 /**
3929  * s2io_register_test - reads and writes into all clock domains.
3930  * @sp : private member of the device structure, which is a pointer to the
3931  * s2io_nic structure.
3932  * @data : variable that returns the result of each of the tests conducted
3933  * by the driver.
3934  * Description:
3935  * Read and write into all clock domains. The NIC has 3 clock domains;
3936  * verify that registers in all the three regions are accessible.
3937  * Return value:
3938  * 0 on success.
3939  */
3940
3941 static int s2io_register_test(nic_t * sp, uint64_t * data)
3942 {
3943         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3944         u64 val64 = 0;
3945         int fail = 0;
3946
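        /*
         * The constants checked below are presumably the fixed power-on
         * values of read-only registers in each of the three clock
         * domains; a mismatch indicates that the domain is not reachable.
         */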
3947         val64 = readq(&bar0->pif_rd_swapper_fb);
3948         if (val64 != 0x123456789abcdefULL) {
3949                 fail = 1;
3950                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3951         }
3952
3953         val64 = readq(&bar0->rmac_pause_cfg);
3954         if (val64 != 0xc000ffff00000000ULL) {
3955                 fail = 1;
3956                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3957         }
3958
3959         val64 = readq(&bar0->rx_queue_cfg);
3960         if (val64 != 0x0808080808080808ULL) {
3961                 fail = 1;
3962                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3963         }
3964
3965         val64 = readq(&bar0->xgxs_efifo_cfg);
3966         if (val64 != 0x000000001923141EULL) {
3967                 fail = 1;
3968                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3969         }
3970
3971         val64 = 0x5A5A5A5A5A5A5A5AULL;
3972         writeq(val64, &bar0->xmsi_data);
3973         val64 = readq(&bar0->xmsi_data);
3974         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3975                 fail = 1;
3976                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3977         }
3978
3979         val64 = 0xA5A5A5A5A5A5A5A5ULL;
3980         writeq(val64, &bar0->xmsi_data);
3981         val64 = readq(&bar0->xmsi_data);
3982         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3983                 fail = 1;
3984                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3985         }
3986
3987         *data = fail;
3988         return 0;
3989 }
3990
3991 /**
3992  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3993  * @sp : private member of the device structure, which is a pointer to the
3994  * s2io_nic structure.
3995  * @data:variable that returns the result of each of the test conducted by
3996  * the driver.
3997  * Description:
3998  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3999  * register.
4000  * Return value:
4001  * 0 on success.
4002  */
4003
4004 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
4005 {
4006         int fail = 0;
4007         u32 ret_data;
4008
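        /*
         * Offsets where write_eeprom() is expected to fail (0x0, 0x7C, 0x80,
         * 0xFC, 0x100, 0x4EC) are presumably protected regions of the EEPROM,
         * while 0x4F0 and 0x7FC must accept a write and read the test
         * pattern back unchanged.
         */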
4009         /* Test Write Error at offset 0 */
4010         if (!write_eeprom(sp, 0, 0, 3))
4011                 fail = 1;
4012
4013         /* Test Write at offset 4f0 */
4014         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
4015                 fail = 1;
4016         if (read_eeprom(sp, 0x4F0, &ret_data))
4017                 fail = 1;
4018
4019         if (ret_data != 0x01234567)
4020                 fail = 1;
4021
4022         /* Reset the EEPROM data to FFFF */
4023         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
4024
4025         /* Test Write Request Error at offset 0x7c */
4026         if (!write_eeprom(sp, 0x07C, 0, 3))
4027                 fail = 1;
4028
4029         /* Test Write Request at offset 0x7fc */
4030         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
4031                 fail = 1;
4032         if (read_eeprom(sp, 0x7FC, &ret_data))
4033                 fail = 1;
4034
4035         if (ret_data != 0x01234567)
4036                 fail = 1;
4037
4038         /* Reset the EEPROM data to FFFF */
4039         write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
4040
4041         /* Test Write Error at offset 0x80 */
4042         if (!write_eeprom(sp, 0x080, 0, 3))
4043                 fail = 1;
4044
4045         /* Test Write Error at offset 0xfc */
4046         if (!write_eeprom(sp, 0x0FC, 0, 3))
4047                 fail = 1;
4048
4049         /* Test Write Error at offset 0x100 */
4050         if (!write_eeprom(sp, 0x100, 0, 3))
4051                 fail = 1;
4052
4053         /* Test Write Error at offset 4ec */
4054         if (!write_eeprom(sp, 0x4EC, 0, 3))
4055                 fail = 1;
4056
4057         *data = fail;
4058         return 0;
4059 }
4060
4061 /**
4062  * s2io_bist_test - invokes the MemBist test of the card.
4063  * @sp : private member of the device structure, which is a pointer to the
4064  * s2io_nic structure.
4065  * @data:variable that returns the result of each of the test conducted by
4066  * the driver.
4067  * Description:
4068  * This invokes the MemBist test of the card. We give around
4069  * 2 secs time for the Test to complete. If it's still not complete
4070  * within this period, we consider that the test failed.
4071  * Return value:
4072  * 0 on success and -1 on failure.
4073  */
4074
4075 static int s2io_bist_test(nic_t * sp, uint64_t * data)
4076 {
4077         u8 bist = 0;
4078         int cnt = 0, ret = -1;
4079
4080         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4081         bist |= PCI_BIST_START;
4082         pci_write_config_byte(sp->pdev, PCI_BIST, bist);
4083
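        /*
         * Poll PCI_BIST up to 20 times at 100ms intervals (roughly the
         * 2 seconds mentioned above); the device clears PCI_BIST_START
         * once the self test has finished.
         */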
4084         while (cnt < 20) {
4085                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
4086                 if (!(bist & PCI_BIST_START)) {
4087                         *data = (bist & PCI_BIST_CODE_MASK);
4088                         ret = 0;
4089                         break;
4090                 }
4091                 msleep(100);
4092                 cnt++;
4093         }
4094
4095         return ret;
4096 }
4097
4098 /**
4099  * s2io_link_test - verifies the link state of the NIC
4100  * @sp : private member of the device structure, which is a pointer to the
4101  * s2io_nic structure.
4102  * @data: variable that returns the result of each of the test conducted by
4103  * the driver.
4104  * Description:
4105  * The function verifies the link state of the NIC and updates the input
4106  * argument 'data' appropriately.
4107  * Return value:
4108  * 0 on success.
4109  */
4110
4111 static int s2io_link_test(nic_t * sp, uint64_t * data)
4112 {
4113         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4114         u64 val64;
4115
4116         val64 = readq(&bar0->adapter_status);
4117         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
4118                 *data = 1;
4119
4120         return 0;
4121 }
4122
4123 /**
4124  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
4125  * @sp : private member of the device structure, which is a pointer to the
4126  * s2io_nic structure.
4127  * @data : variable that returns the result of each of the tests
4128  * conducted by the driver.
4129  * Description:
4130  *  This is one of the offline tests that checks the read and write
4131  *  access to the RldRam chip on the NIC.
4132  * Return value:
4133  *  0 on success.
4134  */
4135
4136 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
4137 {
4138         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4139         u64 val64;
4140         int cnt, iteration = 0, test_pass = 0;
4141
4142         val64 = readq(&bar0->adapter_control);
4143         val64 &= ~ADAPTER_ECC_EN;
4144         writeq(val64, &bar0->adapter_control);
4145
4146         val64 = readq(&bar0->mc_rldram_test_ctrl);
4147         val64 |= MC_RLDRAM_TEST_MODE;
4148         writeq(val64, &bar0->mc_rldram_test_ctrl);
4149
4150         val64 = readq(&bar0->mc_rldram_mrs);
4151         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
4152         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4153
4154         val64 |= MC_RLDRAM_MRS_ENABLE;
4155         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
4156
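        /*
         * Two passes over the RLDRAM test data registers: the second pass
         * inverts the upper 48 bits of each pattern so that every data bit
         * is exercised in both states.
         */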
4157         while (iteration < 2) {
4158                 val64 = 0x55555555aaaa0000ULL;
4159                 if (iteration == 1) {
4160                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4161                 }
4162                 writeq(val64, &bar0->mc_rldram_test_d0);
4163
4164                 val64 = 0xaaaa5a5555550000ULL;
4165                 if (iteration == 1) {
4166                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4167                 }
4168                 writeq(val64, &bar0->mc_rldram_test_d1);
4169
4170                 val64 = 0x55aaaaaaaa5a0000ULL;
4171                 if (iteration == 1) {
4172                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
4173                 }
4174                 writeq(val64, &bar0->mc_rldram_test_d2);
4175
4176                 val64 = (u64) (0x0000003fffff0000ULL);
4177                 writeq(val64, &bar0->mc_rldram_test_add);
4178
4179
4180                 val64 = MC_RLDRAM_TEST_MODE;
4181                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4182
4183                 val64 |=
4184                     MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
4185                     MC_RLDRAM_TEST_GO;
4186                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4187
4188                 for (cnt = 0; cnt < 5; cnt++) {
4189                         val64 = readq(&bar0->mc_rldram_test_ctrl);
4190                         if (val64 & MC_RLDRAM_TEST_DONE)
4191                                 break;
4192                         msleep(200);
4193                 }
4194
4195                 if (cnt == 5)
4196                         break;
4197
4198                 val64 = MC_RLDRAM_TEST_MODE;
4199                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4200
4201                 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
4202                 writeq(val64, &bar0->mc_rldram_test_ctrl);
4203
4204                 for (cnt = 0; cnt < 5; cnt++) {
4205                         val64 = readq(&bar0->mc_rldram_test_ctrl);
4206                         if (val64 & MC_RLDRAM_TEST_DONE)
4207                                 break;
4208                         msleep(500);
4209                 }
4210
4211                 if (cnt == 5)
4212                         break;
4213
4214                 val64 = readq(&bar0->mc_rldram_test_ctrl);
4215                 if (val64 & MC_RLDRAM_TEST_PASS)
4216                         test_pass = 1;
4217
4218                 iteration++;
4219         }
4220
4221         if (!test_pass)
4222                 *data = 1;
4223         else
4224                 *data = 0;
4225
4226         return 0;
4227 }
4228
4229 /**
4230  *  s2io_ethtool_test - conducts 5 tests to determine the health of the card.
4231  *  @sp : private member of the device structure, which is a pointer to the
4232  *  s2io_nic structure.
4233  *  @ethtest : pointer to an ethtool command specific structure that will be
4234  *  returned to the user.
4235  *  @data : variable that returns the result of each of the tests
4236  *  conducted by the driver.
4237  * Description:
4238  *  This function conducts 5 tests (4 offline and 1 online) to determine
4239  *  the health of the card.
4240  * Return value:
4241  *  void
4242  */
4243
4244 static void s2io_ethtool_test(struct net_device *dev,
4245                               struct ethtool_test *ethtest,
4246                               uint64_t * data)
4247 {
4248         nic_t *sp = dev->priv;
4249         int orig_state = netif_running(sp->dev);
4250
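        /*
         * data[] index mapping used below (presumably matching the order of
         * the ethtool self-test strings): 0 = register, 1 = EEPROM,
         * 2 = link, 3 = RLDRAM, 4 = BIST.
         */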
4251         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4252                 /* Offline Tests. */
4253                 if (orig_state)
4254                         s2io_close(sp->dev);
4255
4256                 if (s2io_register_test(sp, &data[0]))
4257                         ethtest->flags |= ETH_TEST_FL_FAILED;
4258
4259                 s2io_reset(sp);
4260
4261                 if (s2io_rldram_test(sp, &data[3]))
4262                         ethtest->flags |= ETH_TEST_FL_FAILED;
4263
4264                 s2io_reset(sp);
4265
4266                 if (s2io_eeprom_test(sp, &data[1]))
4267                         ethtest->flags |= ETH_TEST_FL_FAILED;
4268
4269                 if (s2io_bist_test(sp, &data[4]))
4270                         ethtest->flags |= ETH_TEST_FL_FAILED;
4271
4272                 if (orig_state)
4273                         s2io_open(sp->dev);
4274
4275                 data[2] = 0;
4276         } else {
4277                 /* Online Tests. */
4278                 if (!orig_state) {
4279                         DBG_PRINT(ERR_DBG,
4280                                   "%s: is not up, cannot run test\n",
4281                                   dev->name);
4282                         data[0] = -1;
4283                         data[1] = -1;
4284                         data[2] = -1;
4285                         data[3] = -1;
4286                         data[4] = -1;
4287                 }
4288
4289                 if (s2io_link_test(sp, &data[2]))
4290                         ethtest->flags |= ETH_TEST_FL_FAILED;
4291
4292                 data[0] = 0;
4293                 data[1] = 0;
4294                 data[3] = 0;
4295                 data[4] = 0;
4296         }
4297 }
4298
4299 static void s2io_get_ethtool_stats(struct net_device *dev,
4300                                    struct ethtool_stats *estats,
4301                                    u64 * tmp_stats)
4302 {
4303         int i = 0;
4304         nic_t *sp = dev->priv;
4305         StatInfo_t *stat_info = sp->mac_control.stats_info;
4306
4307         s2io_updt_stats(sp);
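        /*
         * Each 64-bit counter below is assembled from two little-endian
         * 32-bit words of the hardware statistics block: the *_oflow word
         * supplies the upper 32 bits, the base word the lower 32 bits.
         */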
4308         tmp_stats[i++] =
4309                 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
4310                 le32_to_cpu(stat_info->tmac_frms);
4311         tmp_stats[i++] =
4312                 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
4313                 le32_to_cpu(stat_info->tmac_data_octets);
4314         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4315         tmp_stats[i++] =
4316                 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
4317                 le32_to_cpu(stat_info->tmac_mcst_frms);
4318         tmp_stats[i++] =
4319                 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
4320                 le32_to_cpu(stat_info->tmac_bcst_frms);
4321         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4322         tmp_stats[i++] =
4323                 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
4324                 le32_to_cpu(stat_info->tmac_any_err_frms);
4325         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4326         tmp_stats[i++] =
4327                 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
4328                 le32_to_cpu(stat_info->tmac_vld_ip);
4329         tmp_stats[i++] =
4330                 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
4331                 le32_to_cpu(stat_info->tmac_drop_ip);
4332         tmp_stats[i++] =
4333                 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
4334                 le32_to_cpu(stat_info->tmac_icmp);
4335         tmp_stats[i++] =
4336                 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
4337                 le32_to_cpu(stat_info->tmac_rst_tcp);
4338         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4339         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
4340                 le32_to_cpu(stat_info->tmac_udp);
4341         tmp_stats[i++] =
4342                 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
4343                 le32_to_cpu(stat_info->rmac_vld_frms);
4344         tmp_stats[i++] =
4345                 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
4346                 le32_to_cpu(stat_info->rmac_data_octets);
4347         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4348         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4349         tmp_stats[i++] =
4350                 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
4351                 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4352         tmp_stats[i++] =
4353                 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
4354                 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4355         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4356         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4357         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4358         tmp_stats[i++] =
4359                 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
4360                 le32_to_cpu(stat_info->rmac_discarded_frms);
4361         tmp_stats[i++] =
4362                 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
4363                 le32_to_cpu(stat_info->rmac_usized_frms);
4364         tmp_stats[i++] =
4365                 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
4366                 le32_to_cpu(stat_info->rmac_osized_frms);
4367         tmp_stats[i++] =
4368                 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
4369                 le32_to_cpu(stat_info->rmac_frag_frms);
4370         tmp_stats[i++] =
4371                 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
4372                 le32_to_cpu(stat_info->rmac_jabber_frms);
4373         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
4374                 le32_to_cpu(stat_info->rmac_ip);
4375         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4376         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4377         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
4378                 le32_to_cpu(stat_info->rmac_drop_ip);
4379         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
4380                 le32_to_cpu(stat_info->rmac_icmp);
4381         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4382         tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
4383                 le32_to_cpu(stat_info->rmac_udp);
4384         tmp_stats[i++] =
4385                 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
4386                 le32_to_cpu(stat_info->rmac_err_drp_udp);
4387         tmp_stats[i++] =
4388                 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
4389                 le32_to_cpu(stat_info->rmac_pause_cnt);
4390         tmp_stats[i++] =
4391                 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
4392                 le32_to_cpu(stat_info->rmac_accepted_ip);
4393         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
4394         tmp_stats[i++] = 0;
4395         tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4396         tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
4397 }
4398
4399 int s2io_ethtool_get_regs_len(struct net_device *dev)
4400 {
4401         return (XENA_REG_SPACE);
4402 }
4403
4404
4405 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4406 {
4407         nic_t *sp = dev->priv;
4408
4409         return (sp->rx_csum);
4410 }
4411 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4412 {
4413         nic_t *sp = dev->priv;
4414
4415         if (data)
4416                 sp->rx_csum = 1;
4417         else
4418                 sp->rx_csum = 0;
4419
4420         return 0;
4421 }
4422 int s2io_get_eeprom_len(struct net_device *dev)
4423 {
4424         return (XENA_EEPROM_SPACE);
4425 }
4426
4427 int s2io_ethtool_self_test_count(struct net_device *dev)
4428 {
4429         return (S2IO_TEST_LEN);
4430 }
4431 void s2io_ethtool_get_strings(struct net_device *dev,
4432                               u32 stringset, u8 * data)
4433 {
4434         switch (stringset) {
4435         case ETH_SS_TEST:
4436                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4437                 break;
4438         case ETH_SS_STATS:
4439                 memcpy(data, &ethtool_stats_keys,
4440                        sizeof(ethtool_stats_keys));
4441         }
4442 }
4443 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4444 {
4445         return (S2IO_STAT_LEN);
4446 }
4447
4448 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4449 {
4450         if (data)
4451                 dev->features |= NETIF_F_IP_CSUM;
4452         else
4453                 dev->features &= ~NETIF_F_IP_CSUM;
4454
4455         return 0;
4456 }
4457
4458
4459 static struct ethtool_ops netdev_ethtool_ops = {
4460         .get_settings = s2io_ethtool_gset,
4461         .set_settings = s2io_ethtool_sset,
4462         .get_drvinfo = s2io_ethtool_gdrvinfo,
4463         .get_regs_len = s2io_ethtool_get_regs_len,
4464         .get_regs = s2io_ethtool_gregs,
4465         .get_link = ethtool_op_get_link,
4466         .get_eeprom_len = s2io_get_eeprom_len,
4467         .get_eeprom = s2io_ethtool_geeprom,
4468         .set_eeprom = s2io_ethtool_seeprom,
4469         .get_pauseparam = s2io_ethtool_getpause_data,
4470         .set_pauseparam = s2io_ethtool_setpause_data,
4471         .get_rx_csum = s2io_ethtool_get_rx_csum,
4472         .set_rx_csum = s2io_ethtool_set_rx_csum,
4473         .get_tx_csum = ethtool_op_get_tx_csum,
4474         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4475         .get_sg = ethtool_op_get_sg,
4476         .set_sg = ethtool_op_set_sg,
4477 #ifdef NETIF_F_TSO
4478         .get_tso = ethtool_op_get_tso,
4479         .set_tso = ethtool_op_set_tso,
4480 #endif
4481         .self_test_count = s2io_ethtool_self_test_count,
4482         .self_test = s2io_ethtool_test,
4483         .get_strings = s2io_ethtool_get_strings,
4484         .phys_id = s2io_ethtool_idnic,
4485         .get_stats_count = s2io_ethtool_get_stats_count,
4486         .get_ethtool_stats = s2io_get_ethtool_stats
4487 };
4488
4489 /**
4490  *  s2io_ioctl - Entry point for the Ioctl
4491  *  @dev :  Device pointer.
4492  *  @rq :  An IOCTL specific structure, that can contain a pointer to
4493  *  a proprietary structure used to pass information to the driver.
4494  *  @cmd :  This is used to distinguish between the different commands that
4495  *  can be passed to the IOCTL functions.
4496  *  Description:
4497  *  Currently no special functionality is supported in IOCTL, hence the
4498  *  function always returns -EOPNOTSUPP.
4499  */
4500
4501 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4502 {
4503         return -EOPNOTSUPP;
4504 }
4505
4506 /**
4507  *  s2io_change_mtu - entry point to change MTU size for the device.
4508  *   @dev : device pointer.
4509  *   @new_mtu : the new MTU size for the device.
4510  *   Description: A driver entry point to change MTU size for the device.
4511  *   Before changing the MTU the device must be stopped.
4512  *  Return value:
4513  *   0 on success and an appropriate (-)ve integer as defined in errno.h
4514  *   file on failure.
4515  */
4516
4517 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4518 {
4519         nic_t *sp = dev->priv;
4520
4521         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4522                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4523                           dev->name);
4524                 return -EPERM;
4525         }
4526
4527         dev->mtu = new_mtu;
4528         if (netif_running(dev)) {
4529                 s2io_card_down(sp);
4530                 netif_stop_queue(dev);
4531                 if (s2io_card_up(sp)) {
4532                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4533                                   __FUNCTION__);
4534                 }
4535                 if (netif_queue_stopped(dev))
4536                         netif_wake_queue(dev);
4537         } else { /* Device is down */
4538                 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4539                 u64 val64 = new_mtu;
4540
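                /*
                 * Nothing to restart while the interface is down, so the new
                 * MTU is written straight into the RMAC maximum payload
                 * length register; vBIT() presumably positions the 14-bit
                 * value at the field offset expected by the hardware.
                 */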
4541                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4542         }
4543
4544         return 0;
4545 }
4546
4547 /**
4548  *  s2io_tasklet - Bottom half of the ISR.
4549  *  @dev_addr : address of the device structure cast to an unsigned long.
4550  *  Description:
4551  *  This is the tasklet or the bottom half of the ISR. This is
4552  *  an extension of the ISR which is scheduled by the scheduler to be run
4553  *  when the load on the CPU is low. All low priority tasks of the ISR can
4554  *  be pushed into the tasklet. For now the tasklet is used only to
4555  *  replenish the Rx buffers in the Rx buffer descriptors.
4556  *  Return value:
4557  *  void.
4558  */
4559
4560 static void s2io_tasklet(unsigned long dev_addr)
4561 {
4562         struct net_device *dev = (struct net_device *) dev_addr;
4563         nic_t *sp = dev->priv;
4564         int i, ret;
4565         mac_info_t *mac_control;
4566         struct config_param *config;
4567
4568         mac_control = &sp->mac_control;
4569         config = &sp->config;
4570
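        /*
         * TASKLET_IN_USE is presumably an atomic test-and-set of bit 0 in
         * sp->tasklet_status, so only one invocation refills the rings at a
         * time; the bit is cleared again at the end of this block.
         */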
4571         if (!TASKLET_IN_USE) {
4572                 for (i = 0; i < config->rx_ring_num; i++) {
4573                         ret = fill_rx_buffers(sp, i);
4574                         if (ret == -ENOMEM) {
4575                                 DBG_PRINT(ERR_DBG, "%s: Out of ",
4576                                           dev->name);
4577                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
4578                                 break;
4579                         } else if (ret == -EFILL) {
4580                                 DBG_PRINT(ERR_DBG,
4581                                           "%s: Rx Ring %d is full\n",
4582                                           dev->name, i);
4583                                 break;
4584                         }
4585                 }
4586                 clear_bit(0, (&sp->tasklet_status));
4587         }
4588 }
4589
4590 /**
4591  * s2io_set_link - Set the Link status
4592  * @data: long pointer to device private structure
4593  * Description: Sets the link status for the adapter
4594  */
4595
4596 static void s2io_set_link(unsigned long data)
4597 {
4598         nic_t *nic = (nic_t *) data;
4599         struct net_device *dev = nic->dev;
4600         XENA_dev_config_t __iomem *bar0 = nic->bar0;
4601         register u64 val64;
4602         u16 subid;
4603
4604         if (test_and_set_bit(0, &(nic->link_state))) {
4605                 /* The card is being reset, no point doing anything */
4606                 return;
4607         }
4608
4609         subid = nic->pdev->subsystem_device;
4610         /*
4611          * Allow a small delay for the NIC's self initiated
4612          * cleanup to complete.
4613          */
4614         msleep(100);
4615
4616         val64 = readq(&bar0->adapter_status);
4617         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4618                 if (LINK_IS_UP(val64)) {
4619                         val64 = readq(&bar0->adapter_control);
4620                         val64 |= ADAPTER_CNTL_EN;
4621                         writeq(val64, &bar0->adapter_control);
4622                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4623                                                              subid)) {
4624                                 val64 = readq(&bar0->gpio_control);
4625                                 val64 |= GPIO_CTRL_GPIO_0;
4626                                 writeq(val64, &bar0->gpio_control);
4627                                 val64 = readq(&bar0->gpio_control);
4628                         } else {
4629                                 val64 |= ADAPTER_LED_ON;
4630                                 writeq(val64, &bar0->adapter_control);
4631                         }
4632                         val64 = readq(&bar0->adapter_status);
4633                         if (!LINK_IS_UP(val64)) {
4634                                 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4635                                 DBG_PRINT(ERR_DBG, " Link down");
4636                                 DBG_PRINT(ERR_DBG, " after ");
4637                                 DBG_PRINT(ERR_DBG, "enabling ");
4638                                 DBG_PRINT(ERR_DBG, "device \n");
4639                         }
4640                         if (nic->device_enabled_once == FALSE) {
4641                                 nic->device_enabled_once = TRUE;
4642                         }
4643                         s2io_link(nic, LINK_UP);
4644                 } else {
4645                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
4646                                                               subid)) {
4647                                 val64 = readq(&bar0->gpio_control);
4648                                 val64 &= ~GPIO_CTRL_GPIO_0;
4649                                 writeq(val64, &bar0->gpio_control);
4650                                 val64 = readq(&bar0->gpio_control);
4651                         }
4652                         s2io_link(nic, LINK_DOWN);
4653                 }
4654         } else {                /* NIC is not Quiescent. */
4655                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4656                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4657                 netif_stop_queue(dev);
4658         }
4659         clear_bit(0, &(nic->link_state));
4660 }
4661
4662 static void s2io_card_down(nic_t * sp)
4663 {
4664         int cnt = 0;
4665         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4666         unsigned long flags;
4667         register u64 val64 = 0;
4668
4669         del_timer_sync(&sp->alarm_timer);
4670         /* If s2io_set_link task is executing, wait till it completes. */
4671         while (test_and_set_bit(0, &(sp->link_state))) {
4672                 msleep(50);
4673         }
4674         atomic_set(&sp->card_state, CARD_DOWN);
4675
4676         /* disable Tx and Rx traffic on the NIC */
4677         stop_nic(sp);
4678
4679         /* Kill tasklet. */
4680         tasklet_kill(&sp->task);
4681
4682         /* Check if the device is Quiescent and then Reset the NIC */
4683         do {
4684                 val64 = readq(&bar0->adapter_status);
4685                 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4686                         break;
4687                 }
4688
4689                 msleep(50);
4690                 cnt++;
4691                 if (cnt == 10) {
4692                         DBG_PRINT(ERR_DBG,
4693                                   "s2io_close:Device not Quiescent ");
4694                         DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4695                                   (unsigned long long) val64);
4696                         break;
4697                 }
4698         } while (1);
4699         s2io_reset(sp);
4700
4701         /* Waiting till all Interrupt handlers are complete */
4702         cnt = 0;
4703         do {
4704                 msleep(10);
4705                 if (!atomic_read(&sp->isr_cnt))
4706                         break;
4707                 cnt++;
4708         } while(cnt < 5);
4709
4710         spin_lock_irqsave(&sp->tx_lock, flags);
4711         /* Free all Tx buffers */
4712         free_tx_buffers(sp);
4713         spin_unlock_irqrestore(&sp->tx_lock, flags);
4714
4715         /* Free all Rx buffers */
4716         spin_lock_irqsave(&sp->rx_lock, flags);
4717         free_rx_buffers(sp);
4718         spin_unlock_irqrestore(&sp->rx_lock, flags);
4719
4720         clear_bit(0, &(sp->link_state));
4721 }
4722
4723 static int s2io_card_up(nic_t * sp)
4724 {
4725         int i, ret;
4726         mac_info_t *mac_control;
4727         struct config_param *config;
4728         struct net_device *dev = (struct net_device *) sp->dev;
4729
4730         /* Initialize the H/W I/O registers */
4731         if (init_nic(sp) != 0) {
4732                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4733                           dev->name);
4734                 return -ENODEV;
4735         }
4736
4737         /*
4738          * Initializing the Rx buffers. For now we are considering only 1
4739          * Rx ring and initializing buffers into 30 Rx blocks
4740          */
4741         mac_control = &sp->mac_control;
4742         config = &sp->config;
4743
4744         for (i = 0; i < config->rx_ring_num; i++) {
4745                 if ((ret = fill_rx_buffers(sp, i))) {
4746                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4747                                   dev->name);
4748                         s2io_reset(sp);
4749                         free_rx_buffers(sp);
4750                         return -ENOMEM;
4751                 }
4752                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4753                           atomic_read(&sp->rx_bufs_left[i]));
4754         }
4755
4756         /* Setting its receive mode */
4757         s2io_set_multicast(dev);
4758
4759         /* Enable tasklet for the device */
4760         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4761
4762         /* Enable Rx Traffic and interrupts on the NIC */
4763         if (start_nic(sp)) {
4764                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4765                 tasklet_kill(&sp->task);
4766                 s2io_reset(sp);
4767                 free_irq(dev->irq, dev);
4768                 free_rx_buffers(sp);
4769                 return -ENODEV;
4770         }
4771
4772         S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4773
4774         atomic_set(&sp->card_state, CARD_UP);
4775         return 0;
4776 }
4777
4778 /**
4779  * s2io_restart_nic - Resets the NIC.
4780  * @data : long pointer to the device private structure
4781  * Description:
4782  * This function is scheduled to be run by the s2io_tx_watchdog
4783  * function after 0.5 secs to reset the NIC. The idea is to reduce
4784  * the run time of the watch dog routine which is run holding a
4785  * spin lock.
4786  */
4787
4788 static void s2io_restart_nic(unsigned long data)
4789 {
4790         struct net_device *dev = (struct net_device *) data;
4791         nic_t *sp = dev->priv;
4792
4793         s2io_card_down(sp);
4794         if (s2io_card_up(sp)) {
4795                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4796                           dev->name);
4797         }
4798         netif_wake_queue(dev);
4799         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4800                   dev->name);
4801
4802 }
4803
4804 /**
4805  *  s2io_tx_watchdog - Watchdog for transmit side.
4806  *  @dev : Pointer to net device structure
4807  *  Description:
4808  *  This function is triggered if the Tx Queue is stopped
4809  *  for a pre-defined amount of time when the Interface is still up.
4810  *  If the Interface is jammed in such a situation, the hardware is
4811  *  reset (by s2io_close) and restarted again (by s2io_open) to
4812  *  overcome any problem that might have been caused in the hardware.
4813  *  Return value:
4814  *  void
4815  */
4816
4817 static void s2io_tx_watchdog(struct net_device *dev)
4818 {
4819         nic_t *sp = dev->priv;
4820
4821         if (netif_carrier_ok(dev)) {
4822                 schedule_work(&sp->rst_timer_task);
4823         }
4824 }
4825
4826 /**
4827  *   rx_osm_handler - To perform some OS related operations on SKB.
4828  *   @sp: private member of the device structure,pointer to s2io_nic structure.
4829  *   @skb : the socket buffer pointer.
4830  *   @len : length of the packet
4831  *   @cksum : FCS checksum of the frame.
4832  *   @ring_no : the ring from which this RxD was extracted.
4833  *   Description:
4834  *   This function is called by the Rx interrupt service routine to perform
4835  *   some OS related operations on the SKB before passing it to the upper
4836  *   layers. It mainly checks if the checksum is OK, if so adds it to the
4837  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
4838  *   to the upper layer. If the checksum is wrong, it increments the Rx
4839  *   packet error count, frees the SKB and returns error.
4840  *   Return value:
4841  *   SUCCESS on success and -1 on failure.
4842  */
4843 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4844 {
4845         nic_t *sp = ring_data->nic;
4846         struct net_device *dev = (struct net_device *) sp->dev;
4847         struct sk_buff *skb = (struct sk_buff *)
4848                 ((unsigned long) rxdp->Host_Control);
4849         int ring_no = ring_data->ring_no;
4850         u16 l3_csum, l4_csum;
4851 #ifdef CONFIG_2BUFF_MODE
4852         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4853         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4854         int get_block = ring_data->rx_curr_get_info.block_index;
4855         int get_off = ring_data->rx_curr_get_info.offset;
4856         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4857         unsigned char *buff;
4858 #else
4859         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
4860 #endif
4861         skb->dev = dev;
4862         if (rxdp->Control_1 & RXD_T_CODE) {
4863                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4864                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4865                           dev->name, err);
4866                 dev_kfree_skb(skb);
4867                 sp->stats.rx_crc_errors++;
4868                 atomic_dec(&sp->rx_bufs_left[ring_no]);
4869                 rxdp->Host_Control = 0;
4870                 return 0;
4871         }
4872
4873         /* Updating statistics */
4874         rxdp->Host_Control = 0;
4875         sp->rx_pkt_count++;
4876         sp->stats.rx_packets++;
4877 #ifndef CONFIG_2BUFF_MODE
4878         sp->stats.rx_bytes += len;
4879 #else
4880         sp->stats.rx_bytes += buf0_len + buf2_len;
4881 #endif
4882
4883 #ifndef CONFIG_2BUFF_MODE
4884         skb_put(skb, len);
4885 #else
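        /*
         * 2-buffer mode: buffer 0 (ba->ba_0) carries the leading bytes of
         * the frame and is copied in front of the data already DMA'd into
         * the skb; buffer 2's length is then accounted for with skb_put().
         */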
4886         buff = skb_push(skb, buf0_len);
4887         memcpy(buff, ba->ba_0, buf0_len);
4888         skb_put(skb, buf2_len);
4889 #endif
4890
4891         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4892             (sp->rx_csum)) {
4893                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4894                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4895                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4896                         /*
4897                          * NIC verifies if the Checksum of the received
4898                          * frame is Ok or not and accordingly returns
4899                          * a flag in the RxD.
4900                          */
4901                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4902                 } else {
4903                         /*
4904                          * Packet with erroneous checksum, let the
4905                          * upper layers deal with it.
4906                          */
4907                         skb->ip_summed = CHECKSUM_NONE;
4908                 }
4909         } else {
4910                 skb->ip_summed = CHECKSUM_NONE;
4911         }
4912
4913         skb->protocol = eth_type_trans(skb, dev);
4914 #ifdef CONFIG_S2IO_NAPI
4915         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
4916                 /* Queueing the vlan frame to the upper layer */
4917                 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
4918                         RXD_GET_VLAN_TAG(rxdp->Control_2));
4919         } else {
4920                 netif_receive_skb(skb);
4921         }
4922 #else
4923         if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
4924                 /* Queueing the vlan frame to the upper layer */
4925                 vlan_hwaccel_rx(skb, sp->vlgrp,
4926                         RXD_GET_VLAN_TAG(rxdp->Control_2));
4927         } else {
4928                 netif_rx(skb);
4929         }
4930 #endif
4931         dev->last_rx = jiffies;
4932         atomic_dec(&sp->rx_bufs_left[ring_no]);
4933         return SUCCESS;
4934 }
4935
4936 /**
4937  *  s2io_link - stops/starts the Tx queue.
4938  *  @sp : private member of the device structure, which is a pointer to the
4939  *  s2io_nic structure.
4940  *  @link : indicates whether link is UP/DOWN.
4941  *  Description:
4942  *  This function stops/starts the Tx queue depending on whether the link
4943  *  status of the NIC is down or up. This is called by the Alarm
4944  *  interrupt handler whenever a link change interrupt comes up.
4945  *  Return value:
4946  *  void.
4947  */
4948
4949 void s2io_link(nic_t * sp, int link)
4950 {
4951         struct net_device *dev = (struct net_device *) sp->dev;
4952
4953         if (link != sp->last_link_state) {
4954                 if (link == LINK_DOWN) {
4955                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4956                         netif_carrier_off(dev);
4957                 } else {
4958                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4959                         netif_carrier_on(dev);
4960                 }
4961         }
4962         sp->last_link_state = link;
4963 }
4964
4965 /**
4966  *  get_xena_rev_id - to identify revision ID of xena.
4967  *  @pdev : PCI Dev structure
4968  *  Description:
4969  *  Function to identify the Revision ID of xena.
4970  *  Return value:
4971  *  returns the revision ID of the device.
4972  */
4973
4974 int get_xena_rev_id(struct pci_dev *pdev)
4975 {
4976         u8 id = 0;
4977         int ret;
4978         ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4979         return id;
4980 }
4981
4982 /**
4983  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4984  *  @sp : private member of the device structure, which is a pointer to the
4985  *  s2io_nic structure.
4986  *  Description:
4987  *  This function initializes a few of the PCI and PCI-X configuration registers
4988  *  with recommended values.
4989  *  Return value:
4990  *  void
4991  */
4992
4993 static void s2io_init_pci(nic_t * sp)
4994 {
4995         u16 pci_cmd = 0, pcix_cmd = 0;
4996
4997         /* Enable Data Parity Error Recovery in PCI-X command register. */
4998         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4999                              &(pcix_cmd));
5000         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5001                               (pcix_cmd | 1));
5002         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5003                              &(pcix_cmd));
5004
5005         /* Set the PErr Response bit in PCI command register. */
5006         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5007         pci_write_config_word(sp->pdev, PCI_COMMAND,
5008                               (pci_cmd | PCI_COMMAND_PARITY));
5009         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
5010
5011         /* Forcibly disabling relaxed ordering capability of the card. */
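        /* Bit 1 of the PCI-X command register is the Enable Relaxed Ordering bit. */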
5012         pcix_cmd &= 0xfffd;
5013         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5014                               pcix_cmd);
5015         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
5016                              &(pcix_cmd));
5017 }
5018
5019 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
5020 MODULE_LICENSE("GPL");
5021 module_param(tx_fifo_num, int, 0);
5022 module_param(rx_ring_num, int, 0);
5023 module_param_array(tx_fifo_len, uint, NULL, 0);
5024 module_param_array(rx_ring_sz, uint, NULL, 0);
5025 module_param_array(rts_frm_len, uint, NULL, 0);
5026 module_param(use_continuous_tx_intrs, int, 1);
5027 module_param(rmac_pause_time, int, 0);
5028 module_param(mc_pause_threshold_q0q3, int, 0);
5029 module_param(mc_pause_threshold_q4q7, int, 0);
5030 module_param(shared_splits, int, 0);
5031 module_param(tmac_util_period, int, 0);
5032 module_param(rmac_util_period, int, 0);
5033 #ifndef CONFIG_S2IO_NAPI
5034 module_param(indicate_max_pkts, int, 0);
5035 #endif
5036
5037 /**
5038  *  s2io_init_nic - Initialization of the adapter .
5039  *  @pdev : structure containing the PCI related information of the device.
5040  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
5041  *  Description:
5042  *  The function initializes an adapter identified by the pci_dev structure.
5043  *  All OS related initialization, including memory allocation and device
5044  *  structure and private variable initialization, is done. Also the swapper
5045  *  control register is initialized to enable read and write into the I/O
5046  *  registers of the device.
5047  *  Return value:
5048  *  returns 0 on success and negative on failure.
5049  */
5050
5051 static int __devinit
5052 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
5053 {
5054         nic_t *sp;
5055         struct net_device *dev;
5056         int i, j, ret;
5057         int dma_flag = FALSE;
5058         u32 mac_up, mac_down;
5059         u64 val64 = 0, tmp64 = 0;
5060         XENA_dev_config_t __iomem *bar0 = NULL;
5061         u16 subid;
5062         mac_info_t *mac_control;
5063         struct config_param *config;
5064         int mode;
5065
5066 #ifdef CONFIG_S2IO_NAPI
5067         DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
5068 #endif
5069
5070         if ((ret = pci_enable_device(pdev))) {
5071                 DBG_PRINT(ERR_DBG,
5072                           "s2io_init_nic: pci_enable_device failed\n");
5073                 return ret;
5074         }
5075
5076         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
5077                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
5078                 dma_flag = TRUE;
5079                 if (pci_set_consistent_dma_mask
5080                     (pdev, DMA_64BIT_MASK)) {
5081                         DBG_PRINT(ERR_DBG,
5082                                   "Unable to obtain 64bit DMA for "
5083                                   "consistent allocations\n");
5084                         pci_disable_device(pdev);
5085                         return -ENOMEM;
5086                 }
5087         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
5088                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
5089         } else {
5090                 pci_disable_device(pdev);
5091                 return -ENOMEM;
5092         }
5093
5094         if (pci_request_regions(pdev, s2io_driver_name)) {
5095                 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
5096                 pci_disable_device(pdev);
5097                 return -ENODEV;
5098         }
5099
5100         dev = alloc_etherdev(sizeof(nic_t));
5101         if (dev == NULL) {
5102                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
5103                 pci_disable_device(pdev);
5104                 pci_release_regions(pdev);
5105                 return -ENODEV;
5106         }
5107
5108         pci_set_master(pdev);
5109         pci_set_drvdata(pdev, dev);
5110         SET_MODULE_OWNER(dev);
5111         SET_NETDEV_DEV(dev, &pdev->dev);
5112
5113         /*  Private member variable initialized to s2io NIC structure */
5114         sp = dev->priv;
5115         memset(sp, 0, sizeof(nic_t));
5116         sp->dev = dev;
5117         sp->pdev = pdev;
5118         sp->high_dma_flag = dma_flag;
5119         sp->device_enabled_once = FALSE;
5120
5121         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
5122                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
5123                 sp->device_type = XFRAME_II_DEVICE;
5124         else
5125                 sp->device_type = XFRAME_I_DEVICE;
5126
5127         /* Initialize some PCI/PCI-X fields of the NIC. */
5128         s2io_init_pci(sp);
5129
5130         /*
5131          * Setting the device configuration parameters.
5132          * Most of these parameters can be specified by the user during
5133          * module insertion as they are module loadable parameters. If
5134          * these parameters are not specified during load time, they
5135          * are initialized with default values.
5136          */
5137         mac_control = &sp->mac_control;
5138         config = &sp->config;
5139
5140         /* Tx side parameters. */
5141         tx_fifo_len[0] = DEFAULT_FIFO_LEN;      /* Default value. */
5142         config->tx_fifo_num = tx_fifo_num;
5143         for (i = 0; i < MAX_TX_FIFOS; i++) {
5144                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
5145                 config->tx_cfg[i].fifo_priority = i;
5146         }
5147
5148         /* mapping the QoS priority to the configured fifos */
5149         for (i = 0; i < MAX_TX_FIFOS; i++)
5150                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
5151
5152         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
5153         for (i = 0; i < config->tx_fifo_num; i++) {
5154                 config->tx_cfg[i].f_no_snoop =
5155                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
5156                 if (config->tx_cfg[i].fifo_len < 65) {
5157                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
5158                         break;
5159                 }
5160         }
5161         config->max_txds = MAX_SKB_FRAGS;
5162
5163         /* Rx side parameters. */
5164         rx_ring_sz[0] = SMALL_BLK_CNT;  /* Default value. */
5165         config->rx_ring_num = rx_ring_num;
5166         for (i = 0; i < MAX_RX_RINGS; i++) {
5167                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
5168                     (MAX_RXDS_PER_BLOCK + 1);
5169                 config->rx_cfg[i].ring_priority = i;
5170         }
5171
5172         for (i = 0; i < rx_ring_num; i++) {
5173                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
5174                 config->rx_cfg[i].f_no_snoop =
5175                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
5176         }
5177
5178         /*  Setting Mac Control parameters */
5179         mac_control->rmac_pause_time = rmac_pause_time;
5180         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
5181         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
5182
5183
5184         /* Initialize Ring buffer parameters. */
5185         for (i = 0; i < config->rx_ring_num; i++)
5186                 atomic_set(&sp->rx_bufs_left[i], 0);
5187
5188         /* Initialize the number of ISRs currently running */
5189         atomic_set(&sp->isr_cnt, 0);
5190
5191         /*  initialize the shared memory used by the NIC and the host */
5192         if (init_shared_mem(sp)) {
5193                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
5194                           dev->name);
5195                 ret = -ENOMEM;
5196                 goto mem_alloc_failed;
5197         }
5198
5199         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
5200                                      pci_resource_len(pdev, 0));
5201         if (!sp->bar0) {
5202                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
5203                           dev->name);
5204                 ret = -ENOMEM;
5205                 goto bar0_remap_failed;
5206         }
5207
5208         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
5209                                      pci_resource_len(pdev, 2));
5210         if (!sp->bar1) {
5211                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
5212                           dev->name);
5213                 ret = -ENOMEM;
5214                 goto bar1_remap_failed;
5215         }
5216
5217         dev->irq = pdev->irq;
5218         dev->base_addr = (unsigned long) sp->bar0;
5219
5220         /* Initializing the BAR1 address as the start of the FIFO pointer. */
5221         for (j = 0; j < MAX_TX_FIFOS; j++) {
5222                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
5223                     (sp->bar1 + (j * 0x00020000));
5224         }
5225
5226         /*  Driver entry points */
5227         dev->open = &s2io_open;
5228         dev->stop = &s2io_close;
5229         dev->hard_start_xmit = &s2io_xmit;
5230         dev->get_stats = &s2io_get_stats;
5231         dev->set_multicast_list = &s2io_set_multicast;
5232         dev->do_ioctl = &s2io_ioctl;
5233         dev->change_mtu = &s2io_change_mtu;
5234         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
5235         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5236         dev->vlan_rx_register = s2io_vlan_rx_register;
5237         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
5238
5239         /*
5240          * will use eth_mac_addr() for  dev->set_mac_address
5241          * mac address will be set every time dev->open() is called
5242          */
5243 #if defined(CONFIG_S2IO_NAPI)
5244         dev->poll = s2io_poll;
5245         dev->weight = 32;
5246 #endif
5247
5248         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
5249         if (sp->high_dma_flag == TRUE)
5250                 dev->features |= NETIF_F_HIGHDMA;
5251 #ifdef NETIF_F_TSO
5252         dev->features |= NETIF_F_TSO;
5253 #endif
5254
5255         dev->tx_timeout = &s2io_tx_watchdog;
5256         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
5257         INIT_WORK(&sp->rst_timer_task,
5258                   (void (*)(void *)) s2io_restart_nic, dev);
5259         INIT_WORK(&sp->set_link_task,
5260                   (void (*)(void *)) s2io_set_link, sp);
5261
5262         if (!(sp->device_type & XFRAME_II_DEVICE)) {
5263                 pci_save_state(sp->pdev);
5264         }
5265
5266         /* Setting swapper control on the NIC, for proper reset operation */
5267         if (s2io_set_swapper(sp)) {
5268                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
5269                           dev->name);
5270                 ret = -EAGAIN;
5271                 goto set_swap_failed;
5272         }
5273
5274         /* Verify if the Herc works on the slot its placed into */
5275         if (sp->device_type & XFRAME_II_DEVICE) {
5276                 mode = s2io_verify_pci_mode(sp);
5277                 if (mode < 0) {
5278                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
5279                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
5280                         ret = -EBADSLT;
5281                         goto set_swap_failed;
5282                 }
5283         }
5284
5285         /* Not needed for Herc */
5286         if (sp->device_type & XFRAME_I_DEVICE) {
5287                 /*
5288                  * Fix for all "FFs" MAC address problems observed on
5289                  * Alpha platforms
5290                  */
5291                 fix_mac_address(sp);
5292                 s2io_reset(sp);
5293         }
5294
5295         /*
5296          * MAC address initialization.
5297          * For now only one mac address will be read and used.
5298          */
5299         bar0 = sp->bar0;
5300         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5301             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5302         writeq(val64, &bar0->rmac_addr_cmd_mem);
5303         wait_for_cmd_complete(sp);
5304
5305         tmp64 = readq(&bar0->rmac_addr_data0_mem);
5306         mac_down = (u32) tmp64;
5307         mac_up = (u32) (tmp64 >> 32);
5308
5309         memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
5310
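        /*
         * The six MAC address bytes occupy the upper 48 bits of
         * rmac_addr_data0_mem, most significant byte first, hence the
         * byte-by-byte extraction below.
         */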
5311         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5312         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5313         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5314         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5315         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5316         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5317
5318         /*  Set the factory defined MAC address initially   */
5319         dev->addr_len = ETH_ALEN;
5320         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5321
5322         /*
5323          * Initialize the tasklet status and link state flags
5324          * and the card state parameter
5325          */
5326         atomic_set(&(sp->card_state), 0);
5327         sp->tasklet_status = 0;
5328         sp->link_state = 0;
5329
5330         /* Initialize spinlocks */
5331         spin_lock_init(&sp->tx_lock);
5332 #ifndef CONFIG_S2IO_NAPI
5333         spin_lock_init(&sp->put_lock);
5334 #endif
5335         spin_lock_init(&sp->rx_lock);
5336
5337         /*
5338          * SXE-002: Configure link and activity LED to init state
5339          * on driver load.
5340          */
5341         subid = sp->pdev->subsystem_device;
5342         if ((subid & 0xFF) >= 0x07) {
5343                 val64 = readq(&bar0->gpio_control);
5344                 val64 |= 0x0000800000000000ULL;
5345                 writeq(val64, &bar0->gpio_control);
5346                 val64 = 0x0411040400000000ULL;
5347                 writeq(val64, (void __iomem *) bar0 + 0x2700);
5348                 val64 = readq(&bar0->gpio_control);
5349         }
5350
5351         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
5352
5353         if (register_netdev(dev)) {
5354                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5355                 ret = -ENODEV;
5356                 goto register_failed;
5357         }
5358
5359         if (sp->device_type & XFRAME_II_DEVICE) {
5360                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
5361                           dev->name);
5362                 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5363                                 get_xena_rev_id(sp->pdev),
5364                                 s2io_driver_version);
5365                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5366                           sp->def_mac_addr[0].mac_addr[0],
5367                           sp->def_mac_addr[0].mac_addr[1],
5368                           sp->def_mac_addr[0].mac_addr[2],
5369                           sp->def_mac_addr[0].mac_addr[3],
5370                           sp->def_mac_addr[0].mac_addr[4],
5371                           sp->def_mac_addr[0].mac_addr[5]);
5372                 mode = s2io_print_pci_mode(sp);
5373                 if (mode < 0) {
5374                         DBG_PRINT(ERR_DBG, "Unsupported PCI bus mode\n");
5375                         ret = -EBADSLT;
5376                         goto set_swap_failed;
5377                 }
5378         } else {
5379                 DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
5380                           dev->name);
5381                 DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
5382                                         get_xena_rev_id(sp->pdev),
5383                                         s2io_driver_version);
5384                 DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
5385                           sp->def_mac_addr[0].mac_addr[0],
5386                           sp->def_mac_addr[0].mac_addr[1],
5387                           sp->def_mac_addr[0].mac_addr[2],
5388                           sp->def_mac_addr[0].mac_addr[3],
5389                           sp->def_mac_addr[0].mac_addr[4],
5390                           sp->def_mac_addr[0].mac_addr[5]);
5391         }
5392
5393         /* Initialize device name */
5394         strcpy(sp->name, dev->name);
5395         if (sp->device_type & XFRAME_II_DEVICE)
5396                 strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
5397         else
5398                 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5399
5400         /*
5401          * Mark the link state as down at this point; when the link change
5402          * interrupt arrives, the state will be updated automatically to
5403          * reflect the actual link status.
5404          */
5405         netif_carrier_off(dev);
5406
5407         return 0;
5408
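             /*
              * Error unwind: each label below frees only the resources that
              * were successfully acquired before the corresponding failure
              * point, in reverse order of acquisition.
              */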
5409       register_failed:
5410       set_swap_failed:
5411         iounmap(sp->bar1);
5412       bar1_remap_failed:
5413         iounmap(sp->bar0);
5414       bar0_remap_failed:
5415       mem_alloc_failed:
5416         free_shared_mem(sp);
5417         pci_disable_device(pdev);
5418         pci_release_regions(pdev);
5419         pci_set_drvdata(pdev, NULL);
5420         free_netdev(dev);
5421
5422         return ret;
5423 }
5424
5425 /**
5426  * s2io_rem_nic - Free the PCI device
5427  * @pdev: structure containing the PCI related information of the device.
5428  * Description: This function is called by the PCI subsystem to release a
5429  * PCI device and free all resources held by the device. This could
5430  * be in response to a hot-plug event or when the driver is being removed
5431  * from memory.
5432  */
5433
5434 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
5435 {
5436         struct net_device *dev =
5437             (struct net_device *) pci_get_drvdata(pdev);
5438         nic_t *sp;
5439
5440         if (dev == NULL) {
5441                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
5442                 return;
5443         }
5444
5445         sp = dev->priv;
5446         unregister_netdev(dev);
5447
5448         free_shared_mem(sp);
5449         iounmap(sp->bar0);
5450         iounmap(sp->bar1);
5451         pci_disable_device(pdev);
5452         pci_release_regions(pdev);
5453         pci_set_drvdata(pdev, NULL);
5454         free_netdev(dev);
5455 }
5456
5457 /**
5458  * s2io_starter - Entry point for the driver
5459  * Description: This function is the entry point for the driver. It registers
5460  * the driver with the PCI subsystem, which then probes the supported devices.
5461  */
5462
5463 int __init s2io_starter(void)
5464 {
5465         return pci_module_init(&s2io_driver);
5466 }
5467
5468 /**
5469  * s2io_closer - Cleanup routine for the driver
5470  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5471  */
5472
5473 void s2io_closer(void)
5474 {
5475         pci_unregister_driver(&s2io_driver);
5476         DBG_PRINT(INIT_DBG, "cleanup done\n");
5477 }
5478
5479 module_init(s2io_starter);
5480 module_exit(s2io_closer);