/*
 * drivers/net/s2io.c
 * (gitweb page header removed; chunk corresponds to the commit
 * "s2io: add missing block braces to multistatement if statement")
 */
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3  * Copyright(c) 2002-2007 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watch dog function. Also for
17  *                        patiently answering all those innumerable
 *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all code part that were
22  *                        deprecated and also styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
29  *
30  * rx_ring_num : This can be used to program the number of receive rings used
31  * in the driver.
32  * rx_ring_sz: This defines the number of receive blocks each ring can have.
33  *     This is also an array of size 8.
34  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35  *              values are 1, 2.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37  * tx_fifo_len: This too is an array of 8. Each element defines the number of
38  * Tx descriptors that can be associated with each corresponding FIFO.
39  * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40  *     2(MSI_X). Default value is '2(MSI_X)'
41  * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42  *     Possible values '1' for enable '0' for disable. Default is '0'
43  * lro_max_pkts: This parameter defines maximum number of packets can be
44  *     aggregated as a single large packet
45  * napi: This parameter used to enable/disable NAPI (polling Rx)
46  *     Possible values '1' for enable and '0' for disable. Default is '1'
47  * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48  *      Possible values '1' for enable and '0' for disable. Default is '0'
49  * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50  *                 Possible values '1' for enable , '0' for disable.
51  *                 Default is '2' - which means disable in promisc mode
52  *                 and enable in non-promiscuous mode.
53  * multiq: This parameter used to enable/disable MULTIQUEUE support.
54  *      Possible values '1' for enable and '0' for disable. Default is '0'
55  ************************************************************************/
56
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <linux/ip.h>
76 #include <linux/tcp.h>
77 #include <net/tcp.h>
78
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
81 #include <asm/io.h>
82 #include <asm/div64.h>
83 #include <asm/irq.h>
84
85 /* local include */
86 #include "s2io.h"
87 #include "s2io-regs.h"
88
/* Driver version reported via MODULE_VERSION and ethtool. */
#define DRV_VERSION "2.0.26.23"

/* S2io Driver name & version. */
static char s2io_driver_name[] = "Neterion";
static char s2io_driver_version[] = DRV_VERSION;

/*
 * Per-RxD-mode descriptor geometry, indexed by nic->rxd_mode:
 * descriptor size in bytes and descriptors per block.
 * (Assumes the two modes map to indices 0/1 -- confirm against s2io.h.)
 */
static int rxd_size[2] = {32,48};
static int rxd_count[2] = {127,85};
97
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
99 {
100         int ret;
101
102         ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103                 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105         return ret;
106 }
107
/*
 * Cards with the following subsystem_ids have a link state indication
 * problem: 600B, 600C, 600D, 640B, 640C and 640D.
 * The macro below identifies these cards given the subsystem_id.
 * Arguments and the full expansion are parenthesized so the macro is
 * safe inside larger expressions (the old form misparsed when combined
 * with operators of lower precedence than ?:).
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid)	\
	(((dev_type) == XFRAME_I_DEVICE) ?			\
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) ||	\
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)

/* True when neither remote nor local RMAC fault is flagged in @val64. */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120
121 static inline int is_s2io_card_up(const struct s2io_nic * sp)
122 {
123         return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
124 }
125
/* Ethtool related variables and Macros. */
/* Names of the ethtool self-test cases (ETHTOOL_TEST string set). */
static char s2io_gstrings[][ETH_GSTRING_LEN] = {
	"Register test\t(offline)",
	"Eeprom test\t(offline)",
	"Link test\t(online)",
	"RLDRAM test\t(offline)",
	"BIST Test\t(offline)"
};
134
/*
 * Ethtool statistic names for the base (Xframe I) hardware counters.
 * NOTE(review): order presumably mirrors the adapter's hardware stats
 * block -- verify against the stat structures in s2io.h before
 * reordering.
 */
static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
	/* Transmit MAC (TMAC) counters */
	{"tmac_frms"},
	{"tmac_data_octets"},
	{"tmac_drop_frms"},
	{"tmac_mcst_frms"},
	{"tmac_bcst_frms"},
	{"tmac_pause_ctrl_frms"},
	{"tmac_ttl_octets"},
	{"tmac_ucst_frms"},
	{"tmac_nucst_frms"},
	{"tmac_any_err_frms"},
	{"tmac_ttl_less_fb_octets"},
	{"tmac_vld_ip_octets"},
	{"tmac_vld_ip"},
	{"tmac_drop_ip"},
	{"tmac_icmp"},
	{"tmac_rst_tcp"},
	{"tmac_tcp"},
	{"tmac_udp"},
	/* Receive MAC (RMAC) counters */
	{"rmac_vld_frms"},
	{"rmac_data_octets"},
	{"rmac_fcs_err_frms"},
	{"rmac_drop_frms"},
	{"rmac_vld_mcst_frms"},
	{"rmac_vld_bcst_frms"},
	{"rmac_in_rng_len_err_frms"},
	{"rmac_out_rng_len_err_frms"},
	{"rmac_long_frms"},
	{"rmac_pause_ctrl_frms"},
	{"rmac_unsup_ctrl_frms"},
	{"rmac_ttl_octets"},
	{"rmac_accepted_ucst_frms"},
	{"rmac_accepted_nucst_frms"},
	{"rmac_discarded_frms"},
	{"rmac_drop_events"},
	{"rmac_ttl_less_fb_octets"},
	{"rmac_ttl_frms"},
	{"rmac_usized_frms"},
	{"rmac_osized_frms"},
	{"rmac_frag_frms"},
	{"rmac_jabber_frms"},
	/* Frame-size histogram */
	{"rmac_ttl_64_frms"},
	{"rmac_ttl_65_127_frms"},
	{"rmac_ttl_128_255_frms"},
	{"rmac_ttl_256_511_frms"},
	{"rmac_ttl_512_1023_frms"},
	{"rmac_ttl_1024_1518_frms"},
	{"rmac_ip"},
	{"rmac_ip_octets"},
	{"rmac_hdr_err_ip"},
	{"rmac_drop_ip"},
	{"rmac_icmp"},
	{"rmac_tcp"},
	{"rmac_udp"},
	{"rmac_err_drp_udp"},
	{"rmac_xgmii_err_sym"},
	/* Per-queue frame / queue-full counters */
	{"rmac_frms_q0"},
	{"rmac_frms_q1"},
	{"rmac_frms_q2"},
	{"rmac_frms_q3"},
	{"rmac_frms_q4"},
	{"rmac_frms_q5"},
	{"rmac_frms_q6"},
	{"rmac_frms_q7"},
	{"rmac_full_q0"},
	{"rmac_full_q1"},
	{"rmac_full_q2"},
	{"rmac_full_q3"},
	{"rmac_full_q4"},
	{"rmac_full_q5"},
	{"rmac_full_q6"},
	{"rmac_full_q7"},
	{"rmac_pause_cnt"},
	{"rmac_xgmii_data_err_cnt"},
	{"rmac_xgmii_ctrl_err_cnt"},
	{"rmac_accepted_ip"},
	{"rmac_err_tcp"},
	/* PCI / DMA transaction counters */
	{"rd_req_cnt"},
	{"new_rd_req_cnt"},
	{"new_rd_req_rtry_cnt"},
	{"rd_rtry_cnt"},
	{"wr_rtry_rd_ack_cnt"},
	{"wr_req_cnt"},
	{"new_wr_req_cnt"},
	{"new_wr_req_rtry_cnt"},
	{"wr_rtry_cnt"},
	{"wr_disc_cnt"},
	{"rd_rtry_wr_ack_cnt"},
	{"txp_wr_cnt"},
	{"txd_rd_cnt"},
	{"txd_wr_cnt"},
	{"rxd_rd_cnt"},
	{"rxd_wr_cnt"},
	{"txf_rd_cnt"},
	{"rxf_wr_cnt"}
};
231
/*
 * Extra statistic names exposed only via XFRAME_II_STAT_LEN (see the
 * stat-length macros below); not included in the Xframe I string set.
 */
static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
	{"rmac_ttl_1519_4095_frms"},
	{"rmac_ttl_4096_8191_frms"},
	{"rmac_ttl_8192_max_frms"},
	{"rmac_ttl_gt_max_frms"},
	{"rmac_osized_alt_frms"},
	{"rmac_jabber_alt_frms"},
	{"rmac_gt_max_alt_frms"},
	{"rmac_vlan_frms"},
	{"rmac_len_discard"},
	{"rmac_fcs_discard"},
	{"rmac_pf_discard"},
	{"rmac_da_discard"},
	{"rmac_red_discard"},
	{"rmac_rts_discard"},
	{"rmac_ingm_full_discard"},
	{"link_fault_cnt"}
};
250
/*
 * Software (driver-maintained) statistic names.  The first entry is a
 * section banner shown in the ethtool -S output, not a counter.
 */
static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
	{"\n DRIVER STATISTICS"},
	{"single_bit_ecc_errs"},
	{"double_bit_ecc_errs"},
	{"parity_err_cnt"},
	{"serious_err_cnt"},
	{"soft_reset_cnt"},
	{"fifo_full_cnt"},
	{"ring_0_full_cnt"},
	{"ring_1_full_cnt"},
	{"ring_2_full_cnt"},
	{"ring_3_full_cnt"},
	{"ring_4_full_cnt"},
	{"ring_5_full_cnt"},
	{"ring_6_full_cnt"},
	{"ring_7_full_cnt"},
	/* Transceiver (XPAK) alarm / warning counters */
	{"alarm_transceiver_temp_high"},
	{"alarm_transceiver_temp_low"},
	{"alarm_laser_bias_current_high"},
	{"alarm_laser_bias_current_low"},
	{"alarm_laser_output_power_high"},
	{"alarm_laser_output_power_low"},
	{"warn_transceiver_temp_high"},
	{"warn_transceiver_temp_low"},
	{"warn_laser_bias_current_high"},
	{"warn_laser_bias_current_low"},
	{"warn_laser_output_power_high"},
	{"warn_laser_output_power_low"},
	/* Large Receive Offload bookkeeping */
	{"lro_aggregated_pkts"},
	{"lro_flush_both_count"},
	{"lro_out_of_sequence_pkts"},
	{"lro_flush_due_to_max_pkts"},
	{"lro_avg_aggr_pkts"},
	{"mem_alloc_fail_cnt"},
	{"pci_map_fail_cnt"},
	{"watchdog_timer_cnt"},
	{"mem_allocated"},
	{"mem_freed"},
	{"link_up_cnt"},
	{"link_down_cnt"},
	{"link_up_time"},
	{"link_down_time"},
	/* Tx/Rx completion transfer-code error counters */
	{"tx_tcode_buf_abort_cnt"},
	{"tx_tcode_desc_abort_cnt"},
	{"tx_tcode_parity_err_cnt"},
	{"tx_tcode_link_loss_cnt"},
	{"tx_tcode_list_proc_err_cnt"},
	{"rx_tcode_parity_err_cnt"},
	{"rx_tcode_abort_cnt"},
	{"rx_tcode_parity_abort_cnt"},
	{"rx_tcode_rda_fail_cnt"},
	{"rx_tcode_unkn_prot_cnt"},
	{"rx_tcode_fcs_err_cnt"},
	{"rx_tcode_buf_size_err_cnt"},
	{"rx_tcode_rxd_corrupt_cnt"},
	{"rx_tcode_unkn_err_cnt"},
	/* Per-block hardware error counters */
	{"tda_err_cnt"},
	{"pfc_err_cnt"},
	{"pcc_err_cnt"},
	{"tti_err_cnt"},
	{"tpa_err_cnt"},
	{"sm_err_cnt"},
	{"lso_err_cnt"},
	{"mac_tmac_err_cnt"},
	{"mac_rmac_err_cnt"},
	{"xgxs_txgxs_err_cnt"},
	{"xgxs_rxgxs_err_cnt"},
	{"rc_err_cnt"},
	{"prc_pcix_err_cnt"},
	{"rpa_err_cnt"},
	{"rda_err_cnt"},
	{"rti_err_cnt"},
	{"mc_err_cnt"}
};
325
/* Number of entries in each ethtool statistics key table above. */
#define S2IO_XENA_STAT_LEN      ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN  ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN    ARRAY_SIZE(ethtool_driver_stats_keys)

/* Xframe I exports base + driver stats; Xframe II adds the enhanced set. */
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)

#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN   ARRAY_SIZE(s2io_gstrings)
/* Parenthesized: the old unparenthesized expansion misparses in expressions. */
#define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
338
/*
 * Initialize and arm @timer to invoke @handle(@arg) after @exp jiffies.
 * Wrapped in do { } while (0) so the multi-statement body expands to a
 * single statement and is safe in unbraced if/else arms; the old form
 * also leaked a trailing line-continuation onto the following line.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
	do {							\
		init_timer(&timer);				\
		timer.function = handle;			\
		timer.data = (unsigned long)arg;		\
		mod_timer(&timer, (jiffies + exp));		\
	} while (0)

345 /* copy mac addr to def_mac_addr array */
346 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347 {
348         sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349         sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350         sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351         sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352         sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353         sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354 }
355 /* Add the vlan */
356 static void s2io_vlan_rx_register(struct net_device *dev,
357                                         struct vlan_group *grp)
358 {
359         int i;
360         struct s2io_nic *nic = dev->priv;
361         unsigned long flags[MAX_TX_FIFOS];
362         struct mac_info *mac_control = &nic->mac_control;
363         struct config_param *config = &nic->config;
364
365         for (i = 0; i < config->tx_fifo_num; i++)
366                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
367
368         nic->vlgrp = grp;
369         for (i = config->tx_fifo_num - 1; i >= 0; i--)
370                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
371                                 flags[i]);
372 }
373
/*
 * A flag indicating whether the 'RX_PA_CFG_STRIP_VLAN_TAG' bit is
 * currently set in hardware (non-zero) or not (zero).
 */
static int vlan_strip_flag;
376
377 /* Unregister the vlan */
378 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
379 {
380         int i;
381         struct s2io_nic *nic = dev->priv;
382         unsigned long flags[MAX_TX_FIFOS];
383         struct mac_info *mac_control = &nic->mac_control;
384         struct config_param *config = &nic->config;
385
386         for (i = 0; i < config->tx_fifo_num; i++)
387                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
388
389         if (nic->vlgrp)
390                 vlan_group_set_device(nic->vlgrp, vid, NULL);
391
392         for (i = config->tx_fifo_num - 1; i >= 0; i--)
393                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
394                         flags[i]);
395 }
396
397 /*
398  * Constants to be programmed into the Xena's registers, to configure
399  * the XAUI.
400  */
401
/* Terminator for the DTX configuration sequences below. */
#define END_SIGN        0x0

/*
 * XAUI configuration sequence for Xframe II (Herc): alternating
 * set-address / write-data register values, END_SIGN terminated.
 */
static const u64 herc_act_dtx_cfg[] = {
	/* Set address */
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	/* Write data */
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	/* Set address */
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	/* Write data */
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	/* Set address */
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	/* Write data */
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	/* Set address */
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	/* Done */
	END_SIGN
};
423
/*
 * XAUI configuration sequence for Xframe I (Xena), same alternating
 * set-address / write-data layout as herc_act_dtx_cfg above.
 */
static const u64 xena_dtx_cfg[] = {
	/* Set address */
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	/* Write data */
	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
	/* Set address */
	0x8001051500000000ULL, 0x80010515000000E0ULL,
	/* Write data */
	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
	/* Set address */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	/* Write data */
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
439
/*
 * Constants for fixing the MacAddress problem seen mostly on
 * Alpha machines.  Register write sequence, END_SIGN terminated.
 */
static const u64 fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
	END_SIGN
};
461
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/* Module Loadable parameters. */
S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
S2IO_PARM_INT(rx_ring_num, 1);
S2IO_PARM_INT(multiq, 0);
S2IO_PARM_INT(rx_ring_mode, 1);
S2IO_PARM_INT(use_continuous_tx_intrs, 1);
S2IO_PARM_INT(rmac_pause_time, 0x100);
S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
S2IO_PARM_INT(shared_splits, 0);
S2IO_PARM_INT(tmac_util_period, 5);
S2IO_PARM_INT(rmac_util_period, 5);
S2IO_PARM_INT(l3l4hdr_size, 128);
/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
/* Frequency of Rx desc syncs expressed as power of 2 */
S2IO_PARM_INT(rxsync_frequency, 3);
/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
S2IO_PARM_INT(intr_type, 2);
/* Large receive offload feature ("lro" on the module command line) */
static unsigned int lro_enable;
module_param_named(lro, lro_enable, uint, 0);

/* Max pkts to be aggregated by LRO at one time. If not specified,
 * aggregation happens until we hit max IP pkt size(64K)
 */
S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
S2IO_PARM_INT(indicate_max_pkts, 0);

S2IO_PARM_INT(napi, 1);
S2IO_PARM_INT(ufo, 0);
S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);

/* Per-FIFO Tx ring lengths; FIFO 0 gets a larger default. */
static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
/* Rx block count per ring. */
static unsigned int rx_ring_sz[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
/* Per-ring receive frame length limits (0 = unset -- confirm semantics). */
static unsigned int rts_frm_len[MAX_RX_RINGS] =
    {[0 ...(MAX_RX_RINGS - 1)] = 0 };

module_param_array(tx_fifo_len, uint, NULL, 0);
module_param_array(rx_ring_sz, uint, NULL, 0);
module_param_array(rts_frm_len, uint, NULL, 0);
509
/*
 * S2IO device table.
 * This table lists all the devices that this driver supports.
 */
static struct pci_device_id s2io_tbl[] __devinitdata = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
	 PCI_ANY_ID, PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
	 PCI_ANY_ID, PCI_ANY_ID},
	{0,}	/* terminating entry */
};

MODULE_DEVICE_TABLE(pci, s2io_tbl);
527
/* PCI error-recovery callbacks (EEH/AER detach, reset and resume). */
static struct pci_error_handlers s2io_err_handler = {
	.error_detected = s2io_io_error_detected,
	.slot_reset = s2io_io_slot_reset,
	.resume = s2io_io_resume,
};

/* PCI driver glue tying the device table to probe/remove. */
static struct pci_driver s2io_driver = {
      .name = "S2IO",
      .id_table = s2io_tbl,
      .probe = s2io_init_nic,
      .remove = __devexit_p(s2io_rem_nic),
      .err_handler = &s2io_err_handler,
};
541
/*
 * A simplifier macro used both by init and free shared_mem Fns():
 * number of pages needed to hold @len items at @per_each items/page
 * (ceiling division).  Fully parenthesized -- the old form produced
 * wrong results when @per_each was an expression (e.g. "a + b").
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
544
545 /* netqueue manipulation helper functions */
/*
 * Stop every Tx queue of this NIC.  With CONFIG_NETDEVICES_MULTIQUEUE
 * and multiq enabled, each kernel subqueue is stopped; otherwise the
 * driver marks its own per-FIFO state stopped and halts the single
 * netdev queue.  Note the if/else spans the #endif: the braced block
 * is the else-arm when multiqueue support is compiled in.
 */
static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
{
	int i;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			netif_stop_subqueue(sp->dev, i);
	} else
#endif
	{
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
		netif_stop_queue(sp->dev);
	}
}
561
/*
 * Stop the Tx queue mapped to @fifo_no.  In non-multiqueue operation
 * the per-FIFO soft state is marked stopped and the single netdev
 * queue is halted.
 */
static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq)
		netif_stop_subqueue(sp->dev, fifo_no);
	else
#endif
	{
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_STOP;
		netif_stop_queue(sp->dev);
	}
}
575
/*
 * (Re)start every Tx queue: per-subqueue in multiqueue mode, otherwise
 * mark all FIFO soft states started and start the single netdev queue.
 */
static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
{
	int i;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			netif_start_subqueue(sp->dev, i);
	} else
#endif
	{
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
		netif_start_queue(sp->dev);
	}
}
591
/*
 * Start the Tx queue mapped to @fifo_no; non-multiqueue operation
 * updates the per-FIFO soft state and starts the single netdev queue.
 */
static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
{
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq)
		netif_start_subqueue(sp->dev, fifo_no);
	else
#endif
	{
		sp->mac_control.fifos[fifo_no].queue_state =
			FIFO_QUEUE_START;
		netif_start_queue(sp->dev);
	}
}
605
/*
 * Wake every Tx queue (restart queues previously stopped under
 * back-pressure): per-subqueue in multiqueue mode, otherwise mark all
 * FIFO soft states started and wake the single netdev queue.
 */
static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
	int i;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			netif_wake_subqueue(sp->dev, i);
	} else
#endif
	{
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
		netif_wake_queue(sp->dev);
	}
}
621
622 static inline void s2io_wake_tx_queue(
623         struct fifo_info *fifo, int cnt, u8 multiq)
624 {
625
626 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
627         if (multiq) {
628                 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
629                         netif_wake_subqueue(fifo->dev, fifo->fifo_no);
630         } else
631 #endif
632         if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
633                 if (netif_queue_stopped(fifo->dev)) {
634                         fifo->queue_state = FIFO_QUEUE_START;
635                         netif_wake_queue(fifo->dev);
636                 }
637         }
638 }
639
640 /**
641  * init_shared_mem - Allocation and Initialization of Memory
642  * @nic: Device private variable.
643  * Description: The function allocates all the memory areas shared
644  * between the NIC and the driver. This includes Tx descriptors,
645  * Rx descriptors and the statistics block.
646  */
647
648 static int init_shared_mem(struct s2io_nic *nic)
649 {
650         u32 size;
651         void *tmp_v_addr, *tmp_v_addr_next;
652         dma_addr_t tmp_p_addr, tmp_p_addr_next;
653         struct RxD_block *pre_rxd_blk = NULL;
654         int i, j, blk_cnt;
655         int lst_size, lst_per_page;
656         struct net_device *dev = nic->dev;
657         unsigned long tmp;
658         struct buffAdd *ba;
659
660         struct mac_info *mac_control;
661         struct config_param *config;
662         unsigned long long mem_allocated = 0;
663
664         mac_control = &nic->mac_control;
665         config = &nic->config;
666
667
668         /* Allocation and initialization of TXDLs in FIOFs */
669         size = 0;
670         for (i = 0; i < config->tx_fifo_num; i++) {
671                 size += config->tx_cfg[i].fifo_len;
672         }
673         if (size > MAX_AVAILABLE_TXDS) {
674                 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
675                 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
676                 return -EINVAL;
677         }
678
679         size = 0;
680         for (i = 0; i < config->tx_fifo_num; i++) {
681                 size = config->tx_cfg[i].fifo_len;
682                 /*
683                  * Legal values are from 2 to 8192
684                  */
685                 if (size < 2) {
686                         DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
687                         DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
688                         DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
689                                 "are 2 to 8192\n");
690                         return -EINVAL;
691                 }
692         }
693
694         lst_size = (sizeof(struct TxD) * config->max_txds);
695         lst_per_page = PAGE_SIZE / lst_size;
696
697         for (i = 0; i < config->tx_fifo_num; i++) {
698                 int fifo_len = config->tx_cfg[i].fifo_len;
699                 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
700                 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
701                                                           GFP_KERNEL);
702                 if (!mac_control->fifos[i].list_info) {
703                         DBG_PRINT(INFO_DBG,
704                                   "Malloc failed for list_info\n");
705                         return -ENOMEM;
706                 }
707                 mem_allocated += list_holder_size;
708         }
709         for (i = 0; i < config->tx_fifo_num; i++) {
710                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
711                                                 lst_per_page);
712                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
713                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
714                     config->tx_cfg[i].fifo_len - 1;
715                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
716                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
717                     config->tx_cfg[i].fifo_len - 1;
718                 mac_control->fifos[i].fifo_no = i;
719                 mac_control->fifos[i].nic = nic;
720                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
721                 mac_control->fifos[i].dev = dev;
722
723                 for (j = 0; j < page_num; j++) {
724                         int k = 0;
725                         dma_addr_t tmp_p;
726                         void *tmp_v;
727                         tmp_v = pci_alloc_consistent(nic->pdev,
728                                                      PAGE_SIZE, &tmp_p);
729                         if (!tmp_v) {
730                                 DBG_PRINT(INFO_DBG,
731                                           "pci_alloc_consistent ");
732                                 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
733                                 return -ENOMEM;
734                         }
735                         /* If we got a zero DMA address(can happen on
736                          * certain platforms like PPC), reallocate.
737                          * Store virtual address of page we don't want,
738                          * to be freed later.
739                          */
740                         if (!tmp_p) {
741                                 mac_control->zerodma_virt_addr = tmp_v;
742                                 DBG_PRINT(INIT_DBG,
743                                 "%s: Zero DMA address for TxDL. ", dev->name);
744                                 DBG_PRINT(INIT_DBG,
745                                 "Virtual address %p\n", tmp_v);
746                                 tmp_v = pci_alloc_consistent(nic->pdev,
747                                                      PAGE_SIZE, &tmp_p);
748                                 if (!tmp_v) {
749                                         DBG_PRINT(INFO_DBG,
750                                           "pci_alloc_consistent ");
751                                         DBG_PRINT(INFO_DBG, "failed for TxDL\n");
752                                         return -ENOMEM;
753                                 }
754                                 mem_allocated += PAGE_SIZE;
755                         }
756                         while (k < lst_per_page) {
757                                 int l = (j * lst_per_page) + k;
758                                 if (l == config->tx_cfg[i].fifo_len)
759                                         break;
760                                 mac_control->fifos[i].list_info[l].list_virt_addr =
761                                     tmp_v + (k * lst_size);
762                                 mac_control->fifos[i].list_info[l].list_phy_addr =
763                                     tmp_p + (k * lst_size);
764                                 k++;
765                         }
766                 }
767         }
768
769         for (i = 0; i < config->tx_fifo_num; i++) {
770                 size = config->tx_cfg[i].fifo_len;
771                 mac_control->fifos[i].ufo_in_band_v
772                         = kcalloc(size, sizeof(u64), GFP_KERNEL);
773                 if (!mac_control->fifos[i].ufo_in_band_v)
774                         return -ENOMEM;
775                 mem_allocated += (size * sizeof(u64));
776         }
777
778         /* Allocation and initialization of RXDs in Rings */
779         size = 0;
780         for (i = 0; i < config->rx_ring_num; i++) {
781                 if (config->rx_cfg[i].num_rxd %
782                     (rxd_count[nic->rxd_mode] + 1)) {
783                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
784                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
785                                   i);
786                         DBG_PRINT(ERR_DBG, "RxDs per Block");
787                         return FAILURE;
788                 }
789                 size += config->rx_cfg[i].num_rxd;
790                 mac_control->rings[i].block_count =
791                         config->rx_cfg[i].num_rxd /
792                         (rxd_count[nic->rxd_mode] + 1 );
793                 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
794                         mac_control->rings[i].block_count;
795         }
796         if (nic->rxd_mode == RXD_MODE_1)
797                 size = (size * (sizeof(struct RxD1)));
798         else
799                 size = (size * (sizeof(struct RxD3)));
800
801         for (i = 0; i < config->rx_ring_num; i++) {
802                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
803                 mac_control->rings[i].rx_curr_get_info.offset = 0;
804                 mac_control->rings[i].rx_curr_get_info.ring_len =
805                     config->rx_cfg[i].num_rxd - 1;
806                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
807                 mac_control->rings[i].rx_curr_put_info.offset = 0;
808                 mac_control->rings[i].rx_curr_put_info.ring_len =
809                     config->rx_cfg[i].num_rxd - 1;
810                 mac_control->rings[i].nic = nic;
811                 mac_control->rings[i].ring_no = i;
812                 mac_control->rings[i].lro = lro_enable;
813
814                 blk_cnt = config->rx_cfg[i].num_rxd /
815                                 (rxd_count[nic->rxd_mode] + 1);
816                 /*  Allocating all the Rx blocks */
817                 for (j = 0; j < blk_cnt; j++) {
818                         struct rx_block_info *rx_blocks;
819                         int l;
820
821                         rx_blocks = &mac_control->rings[i].rx_blocks[j];
822                         size = SIZE_OF_BLOCK; //size is always page size
823                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
824                                                           &tmp_p_addr);
825                         if (tmp_v_addr == NULL) {
826                                 /*
827                                  * In case of failure, free_shared_mem()
828                                  * is called, which should free any
829                                  * memory that was alloced till the
830                                  * failure happened.
831                                  */
832                                 rx_blocks->block_virt_addr = tmp_v_addr;
833                                 return -ENOMEM;
834                         }
835                         mem_allocated += size;
836                         memset(tmp_v_addr, 0, size);
837                         rx_blocks->block_virt_addr = tmp_v_addr;
838                         rx_blocks->block_dma_addr = tmp_p_addr;
839                         rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
840                                                   rxd_count[nic->rxd_mode],
841                                                   GFP_KERNEL);
842                         if (!rx_blocks->rxds)
843                                 return -ENOMEM;
844                         mem_allocated +=
845                         (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
846                         for (l=0; l<rxd_count[nic->rxd_mode];l++) {
847                                 rx_blocks->rxds[l].virt_addr =
848                                         rx_blocks->block_virt_addr +
849                                         (rxd_size[nic->rxd_mode] * l);
850                                 rx_blocks->rxds[l].dma_addr =
851                                         rx_blocks->block_dma_addr +
852                                         (rxd_size[nic->rxd_mode] * l);
853                         }
854                 }
855                 /* Interlinking all Rx Blocks */
856                 for (j = 0; j < blk_cnt; j++) {
857                         tmp_v_addr =
858                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
859                         tmp_v_addr_next =
860                                 mac_control->rings[i].rx_blocks[(j + 1) %
861                                               blk_cnt].block_virt_addr;
862                         tmp_p_addr =
863                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
864                         tmp_p_addr_next =
865                                 mac_control->rings[i].rx_blocks[(j + 1) %
866                                               blk_cnt].block_dma_addr;
867
868                         pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
869                         pre_rxd_blk->reserved_2_pNext_RxD_block =
870                             (unsigned long) tmp_v_addr_next;
871                         pre_rxd_blk->pNext_RxD_Blk_physical =
872                             (u64) tmp_p_addr_next;
873                 }
874         }
875         if (nic->rxd_mode == RXD_MODE_3B) {
876                 /*
877                  * Allocation of Storages for buffer addresses in 2BUFF mode
878                  * and the buffers as well.
879                  */
880                 for (i = 0; i < config->rx_ring_num; i++) {
881                         blk_cnt = config->rx_cfg[i].num_rxd /
882                            (rxd_count[nic->rxd_mode]+ 1);
883                         mac_control->rings[i].ba =
884                                 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
885                                      GFP_KERNEL);
886                         if (!mac_control->rings[i].ba)
887                                 return -ENOMEM;
888                         mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
889                         for (j = 0; j < blk_cnt; j++) {
890                                 int k = 0;
891                                 mac_control->rings[i].ba[j] =
892                                         kmalloc((sizeof(struct buffAdd) *
893                                                 (rxd_count[nic->rxd_mode] + 1)),
894                                                 GFP_KERNEL);
895                                 if (!mac_control->rings[i].ba[j])
896                                         return -ENOMEM;
897                                 mem_allocated += (sizeof(struct buffAdd) *  \
898                                         (rxd_count[nic->rxd_mode] + 1));
899                                 while (k != rxd_count[nic->rxd_mode]) {
900                                         ba = &mac_control->rings[i].ba[j][k];
901
902                                         ba->ba_0_org = (void *) kmalloc
903                                             (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
904                                         if (!ba->ba_0_org)
905                                                 return -ENOMEM;
906                                         mem_allocated +=
907                                                 (BUF0_LEN + ALIGN_SIZE);
908                                         tmp = (unsigned long)ba->ba_0_org;
909                                         tmp += ALIGN_SIZE;
910                                         tmp &= ~((unsigned long) ALIGN_SIZE);
911                                         ba->ba_0 = (void *) tmp;
912
913                                         ba->ba_1_org = (void *) kmalloc
914                                             (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
915                                         if (!ba->ba_1_org)
916                                                 return -ENOMEM;
917                                         mem_allocated
918                                                 += (BUF1_LEN + ALIGN_SIZE);
919                                         tmp = (unsigned long) ba->ba_1_org;
920                                         tmp += ALIGN_SIZE;
921                                         tmp &= ~((unsigned long) ALIGN_SIZE);
922                                         ba->ba_1 = (void *) tmp;
923                                         k++;
924                                 }
925                         }
926                 }
927         }
928
929         /* Allocation and initialization of Statistics block */
930         size = sizeof(struct stat_block);
931         mac_control->stats_mem = pci_alloc_consistent
932             (nic->pdev, size, &mac_control->stats_mem_phy);
933
934         if (!mac_control->stats_mem) {
935                 /*
936                  * In case of failure, free_shared_mem() is called, which
937                  * should free any memory that was alloced till the
938                  * failure happened.
939                  */
940                 return -ENOMEM;
941         }
942         mem_allocated += size;
943         mac_control->stats_mem_sz = size;
944
945         tmp_v_addr = mac_control->stats_mem;
946         mac_control->stats_info = (struct stat_block *) tmp_v_addr;
947         memset(tmp_v_addr, 0, size);
948         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
949                   (unsigned long long) tmp_p_addr);
950         mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
951         return SUCCESS;
952 }
953
954 /**
955  * free_shared_mem - Free the allocated Memory
956  * @nic:  Device private variable.
957  * Description: This function is to free all memory locations allocated by
958  * the init_shared_mem() function and return it to the kernel.
959  */
960
961 static void free_shared_mem(struct s2io_nic *nic)
962 {
963         int i, j, blk_cnt, size;
964         void *tmp_v_addr;
965         dma_addr_t tmp_p_addr;
966         struct mac_info *mac_control;
967         struct config_param *config;
968         int lst_size, lst_per_page;
969         struct net_device *dev;
970         int page_num = 0;
971
972         if (!nic)
973                 return;
974
975         dev = nic->dev;
976
977         mac_control = &nic->mac_control;
978         config = &nic->config;
979
980         lst_size = (sizeof(struct TxD) * config->max_txds);
981         lst_per_page = PAGE_SIZE / lst_size;
982
983         for (i = 0; i < config->tx_fifo_num; i++) {
984                 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
985                                                         lst_per_page);
986                 for (j = 0; j < page_num; j++) {
987                         int mem_blks = (j * lst_per_page);
988                         if (!mac_control->fifos[i].list_info)
989                                 return;
990                         if (!mac_control->fifos[i].list_info[mem_blks].
991                                  list_virt_addr)
992                                 break;
993                         pci_free_consistent(nic->pdev, PAGE_SIZE,
994                                             mac_control->fifos[i].
995                                             list_info[mem_blks].
996                                             list_virt_addr,
997                                             mac_control->fifos[i].
998                                             list_info[mem_blks].
999                                             list_phy_addr);
1000                         nic->mac_control.stats_info->sw_stat.mem_freed
1001                                                 += PAGE_SIZE;
1002                 }
1003                 /* If we got a zero DMA address during allocation,
1004                  * free the page now
1005                  */
1006                 if (mac_control->zerodma_virt_addr) {
1007                         pci_free_consistent(nic->pdev, PAGE_SIZE,
1008                                             mac_control->zerodma_virt_addr,
1009                                             (dma_addr_t)0);
1010                         DBG_PRINT(INIT_DBG,
1011                                 "%s: Freeing TxDL with zero DMA addr. ",
1012                                 dev->name);
1013                         DBG_PRINT(INIT_DBG, "Virtual address %p\n",
1014                                 mac_control->zerodma_virt_addr);
1015                         nic->mac_control.stats_info->sw_stat.mem_freed
1016                                                 += PAGE_SIZE;
1017                 }
1018                 kfree(mac_control->fifos[i].list_info);
1019                 nic->mac_control.stats_info->sw_stat.mem_freed +=
1020                 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
1021         }
1022
1023         size = SIZE_OF_BLOCK;
1024         for (i = 0; i < config->rx_ring_num; i++) {
1025                 blk_cnt = mac_control->rings[i].block_count;
1026                 for (j = 0; j < blk_cnt; j++) {
1027                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
1028                                 block_virt_addr;
1029                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
1030                                 block_dma_addr;
1031                         if (tmp_v_addr == NULL)
1032                                 break;
1033                         pci_free_consistent(nic->pdev, size,
1034                                             tmp_v_addr, tmp_p_addr);
1035                         nic->mac_control.stats_info->sw_stat.mem_freed += size;
1036                         kfree(mac_control->rings[i].rx_blocks[j].rxds);
1037                         nic->mac_control.stats_info->sw_stat.mem_freed +=
1038                         ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
1039                 }
1040         }
1041
1042         if (nic->rxd_mode == RXD_MODE_3B) {
1043                 /* Freeing buffer storage addresses in 2BUFF mode. */
1044                 for (i = 0; i < config->rx_ring_num; i++) {
1045                         blk_cnt = config->rx_cfg[i].num_rxd /
1046                             (rxd_count[nic->rxd_mode] + 1);
1047                         for (j = 0; j < blk_cnt; j++) {
1048                                 int k = 0;
1049                                 if (!mac_control->rings[i].ba[j])
1050                                         continue;
1051                                 while (k != rxd_count[nic->rxd_mode]) {
1052                                         struct buffAdd *ba =
1053                                                 &mac_control->rings[i].ba[j][k];
1054                                         kfree(ba->ba_0_org);
1055                                         nic->mac_control.stats_info->sw_stat.\
1056                                         mem_freed += (BUF0_LEN + ALIGN_SIZE);
1057                                         kfree(ba->ba_1_org);
1058                                         nic->mac_control.stats_info->sw_stat.\
1059                                         mem_freed += (BUF1_LEN + ALIGN_SIZE);
1060                                         k++;
1061                                 }
1062                                 kfree(mac_control->rings[i].ba[j]);
1063                                 nic->mac_control.stats_info->sw_stat.mem_freed +=
1064                                         (sizeof(struct buffAdd) *
1065                                         (rxd_count[nic->rxd_mode] + 1));
1066                         }
1067                         kfree(mac_control->rings[i].ba);
1068                         nic->mac_control.stats_info->sw_stat.mem_freed +=
1069                         (sizeof(struct buffAdd *) * blk_cnt);
1070                 }
1071         }
1072
1073         for (i = 0; i < nic->config.tx_fifo_num; i++) {
1074                 if (mac_control->fifos[i].ufo_in_band_v) {
1075                         nic->mac_control.stats_info->sw_stat.mem_freed
1076                                 += (config->tx_cfg[i].fifo_len * sizeof(u64));
1077                         kfree(mac_control->fifos[i].ufo_in_band_v);
1078                 }
1079         }
1080
1081         if (mac_control->stats_mem) {
1082                 nic->mac_control.stats_info->sw_stat.mem_freed +=
1083                         mac_control->stats_mem_sz;
1084                 pci_free_consistent(nic->pdev,
1085                                     mac_control->stats_mem_sz,
1086                                     mac_control->stats_mem,
1087                                     mac_control->stats_mem_phy);
1088         }
1089 }
1090
1091 /**
1092  * s2io_verify_pci_mode -
1093  */
1094
1095 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1096 {
1097         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1098         register u64 val64 = 0;
1099         int     mode;
1100
1101         val64 = readq(&bar0->pci_mode);
1102         mode = (u8)GET_PCI_MODE(val64);
1103
1104         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1105                 return -1;      /* Unknown PCI mode */
1106         return mode;
1107 }
1108
1109 #define NEC_VENID   0x1033
1110 #define NEC_DEVID   0x0125
1111 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1112 {
1113         struct pci_dev *tdev = NULL;
1114         while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1115                 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1116                         if (tdev->bus == s2io_pdev->bus->parent) {
1117                                 pci_dev_put(tdev);
1118                                 return 1;
1119                         }
1120                 }
1121         }
1122         return 0;
1123 }
1124
1125 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1126 /**
1127  * s2io_print_pci_mode -
1128  */
1129 static int s2io_print_pci_mode(struct s2io_nic *nic)
1130 {
1131         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1132         register u64 val64 = 0;
1133         int     mode;
1134         struct config_param *config = &nic->config;
1135
1136         val64 = readq(&bar0->pci_mode);
1137         mode = (u8)GET_PCI_MODE(val64);
1138
1139         if ( val64 & PCI_MODE_UNKNOWN_MODE)
1140                 return -1;      /* Unknown PCI mode */
1141
1142         config->bus_speed = bus_speed[mode];
1143
1144         if (s2io_on_nec_bridge(nic->pdev)) {
1145                 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1146                                                         nic->dev->name);
1147                 return mode;
1148         }
1149
1150         if (val64 & PCI_MODE_32_BITS) {
1151                 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1152         } else {
1153                 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1154         }
1155
1156         switch(mode) {
1157                 case PCI_MODE_PCI_33:
1158                         DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1159                         break;
1160                 case PCI_MODE_PCI_66:
1161                         DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1162                         break;
1163                 case PCI_MODE_PCIX_M1_66:
1164                         DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1165                         break;
1166                 case PCI_MODE_PCIX_M1_100:
1167                         DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1168                         break;
1169                 case PCI_MODE_PCIX_M1_133:
1170                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1171                         break;
1172                 case PCI_MODE_PCIX_M2_66:
1173                         DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1174                         break;
1175                 case PCI_MODE_PCIX_M2_100:
1176                         DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1177                         break;
1178                 case PCI_MODE_PCIX_M2_133:
1179                         DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1180                         break;
1181                 default:
1182                         return -1;      /* Unsupported bus speed */
1183         }
1184
1185         return mode;
1186 }
1187
1188 /**
1189  *  init_tti - Initialization transmit traffic interrupt scheme
1190  *  @nic: device private variable
1191  *  @link: link status (UP/DOWN) used to enable/disable continuous
1192  *  transmit interrupts
1193  *  Description: The function configures transmit traffic interrupts
1194  *  Return Value:  SUCCESS on success and
1195  *  '-1' on failure
1196  */
1197
1198 static int init_tti(struct s2io_nic *nic, int link)
1199 {
1200         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1201         register u64 val64 = 0;
1202         int i;
1203         struct config_param *config;
1204
1205         config = &nic->config;
1206
1207         for (i = 0; i < config->tx_fifo_num; i++) {
1208                 /*
1209                  * TTI Initialization. Default Tx timer gets us about
1210                  * 250 interrupts per sec. Continuous interrupts are enabled
1211                  * by default.
1212                  */
1213                 if (nic->device_type == XFRAME_II_DEVICE) {
1214                         int count = (nic->config.bus_speed * 125)/2;
1215                         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1216                 } else
1217                         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1218
1219                 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1220                                 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1221                                 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1222                                 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1223
1224                 if (use_continuous_tx_intrs && (link == LINK_UP))
1225                         val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1226                 writeq(val64, &bar0->tti_data1_mem);
1227
1228                 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1229                                 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1230                                 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1231                                 TTI_DATA2_MEM_TX_UFC_D(0x80);
1232
1233                 writeq(val64, &bar0->tti_data2_mem);
1234
1235                 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
1236                                 TTI_CMD_MEM_OFFSET(i);
1237                 writeq(val64, &bar0->tti_command_mem);
1238
1239                 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1240                         TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
1241                         return FAILURE;
1242         }
1243
1244         return SUCCESS;
1245 }
1246
1247 /**
1248  *  init_nic - Initialization of hardware
1249  *  @nic: device private variable
1250  *  Description: The function sequentially configures every block
1251  *  of the H/W from their reset values.
1252  *  Return Value:  SUCCESS on success and
1253  *  '-1' on failure (endian settings incorrect).
1254  */
1255
1256 static int init_nic(struct s2io_nic *nic)
1257 {
1258         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1259         struct net_device *dev = nic->dev;
1260         register u64 val64 = 0;
1261         void __iomem *add;
1262         u32 time;
1263         int i, j;
1264         struct mac_info *mac_control;
1265         struct config_param *config;
1266         int dtx_cnt = 0;
1267         unsigned long long mem_share;
1268         int mem_size;
1269
1270         mac_control = &nic->mac_control;
1271         config = &nic->config;
1272
1273         /* to set the swapper controle on the card */
1274         if(s2io_set_swapper(nic)) {
1275                 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1276                 return -EIO;
1277         }
1278
1279         /*
1280          * Herc requires EOI to be removed from reset before XGXS, so..
1281          */
1282         if (nic->device_type & XFRAME_II_DEVICE) {
1283                 val64 = 0xA500000000ULL;
1284                 writeq(val64, &bar0->sw_reset);
1285                 msleep(500);
1286                 val64 = readq(&bar0->sw_reset);
1287         }
1288
1289         /* Remove XGXS from reset state */
1290         val64 = 0;
1291         writeq(val64, &bar0->sw_reset);
1292         msleep(500);
1293         val64 = readq(&bar0->sw_reset);
1294
1295         /* Ensure that it's safe to access registers by checking
1296          * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1297          */
1298         if (nic->device_type == XFRAME_II_DEVICE) {
1299                 for (i = 0; i < 50; i++) {
1300                         val64 = readq(&bar0->adapter_status);
1301                         if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1302                                 break;
1303                         msleep(10);
1304                 }
1305                 if (i == 50)
1306                         return -ENODEV;
1307         }
1308
1309         /*  Enable Receiving broadcasts */
1310         add = &bar0->mac_cfg;
1311         val64 = readq(&bar0->mac_cfg);
1312         val64 |= MAC_RMAC_BCAST_ENABLE;
1313         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1314         writel((u32) val64, add);
1315         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1316         writel((u32) (val64 >> 32), (add + 4));
1317
1318         /* Read registers in all blocks */
1319         val64 = readq(&bar0->mac_int_mask);
1320         val64 = readq(&bar0->mc_int_mask);
1321         val64 = readq(&bar0->xgxs_int_mask);
1322
1323         /*  Set MTU */
1324         val64 = dev->mtu;
1325         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1326
1327         if (nic->device_type & XFRAME_II_DEVICE) {
1328                 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1329                         SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1330                                           &bar0->dtx_control, UF);
1331                         if (dtx_cnt & 0x1)
1332                                 msleep(1); /* Necessary!! */
1333                         dtx_cnt++;
1334                 }
1335         } else {
1336                 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1337                         SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1338                                           &bar0->dtx_control, UF);
1339                         val64 = readq(&bar0->dtx_control);
1340                         dtx_cnt++;
1341                 }
1342         }
1343
1344         /*  Tx DMA Initialization */
1345         val64 = 0;
1346         writeq(val64, &bar0->tx_fifo_partition_0);
1347         writeq(val64, &bar0->tx_fifo_partition_1);
1348         writeq(val64, &bar0->tx_fifo_partition_2);
1349         writeq(val64, &bar0->tx_fifo_partition_3);
1350
1351
1352         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1353                 val64 |=
1354                     vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1355                          13) | vBIT(config->tx_cfg[i].fifo_priority,
1356                                     ((j * 32) + 5), 3);
1357
1358                 if (i == (config->tx_fifo_num - 1)) {
1359                         if (i % 2 == 0)
1360                                 i++;
1361                 }
1362
1363                 switch (i) {
1364                 case 1:
1365                         writeq(val64, &bar0->tx_fifo_partition_0);
1366                         val64 = 0;
1367                         j = 0;
1368                         break;
1369                 case 3:
1370                         writeq(val64, &bar0->tx_fifo_partition_1);
1371                         val64 = 0;
1372                         j = 0;
1373                         break;
1374                 case 5:
1375                         writeq(val64, &bar0->tx_fifo_partition_2);
1376                         val64 = 0;
1377                         j = 0;
1378                         break;
1379                 case 7:
1380                         writeq(val64, &bar0->tx_fifo_partition_3);
1381                         val64 = 0;
1382                         j = 0;
1383                         break;
1384                 default:
1385                         j++;
1386                         break;
1387                 }
1388         }
1389
1390         /*
1391          * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1392          * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1393          */
1394         if ((nic->device_type == XFRAME_I_DEVICE) &&
1395                 (nic->pdev->revision < 4))
1396                 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1397
1398         val64 = readq(&bar0->tx_fifo_partition_0);
1399         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1400                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1401
1402         /*
1403          * Initialization of Tx_PA_CONFIG register to ignore packet
1404          * integrity checking.
1405          */
1406         val64 = readq(&bar0->tx_pa_cfg);
1407         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1408             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1409         writeq(val64, &bar0->tx_pa_cfg);
1410
1411         /* Rx DMA intialization. */
1412         val64 = 0;
1413         for (i = 0; i < config->rx_ring_num; i++) {
1414                 val64 |=
1415                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1416                          3);
1417         }
1418         writeq(val64, &bar0->rx_queue_priority);
1419
1420         /*
1421          * Allocating equal share of memory to all the
1422          * configured Rings.
1423          */
1424         val64 = 0;
1425         if (nic->device_type & XFRAME_II_DEVICE)
1426                 mem_size = 32;
1427         else
1428                 mem_size = 64;
1429
1430         for (i = 0; i < config->rx_ring_num; i++) {
1431                 switch (i) {
1432                 case 0:
1433                         mem_share = (mem_size / config->rx_ring_num +
1434                                      mem_size % config->rx_ring_num);
1435                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1436                         continue;
1437                 case 1:
1438                         mem_share = (mem_size / config->rx_ring_num);
1439                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1440                         continue;
1441                 case 2:
1442                         mem_share = (mem_size / config->rx_ring_num);
1443                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1444                         continue;
1445                 case 3:
1446                         mem_share = (mem_size / config->rx_ring_num);
1447                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1448                         continue;
1449                 case 4:
1450                         mem_share = (mem_size / config->rx_ring_num);
1451                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1452                         continue;
1453                 case 5:
1454                         mem_share = (mem_size / config->rx_ring_num);
1455                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1456                         continue;
1457                 case 6:
1458                         mem_share = (mem_size / config->rx_ring_num);
1459                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1460                         continue;
1461                 case 7:
1462                         mem_share = (mem_size / config->rx_ring_num);
1463                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1464                         continue;
1465                 }
1466         }
1467         writeq(val64, &bar0->rx_queue_cfg);
1468
1469         /*
1470          * Filling Tx round robin registers
1471          * as per the number of FIFOs for equal scheduling priority
1472          */
1473         switch (config->tx_fifo_num) {
1474         case 1:
1475                 val64 = 0x0;
1476                 writeq(val64, &bar0->tx_w_round_robin_0);
1477                 writeq(val64, &bar0->tx_w_round_robin_1);
1478                 writeq(val64, &bar0->tx_w_round_robin_2);
1479                 writeq(val64, &bar0->tx_w_round_robin_3);
1480                 writeq(val64, &bar0->tx_w_round_robin_4);
1481                 break;
1482         case 2:
1483                 val64 = 0x0001000100010001ULL;
1484                 writeq(val64, &bar0->tx_w_round_robin_0);
1485                 writeq(val64, &bar0->tx_w_round_robin_1);
1486                 writeq(val64, &bar0->tx_w_round_robin_2);
1487                 writeq(val64, &bar0->tx_w_round_robin_3);
1488                 val64 = 0x0001000100000000ULL;
1489                 writeq(val64, &bar0->tx_w_round_robin_4);
1490                 break;
1491         case 3:
1492                 val64 = 0x0001020001020001ULL;
1493                 writeq(val64, &bar0->tx_w_round_robin_0);
1494                 val64 = 0x0200010200010200ULL;
1495                 writeq(val64, &bar0->tx_w_round_robin_1);
1496                 val64 = 0x0102000102000102ULL;
1497                 writeq(val64, &bar0->tx_w_round_robin_2);
1498                 val64 = 0x0001020001020001ULL;
1499                 writeq(val64, &bar0->tx_w_round_robin_3);
1500                 val64 = 0x0200010200000000ULL;
1501                 writeq(val64, &bar0->tx_w_round_robin_4);
1502                 break;
1503         case 4:
1504                 val64 = 0x0001020300010203ULL;
1505                 writeq(val64, &bar0->tx_w_round_robin_0);
1506                 writeq(val64, &bar0->tx_w_round_robin_1);
1507                 writeq(val64, &bar0->tx_w_round_robin_2);
1508                 writeq(val64, &bar0->tx_w_round_robin_3);
1509                 val64 = 0x0001020300000000ULL;
1510                 writeq(val64, &bar0->tx_w_round_robin_4);
1511                 break;
1512         case 5:
1513                 val64 = 0x0001020304000102ULL;
1514                 writeq(val64, &bar0->tx_w_round_robin_0);
1515                 val64 = 0x0304000102030400ULL;
1516                 writeq(val64, &bar0->tx_w_round_robin_1);
1517                 val64 = 0x0102030400010203ULL;
1518                 writeq(val64, &bar0->tx_w_round_robin_2);
1519                 val64 = 0x0400010203040001ULL;
1520                 writeq(val64, &bar0->tx_w_round_robin_3);
1521                 val64 = 0x0203040000000000ULL;
1522                 writeq(val64, &bar0->tx_w_round_robin_4);
1523                 break;
1524         case 6:
1525                 val64 = 0x0001020304050001ULL;
1526                 writeq(val64, &bar0->tx_w_round_robin_0);
1527                 val64 = 0x0203040500010203ULL;
1528                 writeq(val64, &bar0->tx_w_round_robin_1);
1529                 val64 = 0x0405000102030405ULL;
1530                 writeq(val64, &bar0->tx_w_round_robin_2);
1531                 val64 = 0x0001020304050001ULL;
1532                 writeq(val64, &bar0->tx_w_round_robin_3);
1533                 val64 = 0x0203040500000000ULL;
1534                 writeq(val64, &bar0->tx_w_round_robin_4);
1535                 break;
1536         case 7:
1537                 val64 = 0x0001020304050600ULL;
1538                 writeq(val64, &bar0->tx_w_round_robin_0);
1539                 val64 = 0x0102030405060001ULL;
1540                 writeq(val64, &bar0->tx_w_round_robin_1);
1541                 val64 = 0x0203040506000102ULL;
1542                 writeq(val64, &bar0->tx_w_round_robin_2);
1543                 val64 = 0x0304050600010203ULL;
1544                 writeq(val64, &bar0->tx_w_round_robin_3);
1545                 val64 = 0x0405060000000000ULL;
1546                 writeq(val64, &bar0->tx_w_round_robin_4);
1547                 break;
1548         case 8:
1549                 val64 = 0x0001020304050607ULL;
1550                 writeq(val64, &bar0->tx_w_round_robin_0);
1551                 writeq(val64, &bar0->tx_w_round_robin_1);
1552                 writeq(val64, &bar0->tx_w_round_robin_2);
1553                 writeq(val64, &bar0->tx_w_round_robin_3);
1554                 val64 = 0x0001020300000000ULL;
1555                 writeq(val64, &bar0->tx_w_round_robin_4);
1556                 break;
1557         }
1558
1559         /* Enable all configured Tx FIFO partitions */
1560         val64 = readq(&bar0->tx_fifo_partition_0);
1561         val64 |= (TX_FIFO_PARTITION_EN);
1562         writeq(val64, &bar0->tx_fifo_partition_0);
1563
1564         /* Filling the Rx round robin registers as per the
1565          * number of Rings and steering based on QoS with
1566          * equal priority.
1567          */
1568         switch (config->rx_ring_num) {
1569         case 1:
1570                 val64 = 0x0;
1571                 writeq(val64, &bar0->rx_w_round_robin_0);
1572                 writeq(val64, &bar0->rx_w_round_robin_1);
1573                 writeq(val64, &bar0->rx_w_round_robin_2);
1574                 writeq(val64, &bar0->rx_w_round_robin_3);
1575                 writeq(val64, &bar0->rx_w_round_robin_4);
1576
1577                 val64 = 0x8080808080808080ULL;
1578                 writeq(val64, &bar0->rts_qos_steering);
1579                 break;
1580         case 2:
1581                 val64 = 0x0001000100010001ULL;
1582                 writeq(val64, &bar0->rx_w_round_robin_0);
1583                 writeq(val64, &bar0->rx_w_round_robin_1);
1584                 writeq(val64, &bar0->rx_w_round_robin_2);
1585                 writeq(val64, &bar0->rx_w_round_robin_3);
1586                 val64 = 0x0001000100000000ULL;
1587                 writeq(val64, &bar0->rx_w_round_robin_4);
1588
1589                 val64 = 0x8080808040404040ULL;
1590                 writeq(val64, &bar0->rts_qos_steering);
1591                 break;
1592         case 3:
1593                 val64 = 0x0001020001020001ULL;
1594                 writeq(val64, &bar0->rx_w_round_robin_0);
1595                 val64 = 0x0200010200010200ULL;
1596                 writeq(val64, &bar0->rx_w_round_robin_1);
1597                 val64 = 0x0102000102000102ULL;
1598                 writeq(val64, &bar0->rx_w_round_robin_2);
1599                 val64 = 0x0001020001020001ULL;
1600                 writeq(val64, &bar0->rx_w_round_robin_3);
1601                 val64 = 0x0200010200000000ULL;
1602                 writeq(val64, &bar0->rx_w_round_robin_4);
1603
1604                 val64 = 0x8080804040402020ULL;
1605                 writeq(val64, &bar0->rts_qos_steering);
1606                 break;
1607         case 4:
1608                 val64 = 0x0001020300010203ULL;
1609                 writeq(val64, &bar0->rx_w_round_robin_0);
1610                 writeq(val64, &bar0->rx_w_round_robin_1);
1611                 writeq(val64, &bar0->rx_w_round_robin_2);
1612                 writeq(val64, &bar0->rx_w_round_robin_3);
1613                 val64 = 0x0001020300000000ULL;
1614                 writeq(val64, &bar0->rx_w_round_robin_4);
1615
1616                 val64 = 0x8080404020201010ULL;
1617                 writeq(val64, &bar0->rts_qos_steering);
1618                 break;
1619         case 5:
1620                 val64 = 0x0001020304000102ULL;
1621                 writeq(val64, &bar0->rx_w_round_robin_0);
1622                 val64 = 0x0304000102030400ULL;
1623                 writeq(val64, &bar0->rx_w_round_robin_1);
1624                 val64 = 0x0102030400010203ULL;
1625                 writeq(val64, &bar0->rx_w_round_robin_2);
1626                 val64 = 0x0400010203040001ULL;
1627                 writeq(val64, &bar0->rx_w_round_robin_3);
1628                 val64 = 0x0203040000000000ULL;
1629                 writeq(val64, &bar0->rx_w_round_robin_4);
1630
1631                 val64 = 0x8080404020201008ULL;
1632                 writeq(val64, &bar0->rts_qos_steering);
1633                 break;
1634         case 6:
1635                 val64 = 0x0001020304050001ULL;
1636                 writeq(val64, &bar0->rx_w_round_robin_0);
1637                 val64 = 0x0203040500010203ULL;
1638                 writeq(val64, &bar0->rx_w_round_robin_1);
1639                 val64 = 0x0405000102030405ULL;
1640                 writeq(val64, &bar0->rx_w_round_robin_2);
1641                 val64 = 0x0001020304050001ULL;
1642                 writeq(val64, &bar0->rx_w_round_robin_3);
1643                 val64 = 0x0203040500000000ULL;
1644                 writeq(val64, &bar0->rx_w_round_robin_4);
1645
1646                 val64 = 0x8080404020100804ULL;
1647                 writeq(val64, &bar0->rts_qos_steering);
1648                 break;
1649         case 7:
1650                 val64 = 0x0001020304050600ULL;
1651                 writeq(val64, &bar0->rx_w_round_robin_0);
1652                 val64 = 0x0102030405060001ULL;
1653                 writeq(val64, &bar0->rx_w_round_robin_1);
1654                 val64 = 0x0203040506000102ULL;
1655                 writeq(val64, &bar0->rx_w_round_robin_2);
1656                 val64 = 0x0304050600010203ULL;
1657                 writeq(val64, &bar0->rx_w_round_robin_3);
1658                 val64 = 0x0405060000000000ULL;
1659                 writeq(val64, &bar0->rx_w_round_robin_4);
1660
1661                 val64 = 0x8080402010080402ULL;
1662                 writeq(val64, &bar0->rts_qos_steering);
1663                 break;
1664         case 8:
1665                 val64 = 0x0001020304050607ULL;
1666                 writeq(val64, &bar0->rx_w_round_robin_0);
1667                 writeq(val64, &bar0->rx_w_round_robin_1);
1668                 writeq(val64, &bar0->rx_w_round_robin_2);
1669                 writeq(val64, &bar0->rx_w_round_robin_3);
1670                 val64 = 0x0001020300000000ULL;
1671                 writeq(val64, &bar0->rx_w_round_robin_4);
1672
1673                 val64 = 0x8040201008040201ULL;
1674                 writeq(val64, &bar0->rts_qos_steering);
1675                 break;
1676         }
1677
1678         /* UDP Fix */
1679         val64 = 0;
1680         for (i = 0; i < 8; i++)
1681                 writeq(val64, &bar0->rts_frm_len_n[i]);
1682
1683         /* Set the default rts frame length for the rings configured */
1684         val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1685         for (i = 0 ; i < config->rx_ring_num ; i++)
1686                 writeq(val64, &bar0->rts_frm_len_n[i]);
1687
1688         /* Set the frame length for the configured rings
1689          * desired by the user
1690          */
1691         for (i = 0; i < config->rx_ring_num; i++) {
1692                 /* If rts_frm_len[i] == 0 then it is assumed that user not
1693                  * specified frame length steering.
1694                  * If the user provides the frame length then program
1695                  * the rts_frm_len register for those values or else
1696                  * leave it as it is.
1697                  */
1698                 if (rts_frm_len[i] != 0) {
1699                         writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1700                                 &bar0->rts_frm_len_n[i]);
1701                 }
1702         }
1703
1704         /* Disable differentiated services steering logic */
1705         for (i = 0; i < 64; i++) {
1706                 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1707                         DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1708                                 dev->name);
1709                         DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1710                         return -ENODEV;
1711                 }
1712         }
1713
1714         /* Program statistics memory */
1715         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1716
1717         if (nic->device_type == XFRAME_II_DEVICE) {
1718                 val64 = STAT_BC(0x320);
1719                 writeq(val64, &bar0->stat_byte_cnt);
1720         }
1721
1722         /*
1723          * Initializing the sampling rate for the device to calculate the
1724          * bandwidth utilization.
1725          */
1726         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1727             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1728         writeq(val64, &bar0->mac_link_util);
1729
1730         /*
1731          * Initializing the Transmit and Receive Traffic Interrupt
1732          * Scheme.
1733          */
1734
1735         /* Initialize TTI */
1736         if (SUCCESS != init_tti(nic, nic->last_link_state))
1737                 return -ENODEV;
1738
1739         /* RTI Initialization */
1740         if (nic->device_type == XFRAME_II_DEVICE) {
1741                 /*
1742                  * Programmed to generate Apprx 500 Intrs per
1743                  * second
1744                  */
1745                 int count = (nic->config.bus_speed * 125)/4;
1746                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1747         } else
1748                 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1749         val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1750                  RTI_DATA1_MEM_RX_URNG_B(0x10) |
1751                  RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1752
1753         writeq(val64, &bar0->rti_data1_mem);
1754
1755         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1756                 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1757         if (nic->config.intr_type == MSI_X)
1758             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1759                         RTI_DATA2_MEM_RX_UFC_D(0x40));
1760         else
1761             val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1762                         RTI_DATA2_MEM_RX_UFC_D(0x80));
1763         writeq(val64, &bar0->rti_data2_mem);
1764
1765         for (i = 0; i < config->rx_ring_num; i++) {
1766                 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1767                                 | RTI_CMD_MEM_OFFSET(i);
1768                 writeq(val64, &bar0->rti_command_mem);
1769
1770                 /*
1771                  * Once the operation completes, the Strobe bit of the
1772                  * command register will be reset. We poll for this
1773                  * particular condition. We wait for a maximum of 500ms
1774                  * for the operation to complete, if it's not complete
1775                  * by then we return error.
1776                  */
1777                 time = 0;
1778                 while (TRUE) {
1779                         val64 = readq(&bar0->rti_command_mem);
1780                         if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1781                                 break;
1782
1783                         if (time > 10) {
1784                                 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1785                                           dev->name);
1786                                 return -ENODEV;
1787                         }
1788                         time++;
1789                         msleep(50);
1790                 }
1791         }
1792
1793         /*
1794          * Initializing proper values as Pause threshold into all
1795          * the 8 Queues on Rx side.
1796          */
1797         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1798         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1799
1800         /* Disable RMAC PAD STRIPPING */
1801         add = &bar0->mac_cfg;
1802         val64 = readq(&bar0->mac_cfg);
1803         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1804         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1805         writel((u32) (val64), add);
1806         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1807         writel((u32) (val64 >> 32), (add + 4));
1808         val64 = readq(&bar0->mac_cfg);
1809
1810         /* Enable FCS stripping by adapter */
1811         add = &bar0->mac_cfg;
1812         val64 = readq(&bar0->mac_cfg);
1813         val64 |= MAC_CFG_RMAC_STRIP_FCS;
1814         if (nic->device_type == XFRAME_II_DEVICE)
1815                 writeq(val64, &bar0->mac_cfg);
1816         else {
1817                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1818                 writel((u32) (val64), add);
1819                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1820                 writel((u32) (val64 >> 32), (add + 4));
1821         }
1822
1823         /*
1824          * Set the time value to be inserted in the pause frame
1825          * generated by xena.
1826          */
1827         val64 = readq(&bar0->rmac_pause_cfg);
1828         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1829         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1830         writeq(val64, &bar0->rmac_pause_cfg);
1831
1832         /*
1833          * Set the Threshold Limit for Generating the pause frame
1834          * If the amount of data in any Queue exceeds ratio of
1835          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1836          * pause frame is generated
1837          */
1838         val64 = 0;
1839         for (i = 0; i < 4; i++) {
1840                 val64 |=
1841                     (((u64) 0xFF00 | nic->mac_control.
1842                       mc_pause_threshold_q0q3)
1843                      << (i * 2 * 8));
1844         }
1845         writeq(val64, &bar0->mc_pause_thresh_q0q3);
1846
1847         val64 = 0;
1848         for (i = 0; i < 4; i++) {
1849                 val64 |=
1850                     (((u64) 0xFF00 | nic->mac_control.
1851                       mc_pause_threshold_q4q7)
1852                      << (i * 2 * 8));
1853         }
1854         writeq(val64, &bar0->mc_pause_thresh_q4q7);
1855
1856         /*
1857          * TxDMA will stop Read request if the number of read split has
1858          * exceeded the limit pointed by shared_splits
1859          */
1860         val64 = readq(&bar0->pic_control);
1861         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1862         writeq(val64, &bar0->pic_control);
1863
1864         if (nic->config.bus_speed == 266) {
1865                 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1866                 writeq(0x0, &bar0->read_retry_delay);
1867                 writeq(0x0, &bar0->write_retry_delay);
1868         }
1869
1870         /*
1871          * Programming the Herc to split every write transaction
1872          * that does not start on an ADB to reduce disconnects.
1873          */
1874         if (nic->device_type == XFRAME_II_DEVICE) {
1875                 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1876                         MISC_LINK_STABILITY_PRD(3);
1877                 writeq(val64, &bar0->misc_control);
1878                 val64 = readq(&bar0->pic_control2);
1879                 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1880                 writeq(val64, &bar0->pic_control2);
1881         }
1882         if (strstr(nic->product_name, "CX4")) {
1883                 val64 = TMAC_AVG_IPG(0x17);
1884                 writeq(val64, &bar0->tmac_avg_ipg);
1885         }
1886
1887         return SUCCESS;
1888 }
1889 #define LINK_UP_DOWN_INTERRUPT          1
1890 #define MAC_RMAC_ERR_TIMER              2
1891
1892 static int s2io_link_fault_indication(struct s2io_nic *nic)
1893 {
1894         if (nic->config.intr_type != INTA)
1895                 return MAC_RMAC_ERR_TIMER;
1896         if (nic->device_type == XFRAME_II_DEVICE)
1897                 return LINK_UP_DOWN_INTERRUPT;
1898         else
1899                 return MAC_RMAC_ERR_TIMER;
1900 }
1901
1902 /**
1903  *  do_s2io_write_bits -  update alarm bits in alarm register
1904  *  @value: alarm bits
1905  *  @flag: interrupt status
1906  *  @addr: address value
1907  *  Description: update alarm bits in alarm register
1908  *  Return Value:
1909  *  NONE.
1910  */
1911 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1912 {
1913         u64 temp64;
1914
1915         temp64 = readq(addr);
1916
1917         if(flag == ENABLE_INTRS)
1918                 temp64 &= ~((u64) value);
1919         else
1920                 temp64 |= ((u64) value);
1921         writeq(temp64, addr);
1922 }
1923
1924 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1925 {
1926         struct XENA_dev_config __iomem *bar0 = nic->bar0;
1927         register u64 gen_int_mask = 0;
1928
1929         if (mask & TX_DMA_INTR) {
1930
1931                 gen_int_mask |= TXDMA_INT_M;
1932
1933                 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1934                                 TXDMA_PCC_INT | TXDMA_TTI_INT |
1935                                 TXDMA_LSO_INT | TXDMA_TPA_INT |
1936                                 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1937
1938                 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1939                                 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1940                                 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1941                                 &bar0->pfc_err_mask);
1942
1943                 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1944                                 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1945                                 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1946
1947                 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1948                                 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1949                                 PCC_N_SERR | PCC_6_COF_OV_ERR |
1950                                 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1951                                 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1952                                 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1953
1954                 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1955                                 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1956
1957                 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1958                                 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1959                                 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1960                                 flag, &bar0->lso_err_mask);
1961
1962                 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1963                                 flag, &bar0->tpa_err_mask);
1964
1965                 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1966
1967         }
1968
1969         if (mask & TX_MAC_INTR) {
1970                 gen_int_mask |= TXMAC_INT_M;
1971                 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1972                                 &bar0->mac_int_mask);
1973                 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1974                                 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1975                                 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1976                                 flag, &bar0->mac_tmac_err_mask);
1977         }
1978
1979         if (mask & TX_XGXS_INTR) {
1980                 gen_int_mask |= TXXGXS_INT_M;
1981                 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1982                                 &bar0->xgxs_int_mask);
1983                 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1984                                 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1985                                 flag, &bar0->xgxs_txgxs_err_mask);
1986         }
1987
1988         if (mask & RX_DMA_INTR) {
1989                 gen_int_mask |= RXDMA_INT_M;
1990                 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1991                                 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1992                                 flag, &bar0->rxdma_int_mask);
1993                 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1994                                 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1995                                 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1996                                 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1997                 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1998                                 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1999                                 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
2000                                 &bar0->prc_pcix_err_mask);
2001                 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
2002                                 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
2003                                 &bar0->rpa_err_mask);
2004                 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
2005                                 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
2006                                 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
2007                                 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
2008                                 flag, &bar0->rda_err_mask);
2009                 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2010                                 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2011                                 flag, &bar0->rti_err_mask);
2012         }
2013
2014         if (mask & RX_MAC_INTR) {
2015                 gen_int_mask |= RXMAC_INT_M;
2016                 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2017                                 &bar0->mac_int_mask);
2018                 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2019                                 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2020                                 RMAC_DOUBLE_ECC_ERR |
2021                                 RMAC_LINK_STATE_CHANGE_INT,
2022                                 flag, &bar0->mac_rmac_err_mask);
2023         }
2024
2025         if (mask & RX_XGXS_INTR)
2026         {
2027                 gen_int_mask |= RXXGXS_INT_M;
2028                 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2029                                 &bar0->xgxs_int_mask);
2030                 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2031                                 &bar0->xgxs_rxgxs_err_mask);
2032         }
2033
2034         if (mask & MC_INTR) {
2035                 gen_int_mask |= MC_INT_M;
2036                 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2037                 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2038                                 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2039                                 &bar0->mc_err_mask);
2040         }
2041         nic->general_int_mask = gen_int_mask;
2042
2043         /* Remove this line when alarm interrupts are enabled */
2044         nic->general_int_mask = 0;
2045 }
2046 /**
2047  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
2048  *  @nic: device private variable,
2049  *  @mask: A mask indicating which Intr block must be modified and,
2050  *  @flag: A flag indicating whether to enable or disable the Intrs.
2051  *  Description: This function will either disable or enable the interrupts
2052  *  depending on the flag argument. The mask argument can be used to
2053  *  enable/disable any Intr block.
2054  *  Return Value: NONE.
2055  */
2056
2057 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2058 {
2059         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2060         register u64 temp64 = 0, intr_mask = 0;
2061
2062         intr_mask = nic->general_int_mask;
2063
2064         /*  Top level interrupt classification */
2065         /*  PIC Interrupts */
2066         if (mask & TX_PIC_INTR) {
2067                 /*  Enable PIC Intrs in the general intr mask register */
2068                 intr_mask |= TXPIC_INT_M;
2069                 if (flag == ENABLE_INTRS) {
2070                         /*
2071                          * If Hercules adapter enable GPIO otherwise
2072                          * disable all PCIX, Flash, MDIO, IIC and GPIO
2073                          * interrupts for now.
2074                          * TODO
2075                          */
2076                         if (s2io_link_fault_indication(nic) ==
2077                                         LINK_UP_DOWN_INTERRUPT ) {
2078                                 do_s2io_write_bits(PIC_INT_GPIO, flag,
2079                                                 &bar0->pic_int_mask);
2080                                 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2081                                                 &bar0->gpio_int_mask);
2082                         } else
2083                                 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2084                 } else if (flag == DISABLE_INTRS) {
2085                         /*
2086                          * Disable PIC Intrs in the general
2087                          * intr mask register
2088                          */
2089                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2090                 }
2091         }
2092
2093         /*  Tx traffic interrupts */
2094         if (mask & TX_TRAFFIC_INTR) {
2095                 intr_mask |= TXTRAFFIC_INT_M;
2096                 if (flag == ENABLE_INTRS) {
2097                         /*
2098                          * Enable all the Tx side interrupts
2099                          * writing 0 Enables all 64 TX interrupt levels
2100                          */
2101                         writeq(0x0, &bar0->tx_traffic_mask);
2102                 } else if (flag == DISABLE_INTRS) {
2103                         /*
2104                          * Disable Tx Traffic Intrs in the general intr mask
2105                          * register.
2106                          */
2107                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2108                 }
2109         }
2110
2111         /*  Rx traffic interrupts */
2112         if (mask & RX_TRAFFIC_INTR) {
2113                 intr_mask |= RXTRAFFIC_INT_M;
2114                 if (flag == ENABLE_INTRS) {
2115                         /* writing 0 Enables all 8 RX interrupt levels */
2116                         writeq(0x0, &bar0->rx_traffic_mask);
2117                 } else if (flag == DISABLE_INTRS) {
2118                         /*
2119                          * Disable Rx Traffic Intrs in the general intr mask
2120                          * register.
2121                          */
2122                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
2123                 }
2124         }
2125
2126         temp64 = readq(&bar0->general_int_mask);
2127         if (flag == ENABLE_INTRS)
2128                 temp64 &= ~((u64) intr_mask);
2129         else
2130                 temp64 = DISABLE_ALL_INTRS;
2131         writeq(temp64, &bar0->general_int_mask);
2132
2133         nic->general_int_mask = readq(&bar0->general_int_mask);
2134 }
2135
2136 /**
2137  *  verify_pcc_quiescent- Checks for PCC quiescent state
2138  *  Return: 1 If PCC is quiescence
2139  *          0 If PCC is not quiescence
2140  */
2141 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2142 {
2143         int ret = 0, herc;
2144         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2145         u64 val64 = readq(&bar0->adapter_status);
2146
2147         herc = (sp->device_type == XFRAME_II_DEVICE);
2148
2149         if (flag == FALSE) {
2150                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2151                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2152                                 ret = 1;
2153                 } else {
2154                         if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2155                                 ret = 1;
2156                 }
2157         } else {
2158                 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2159                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2160                              ADAPTER_STATUS_RMAC_PCC_IDLE))
2161                                 ret = 1;
2162                 } else {
2163                         if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2164                              ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2165                                 ret = 1;
2166                 }
2167         }
2168
2169         return ret;
2170 }
2171 /**
2172  *  verify_xena_quiescence - Checks whether the H/W is ready
2173  *  Description: Returns whether the H/W is ready to go or not. Depending
2174  *  on whether adapter enable bit was written or not the comparison
2175  *  differs and the calling function passes the input argument flag to
2176  *  indicate this.
2177  *  Return: 1 If xena is quiescence
2178  *          0 If Xena is not quiescence
2179  */
2180
2181 static int verify_xena_quiescence(struct s2io_nic *sp)
2182 {
2183         int  mode;
2184         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2185         u64 val64 = readq(&bar0->adapter_status);
2186         mode = s2io_verify_pci_mode(sp);
2187
2188         if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2189                 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2190                 return 0;
2191         }
2192         if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2193         DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2194                 return 0;
2195         }
2196         if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2197                 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2198                 return 0;
2199         }
2200         if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2201                 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2202                 return 0;
2203         }
2204         if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2205                 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2206                 return 0;
2207         }
2208         if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2209                 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2210                 return 0;
2211         }
2212         if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2213                 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2214                 return 0;
2215         }
2216         if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2217                 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2218                 return 0;
2219         }
2220
2221         /*
2222          * In PCI 33 mode, the P_PLL is not used, and therefore,
2223          * the the P_PLL_LOCK bit in the adapter_status register will
2224          * not be asserted.
2225          */
2226         if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2227                 sp->device_type == XFRAME_II_DEVICE && mode !=
2228                 PCI_MODE_PCI_33) {
2229                 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2230                 return 0;
2231         }
2232         if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2233                         ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2234                 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2235                 return 0;
2236         }
2237         return 1;
2238 }
2239
2240 /**
2241  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
2242  * @sp: Pointer to device specific structure
2243  * Description :
2244  * New procedure to clear mac address reading  problems on Alpha platforms
2245  *
2246  */
2247
2248 static void fix_mac_address(struct s2io_nic * sp)
2249 {
2250         struct XENA_dev_config __iomem *bar0 = sp->bar0;
2251         u64 val64;
2252         int i = 0;
2253
2254         while (fix_mac[i] != END_SIGN) {
2255                 writeq(fix_mac[i++], &bar0->gpio_control);
2256                 udelay(10);
2257                 val64 = readq(&bar0->gpio_control);
2258         }
2259 }
2260
2261 /**
2262  *  start_nic - Turns the device on
2263  *  @nic : device private variable.
2264  *  Description:
2265  *  This function actually turns the device on. Before this  function is
2266  *  called,all Registers are configured from their reset states
2267  *  and shared memory is allocated but the NIC is still quiescent. On
2268  *  calling this function, the device interrupts are cleared and the NIC is
2269  *  literally switched on by writing into the adapter control register.
2270  *  Return Value:
2271  *  SUCCESS on success and -1 on failure.
2272  */
2273
static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/*  PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each PRC at the first RxD block of its ring. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Reprogram the RxD backoff interval field to 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	if (vlan_tag_strip == 0) {
		/* Module parameter asked us not to strip VLAN tags in H/W. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): this CLEARS ADAPTER_ECC_EN although the comment
	 * says "Enabling" - confirm the bit polarity against the H/W spec. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Don't see link state interrupts initially on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming sequence at register offset 0x2700. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
2379 /**
2380  * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2381  */
2382 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2383                                         TxD *txdlp, int get_off)
2384 {
2385         struct s2io_nic *nic = fifo_data->nic;
2386         struct sk_buff *skb;
2387         struct TxD *txds;
2388         u16 j, frg_cnt;
2389
2390         txds = txdlp;
2391         if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2392                 pci_unmap_single(nic->pdev, (dma_addr_t)
2393                         txds->Buffer_Pointer, sizeof(u64),
2394                         PCI_DMA_TODEVICE);
2395                 txds++;
2396         }
2397
2398         skb = (struct sk_buff *) ((unsigned long)
2399                         txds->Host_Control);
2400         if (!skb) {
2401                 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
2402                 return NULL;
2403         }
2404         pci_unmap_single(nic->pdev, (dma_addr_t)
2405                          txds->Buffer_Pointer,
2406                          skb->len - skb->data_len,
2407                          PCI_DMA_TODEVICE);
2408         frg_cnt = skb_shinfo(skb)->nr_frags;
2409         if (frg_cnt) {
2410                 txds++;
2411                 for (j = 0; j < frg_cnt; j++, txds++) {
2412                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2413                         if (!txds->Buffer_Pointer)
2414                                 break;
2415                         pci_unmap_page(nic->pdev, (dma_addr_t)
2416                                         txds->Buffer_Pointer,
2417                                        frag->size, PCI_DMA_TODEVICE);
2418                 }
2419         }
2420         memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2421         return(skb);
2422 }
2423
2424 /**
2425  *  free_tx_buffers - Free all queued Tx buffers
2426  *  @nic : device private variable.
2427  *  Description:
2428  *  Free all queued Tx buffers.
2429  *  Return Value: void
2430 */
2431
2432 static void free_tx_buffers(struct s2io_nic *nic)
2433 {
2434         struct net_device *dev = nic->dev;
2435         struct sk_buff *skb;
2436         struct TxD *txdp;
2437         int i, j;
2438         struct mac_info *mac_control;
2439         struct config_param *config;
2440         int cnt = 0;
2441
2442         mac_control = &nic->mac_control;
2443         config = &nic->config;
2444
2445         for (i = 0; i < config->tx_fifo_num; i++) {
2446                 unsigned long flags;
2447                 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2448                 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
2449                         txdp = (struct TxD *) \
2450                         mac_control->fifos[i].list_info[j].list_virt_addr;
2451                         skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2452                         if (skb) {
2453                                 nic->mac_control.stats_info->sw_stat.mem_freed
2454                                         += skb->truesize;
2455                                 dev_kfree_skb(skb);
2456                                 cnt++;
2457                         }
2458                 }
2459                 DBG_PRINT(INTR_DBG,
2460                           "%s:forcibly freeing %d skbs on FIFO%d\n",
2461                           dev->name, cnt, i);
2462                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2463                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2464                 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2465         }
2466 }
2467
2468 /**
2469  *   stop_nic -  To stop the nic
2470  *   @nic : device private variable.
2471  *   Description:
2472  *   This function does exactly the opposite of what the start_nic()
2473  *   function does. This function is called to stop the device.
2474  *   Return Value:
2475  *   void.
2476  */
2477
2478 static void stop_nic(struct s2io_nic *nic)
2479 {
2480         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2481         register u64 val64 = 0;
2482         u16 interruptible;
2483         struct mac_info *mac_control;
2484         struct config_param *config;
2485
2486         mac_control = &nic->mac_control;
2487         config = &nic->config;
2488
2489         /*  Disable all interrupts */
2490         en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2491         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2492         interruptible |= TX_PIC_INTR;
2493         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2494
2495         /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2496         val64 = readq(&bar0->adapter_control);
2497         val64 &= ~(ADAPTER_CNTL_EN);
2498         writeq(val64, &bar0->adapter_control);
2499 }
2500
2501 /**
2502  *  fill_rx_buffers - Allocates the Rx side skbs
2503  *  @ring_info: per ring structure
2504  *  Description:
2505  *  The function allocates Rx side skbs and puts the physical
2506  *  address of these buffers into the RxD buffer pointers, so that the NIC
2507  *  can DMA the received frame into these locations.
2508  *  The NIC supports 3 receive modes, viz
2509  *  1. single buffer,
2510  *  2. three buffer and
2511  *  3. Five buffer modes.
2512  *  Each mode defines how many fragments the received frame will be split
2513  *  up into by the NIC. The frame is split into L3 header, L4 Header,
2514  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2515  *  is split into 3 fragments. As of now only single buffer mode is
2516  *  supported.
2517  *   Return Value:
2518  *  SUCCESS on success or an appropriate -ve value on failure.
2519  */
2520
static int fill_rx_buffers(struct ring_info *ring)
{
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	int off, size, block_no, block_no1;
	u32 alloc_tab = 0;
	u32 alloc_cnt;
	u64 tmp;
	struct buffAdd *ba;
	struct RxD_t *first_rxdp = NULL;
	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
	int rxd_index = 0;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;
	struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;

	/* Number of RxDs that still need a buffer on this ring. */
	alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;

	block_no1 = ring->rx_curr_get_info.block_index;
	while (alloc_tab < alloc_cnt) {
		block_no = ring->rx_curr_put_info.block_index;

		off = ring->rx_curr_put_info.offset;

		rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;

		/* NOTE(review): rxd_index is computed but never used in this
		 * function - confirm it is dead code. */
		rxd_index = off + 1;
		if (block_no)
			rxd_index += (block_no * ring->rxd_count);

		/* Ring effectively full: put has caught up with get on a
		 * descriptor the host still owns. */
		if ((block_no == block_no1) &&
			(off == ring->rx_curr_get_info.offset) &&
			(rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
				ring->dev->name);
			DBG_PRINT(INTR_DBG, " info equated\n");
			goto end;
		}
		/* Reached the end of a block: advance (wrapping) the put
		 * block index and restart at offset 0. */
		if (off && (off == ring->rxd_count)) {
			ring->rx_curr_put_info.block_index++;
			if (ring->rx_curr_put_info.block_index ==
							ring->block_count)
				ring->rx_curr_put_info.block_index = 0;
			block_no = ring->rx_curr_put_info.block_index;
			off = 0;
			ring->rx_curr_put_info.offset = off;
			rxdp = ring->rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
				  ring->dev->name, rxdp);

		}

		/* Descriptor still owned by the NIC - nothing to refill. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
			((ring->rxd_mode == RXD_MODE_3B) &&
				(rxdp->Control_2 & s2BIT(0)))) {
			ring->rx_curr_put_info.offset = off;
			goto end;
		}
		/* calculate size of skb based on ring mode */
		size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (ring->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else
			size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;

		/* allocate skb */
		skb = dev_alloc_skb(size);
		if (!skb) {
			DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
			/* Hand any batched descriptors to the NIC before
			 * bailing out, so they are not lost. */
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			stats->mem_alloc_fail_cnt++;

			return -ENOMEM;
		}
		stats->mem_allocated += skb->truesize;

		if (ring->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			rxdp1 = (struct RxD1*)rxdp;
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			rxdp1->Buffer0_ptr = pci_map_single
			    (ring->pdev, skb->data, size - NET_IP_ALIGN,
				PCI_DMA_FROMDEVICE);
			if ((rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr ==
				DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 =
				SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
			rxdp->Host_Control = (unsigned long) (skb);
		} else if (ring->rxd_mode == RXD_MODE_3B) {
			/*
			 * 2 buffer mode -
			 * 2 buffer mode provides 128
			 * byte aligned receive buffers.
			 */

			rxdp3 = (struct RxD3*)rxdp;
			/* save buffer pointers to avoid frequent dma mapping */
			Buffer0_ptr = rxdp3->Buffer0_ptr;
			Buffer1_ptr = rxdp3->Buffer1_ptr;
			memset(rxdp, 0, sizeof(struct RxD3));
			/* restore the buffer pointers for dma sync*/
			rxdp3->Buffer0_ptr = Buffer0_ptr;
			rxdp3->Buffer1_ptr = Buffer1_ptr;

			ba = &ring->ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Round skb->data up to the next ALIGN_SIZE boundary. */
			tmp = (u64)(unsigned long) skb->data;
			tmp += ALIGN_SIZE;
			tmp &= ~ALIGN_SIZE;
			skb->data = (void *) (unsigned long)tmp;
			skb_reset_tail_pointer(skb);

			if (!(rxdp3->Buffer0_ptr))
				rxdp3->Buffer0_ptr =
				   pci_map_single(ring->pdev, ba->ba_0,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
			else
				pci_dma_sync_single_for_device(ring->pdev,
				(dma_addr_t) rxdp3->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			if ((rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
				goto pci_map_failed;

			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (ring->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload
				 */
				rxdp3->Buffer2_ptr = pci_map_single
				(ring->pdev, skb->data, ring->mtu + 4,
						PCI_DMA_FROMDEVICE);

				if ((rxdp3->Buffer2_ptr == 0) ||
					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
					goto pci_map_failed;

				if (!rxdp3->Buffer1_ptr)
					rxdp3->Buffer1_ptr =
						pci_map_single(ring->pdev,
						ba->ba_1, BUF1_LEN,
						PCI_DMA_FROMDEVICE);

				if ((rxdp3->Buffer1_ptr == 0) ||
					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
					/* Undo the Buffer2 mapping before
					 * taking the failure path. */
					pci_unmap_single
						(ring->pdev,
						(dma_addr_t)(unsigned long)
						skb->data,
						ring->mtu + 4,
						PCI_DMA_FROMDEVICE);
					goto pci_map_failed;
				}
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
								(ring->mtu + 4);
			}
			rxdp->Control_2 |= s2BIT(0);
			rxdp->Host_Control = (unsigned long) (skb);
		}
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		off++;
		if (off == (ring->rxd_count + 1))
			off = 0;
		ring->rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		/* Every 2^rxsync_frequency descriptors, hand the previous
		 * batch leader to the NIC so it sees fully-written RxDs. */
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
			if (first_rxdp) {
				wmb();
				first_rxdp->Control_1 |= RXD_OWN_XENA;
			}
			first_rxdp = rxdp;
		}
		ring->rx_bufs_left += 1;
		alloc_tab++;
	}

      end:
	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
	if (first_rxdp) {
		wmb();
		first_rxdp->Control_1 |= RXD_OWN_XENA;
	}

	return SUCCESS;
pci_map_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += skb->truesize;
	dev_kfree_skb_irq(skb);
	return -ENOMEM;
}
2729
2730 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2731 {
2732         struct net_device *dev = sp->dev;
2733         int j;
2734         struct sk_buff *skb;
2735         struct RxD_t *rxdp;
2736         struct mac_info *mac_control;
2737         struct buffAdd *ba;
2738         struct RxD1 *rxdp1;
2739         struct RxD3 *rxdp3;
2740
2741         mac_control = &sp->mac_control;
2742         for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2743                 rxdp = mac_control->rings[ring_no].
2744                                 rx_blocks[blk].rxds[j].virt_addr;
2745                 skb = (struct sk_buff *)
2746                         ((unsigned long) rxdp->Host_Control);
2747                 if (!skb) {
2748                         continue;
2749                 }
2750                 if (sp->rxd_mode == RXD_MODE_1) {
2751                         rxdp1 = (struct RxD1*)rxdp;
2752                         pci_unmap_single(sp->pdev, (dma_addr_t)
2753                                 rxdp1->Buffer0_ptr,
2754                                 dev->mtu +
2755                                 HEADER_ETHERNET_II_802_3_SIZE
2756                                 + HEADER_802_2_SIZE +
2757                                 HEADER_SNAP_SIZE,
2758                                 PCI_DMA_FROMDEVICE);
2759                         memset(rxdp, 0, sizeof(struct RxD1));
2760                 } else if(sp->rxd_mode == RXD_MODE_3B) {
2761                         rxdp3 = (struct RxD3*)rxdp;
2762                         ba = &mac_control->rings[ring_no].
2763                                 ba[blk][j];
2764                         pci_unmap_single(sp->pdev, (dma_addr_t)
2765                                 rxdp3->Buffer0_ptr,
2766                                 BUF0_LEN,
2767                                 PCI_DMA_FROMDEVICE);
2768                         pci_unmap_single(sp->pdev, (dma_addr_t)
2769                                 rxdp3->Buffer1_ptr,
2770                                 BUF1_LEN,
2771                                 PCI_DMA_FROMDEVICE);
2772                         pci_unmap_single(sp->pdev, (dma_addr_t)
2773                                 rxdp3->Buffer2_ptr,
2774                                 dev->mtu + 4,
2775                                 PCI_DMA_FROMDEVICE);
2776                         memset(rxdp, 0, sizeof(struct RxD3));
2777                 }
2778                 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2779                 dev_kfree_skb(skb);
2780                 mac_control->rings[ring_no].rx_bufs_left -= 1;
2781         }
2782 }
2783
2784 /**
2785  *  free_rx_buffers - Frees all Rx buffers
2786  *  @sp: device private variable.
2787  *  Description:
2788  *  This function will free all Rx buffers allocated by host.
2789  *  Return Value:
2790  *  NONE.
2791  */
2792
2793 static void free_rx_buffers(struct s2io_nic *sp)
2794 {
2795         struct net_device *dev = sp->dev;
2796         int i, blk = 0, buf_cnt = 0;
2797         struct mac_info *mac_control;
2798         struct config_param *config;
2799
2800         mac_control = &sp->mac_control;
2801         config = &sp->config;
2802
2803         for (i = 0; i < config->rx_ring_num; i++) {
2804                 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2805                         free_rxd_blk(sp,i,blk);
2806
2807                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2808                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2809                 mac_control->rings[i].rx_curr_put_info.offset = 0;
2810                 mac_control->rings[i].rx_curr_get_info.offset = 0;
2811                 mac_control->rings[i].rx_bufs_left = 0;
2812                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2813                           dev->name, buf_cnt, i);
2814         }
2815 }
2816
2817 /**
2818  * s2io_poll - Rx interrupt handler for NAPI support
2819  * @napi : pointer to the napi structure.
2820  * @budget : The number of packets that were budgeted to be processed
2821  * during  one pass through the 'Poll" function.
2822  * Description:
2823  * Comes into the picture only if NAPI support has been incorporated. It does
2824  * the same thing that rx_intr_handler does, but not in an interrupt context.
2825  * Also, it will process only a given number of packets.
2826  * Return value:
2827  * 0 on success and 1 if there are No Rx packets to be processed.
2828  */
2829
2830 static int s2io_poll(struct napi_struct *napi, int budget)
2831 {
2832         struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2833         struct net_device *dev = nic->dev;
2834         int pkt_cnt = 0, org_pkts_to_process;
2835         struct mac_info *mac_control;
2836         struct config_param *config;
2837         struct XENA_dev_config __iomem *bar0 = nic->bar0;
2838         int i;
2839
2840         mac_control = &nic->mac_control;
2841         config = &nic->config;
2842
2843         nic->pkts_to_process = budget;
2844         org_pkts_to_process = nic->pkts_to_process;
2845
2846         writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2847         readl(&bar0->rx_traffic_int);
2848
2849         for (i = 0; i < config->rx_ring_num; i++) {
2850                 rx_intr_handler(&mac_control->rings[i]);
2851                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2852                 if (!nic->pkts_to_process) {
2853                         /* Quota for the current iteration has been met */
2854                         goto no_rx;
2855                 }
2856         }
2857
2858         netif_rx_complete(dev, napi);
2859
2860         for (i = 0; i < config->rx_ring_num; i++) {
2861                 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2862                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2863                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2864                         break;
2865                 }
2866         }
2867         /* Re enable the Rx interrupts. */
2868         writeq(0x0, &bar0->rx_traffic_mask);
2869         readl(&bar0->rx_traffic_mask);
2870         return pkt_cnt;
2871
2872 no_rx:
2873         for (i = 0; i < config->rx_ring_num; i++) {
2874                 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
2875                         DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2876                         DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2877                         break;
2878                 }
2879         }
2880         return pkt_cnt;
2881 }
2882
2883 #ifdef CONFIG_NET_POLL_CONTROLLER
2884 /**
2885  * s2io_netpoll - netpoll event handler entry point
2886  * @dev : pointer to the device structure.
2887  * Description:
2888  *      This function will be called by upper layer to check for events on the
2889  * interface in situations where interrupts are disabled. It is used for
2890  * specific in-kernel networking tasks, such as remote consoles and kernel
2891  * debugging over the network (example netdump in RedHat).
2892  */
static void s2io_netpoll(struct net_device *dev)
{
        struct s2io_nic *nic = dev->priv;
        struct mac_info *mac_control;
        struct config_param *config;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        u64 val64 = 0xFFFFFFFFFFFFFFFFULL; /* all-ones: acknowledge every Tx/Rx interrupt bit */
        int i;

        /* Don't touch the hardware if the PCI channel is offline (e.g. EEH). */
        if (pci_channel_offline(nic->pdev))
                return;

        /* Netpoll runs with normal interrupt delivery unavailable; mask the
         * device IRQ while we poll the rings by hand. */
        disable_irq(dev->irq);

        mac_control = &nic->mac_control;
        config = &nic->config;

        /* Ack any pending Tx/Rx traffic interrupts before polling. */
        writeq(val64, &bar0->rx_traffic_int);
        writeq(val64, &bar0->tx_traffic_int);

        /* we need to free up the transmitted skbufs or else netpoll will
         * run out of skbs and will fail and eventually netpoll application such
         * as netdump will fail.
         */
        for (i = 0; i < config->tx_fifo_num; i++)
                tx_intr_handler(&mac_control->fifos[i]);

        /* check for received packet and indicate up to network */
        for (i = 0; i < config->rx_ring_num; i++)
                rx_intr_handler(&mac_control->rings[i]);

        /* Replenish the receive rings; -ENOMEM just stops the refill early. */
        for (i = 0; i < config->rx_ring_num; i++) {
                if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
                        break;
                }
        }
        enable_irq(dev->irq);
        return;
}
2934 #endif
2935
2936 /**
2937  *  rx_intr_handler - Rx interrupt handler
 *  @ring_data: per-ring control structure of the receive ring to service.
2939  *  Description:
2940  *  If the interrupt is because of a received frame or if the
2941  *  receive ring contains fresh as yet un-processed frames,this function is
2942  *  called. It picks out the RxD at which place the last Rx processing had
2943  *  stopped and sends the skb to the OSM's Rx handler and then increments
2944  *  the offset.
2945  *  Return Value:
2946  *  NONE.
2947  */
static void rx_intr_handler(struct ring_info *ring_data)
{
        int get_block, put_block;
        struct rx_curr_get_info get_info, put_info;
        struct RxD_t *rxdp;
        struct sk_buff *skb;
        int pkt_cnt = 0;
        int i;
        struct RxD1* rxdp1;
        struct RxD3* rxdp3;

        /* Snapshot the consumer ("get") and producer ("put") cursors. */
        get_info = ring_data->rx_curr_get_info;
        get_block = get_info.block_index;
        memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
        put_block = put_info.block_index;
        rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

        /* Walk every descriptor the NIC has handed back to the host. */
        while (RXD_IS_UP2DT(rxdp)) {
                /*
                 * If we are next to the put index then it's
                 * a FIFO full condition.
                 */
                if ((get_block == put_block) &&
                    (get_info.offset + 1) == put_info.offset) {
                        DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
                                ring_data->dev->name);
                        break;
                }
                /* Host_Control carries the skb pointer stashed at fill time. */
                skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
                if (skb == NULL) {
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
                                  ring_data->dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
                        return;
                }
                /* Unmap (or sync) the DMA buffers according to the RxD mode. */
                if (ring_data->rxd_mode == RXD_MODE_1) {
                        rxdp1 = (struct RxD1*)rxdp;
                        pci_unmap_single(ring_data->pdev, (dma_addr_t)
                                rxdp1->Buffer0_ptr,
                                ring_data->mtu +
                                HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE +
                                HEADER_SNAP_SIZE,
                                PCI_DMA_FROMDEVICE);
                } else if (ring_data->rxd_mode == RXD_MODE_3B) {
                        rxdp3 = (struct RxD3*)rxdp;
                        pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
                                rxdp3->Buffer0_ptr,
                                BUF0_LEN, PCI_DMA_FROMDEVICE);
                        pci_unmap_single(ring_data->pdev, (dma_addr_t)
                                rxdp3->Buffer2_ptr,
                                ring_data->mtu + 4,
                                PCI_DMA_FROMDEVICE);
                }
                prefetch(skb->data);
                rx_osm_handler(ring_data, rxdp);
                /* Advance the get cursor, wrapping to the next descriptor
                 * block when the current one is exhausted. */
                get_info.offset++;
                ring_data->rx_curr_get_info.offset = get_info.offset;
                rxdp = ring_data->rx_blocks[get_block].
                                rxds[get_info.offset].virt_addr;
                if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
                        get_info.offset = 0;
                        ring_data->rx_curr_get_info.offset = get_info.offset;
                        get_block++;
                        if (get_block == ring_data->block_count)
                                get_block = 0;
                        ring_data->rx_curr_get_info.block_index = get_block;
                        rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
                }

                /* Under NAPI, honour the per-poll packet quota. */
                if(ring_data->nic->config.napi){
                        ring_data->nic->pkts_to_process -= 1;
                        if (!ring_data->nic->pkts_to_process)
                                break;
                }
                pkt_cnt++;
                /* Optional module-parameter cap on packets per invocation. */
                if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
                        break;
        }
        if (ring_data->lro) {
                /* Clear all LRO sessions before exiting */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
                        struct lro *lro = &ring_data->lro0_n[i];
                        if (lro->in_use) {
                                update_L3L4_header(ring_data->nic, lro);
                                queue_rx_frame(lro->parent, lro->vlan_tag);
                                clear_lro_session(lro);
                        }
                }
        }
}
3039
3040 /**
3041  *  tx_intr_handler - Transmit interrupt handler
 *  @fifo_data : fifo control structure of the transmit FIFO to service
3043  *  Description:
3044  *  If an interrupt was raised to indicate DMA complete of the
3045  *  Tx packet, this function is called. It identifies the last TxD
3046  *  whose buffer was freed and frees all skbs whose data have already
3047  *  DMA'ed into the NICs internal memory.
3048  *  Return Value:
3049  *  NONE
3050  */
3051
static void tx_intr_handler(struct fifo_info *fifo_data)
{
        struct s2io_nic *nic = fifo_data->nic;
        struct tx_curr_get_info get_info, put_info;
        struct sk_buff *skb = NULL;
        struct TxD *txdlp;
        int pkt_cnt = 0;
        unsigned long flags = 0;
        u8 err_mask;

        /* If another context already holds the fifo lock it will reap the
         * completed descriptors itself; just back off. */
        if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
                        return;

        get_info = fifo_data->tx_curr_get_info;
        memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
        txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
            list_virt_addr;
        /* Reap descriptors no longer owned by the NIC, up to the put
         * cursor. Host_Control == 0 marks an unused descriptor list. */
        while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
               (get_info.offset != put_info.offset) &&
               (txdlp->Host_Control)) {
                /* Check for TxD errors */
                if (txdlp->Control_1 & TXD_T_CODE) {
                        unsigned long long err;
                        err = txdlp->Control_1 & TXD_T_CODE;
                        if (err & 0x1) {
                                nic->mac_control.stats_info->sw_stat.
                                                parity_err_cnt++;
                        }

                        /* update t_code statistics */
                        err_mask = err >> 48;
                        switch(err_mask) {
                                case 2:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_buf_abort_cnt++;
                                break;

                                case 3:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_desc_abort_cnt++;
                                break;

                                case 7:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_parity_err_cnt++;
                                break;

                                case 10:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_link_loss_cnt++;
                                break;

                                case 15:
                                        nic->mac_control.stats_info->sw_stat.
                                                        tx_list_proc_err_cnt++;
                                break;
                        }
                }

                /* Retrieve and free the skb attached to this descriptor list. */
                skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
                if (skb == NULL) {
                        spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
                        DBG_PRINT(ERR_DBG, "%s: Null skb ",
                        __FUNCTION__);
                        DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
                        return;
                }
                pkt_cnt++;

                /* Updating the statistics block */
                nic->stats.tx_bytes += skb->len;
                nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
                dev_kfree_skb_irq(skb);

                /* Advance the get cursor, wrapping at the end of the list. */
                get_info.offset++;
                if (get_info.offset == get_info.fifo_len + 1)
                        get_info.offset = 0;
                txdlp = (struct TxD *) fifo_data->list_info
                    [get_info.offset].list_virt_addr;
                fifo_data->tx_curr_get_info.offset =
                    get_info.offset;
        }

        /* Restart the (per-fifo or global) Tx queue if enough room was freed. */
        s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

        spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3139
3140 /**
3141  *  s2io_mdio_write - Function to write in to MDIO registers
3142  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3143  *  @addr     : address value
3144  *  @value    : data value
3145  *  @dev      : pointer to net_device structure
3146  *  Description:
3147  *  This function is used to write values to the MDIO registers
3148  *  NONE
3149  */
3150 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3151 {
3152         u64 val64 = 0x0;
3153         struct s2io_nic *sp = dev->priv;
3154         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3155
3156         //address transaction
3157         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3158                         | MDIO_MMD_DEV_ADDR(mmd_type)
3159                         | MDIO_MMS_PRT_ADDR(0x0);
3160         writeq(val64, &bar0->mdio_control);
3161         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3162         writeq(val64, &bar0->mdio_control);
3163         udelay(100);
3164
3165         //Data transaction
3166         val64 = 0x0;
3167         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3168                         | MDIO_MMD_DEV_ADDR(mmd_type)
3169                         | MDIO_MMS_PRT_ADDR(0x0)
3170                         | MDIO_MDIO_DATA(value)
3171                         | MDIO_OP(MDIO_OP_WRITE_TRANS);
3172         writeq(val64, &bar0->mdio_control);
3173         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3174         writeq(val64, &bar0->mdio_control);
3175         udelay(100);
3176
3177         val64 = 0x0;
3178         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3179         | MDIO_MMD_DEV_ADDR(mmd_type)
3180         | MDIO_MMS_PRT_ADDR(0x0)
3181         | MDIO_OP(MDIO_OP_READ_TRANS);
3182         writeq(val64, &bar0->mdio_control);
3183         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3184         writeq(val64, &bar0->mdio_control);
3185         udelay(100);
3186
3187 }
3188
3189 /**
 *  s2io_mdio_read - Function to read from the MDIO registers
3191  *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3192  *  @addr     : address value
3193  *  @dev      : pointer to net_device structure
3194  *  Description:
 *  This function is used to read values from the MDIO registers
3196  *  NONE
3197  */
3198 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3199 {
3200         u64 val64 = 0x0;
3201         u64 rval64 = 0x0;
3202         struct s2io_nic *sp = dev->priv;
3203         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3204
3205         /* address transaction */
3206         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3207                         | MDIO_MMD_DEV_ADDR(mmd_type)
3208                         | MDIO_MMS_PRT_ADDR(0x0);
3209         writeq(val64, &bar0->mdio_control);
3210         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3211         writeq(val64, &bar0->mdio_control);
3212         udelay(100);
3213
3214         /* Data transaction */
3215         val64 = 0x0;
3216         val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3217                         | MDIO_MMD_DEV_ADDR(mmd_type)
3218                         | MDIO_MMS_PRT_ADDR(0x0)
3219                         | MDIO_OP(MDIO_OP_READ_TRANS);
3220         writeq(val64, &bar0->mdio_control);
3221         val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3222         writeq(val64, &bar0->mdio_control);
3223         udelay(100);
3224
3225         /* Read the value from regs */
3226         rval64 = readq(&bar0->mdio_control);
3227         rval64 = rval64 & 0xFFFF0000;
3228         rval64 = rval64 >> 16;
3229         return rval64;
3230 }
3231 /**
3232  *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
 *  @counter      : counter value to be updated
 *  @regs_stat    : packed register holding a 2-bit event counter per alarm
 *  @index        : position of this alarm's 2-bit field within @regs_stat
 *  @flag         : flag to indicate the status
 *  @type         : counter type
3236  *  Description:
3237  *  This function is to check the status of the xpak counters value
3238  *  NONE
3239  */
3240
3241 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3242 {
3243         u64 mask = 0x3;
3244         u64 val64;
3245         int i;
3246         for(i = 0; i <index; i++)
3247                 mask = mask << 0x2;
3248
3249         if(flag > 0)
3250         {
3251                 *counter = *counter + 1;
3252                 val64 = *regs_stat & mask;
3253                 val64 = val64 >> (index * 0x2);
3254                 val64 = val64 + 1;
3255                 if(val64 == 3)
3256                 {
3257                         switch(type)
3258                         {
3259                         case 1:
3260                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3261                                           "service. Excessive temperatures may "
3262                                           "result in premature transceiver "
3263                                           "failure \n");
3264                         break;
3265                         case 2:
3266                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3267                                           "service Excessive bias currents may "
3268                                           "indicate imminent laser diode "
3269                                           "failure \n");
3270                         break;
3271                         case 3:
3272                                 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3273                                           "service Excessive laser output "
3274                                           "power may saturate far-end "
3275                                           "receiver\n");
3276                         break;
3277                         default:
3278                                 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3279                                           "type \n");
3280                         }
3281                         val64 = 0x0;
3282                 }
3283                 val64 = val64 << (index * 0x2);
3284                 *regs_stat = (*regs_stat & (~mask)) | (val64);
3285
3286         } else {
3287                 *regs_stat = *regs_stat & (~mask);
3288         }
3289 }
3290
3291 /**
3292  *  s2io_updt_xpak_counter - Function to update the xpak counters
3293  *  @dev         : pointer to net_device struct
3294  *  Description:
 *  This function is to update the status of the xpak counters value
3296  *  NONE
3297  */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
        u16 flag  = 0x0;
        u16 type  = 0x0;
        u16 val16 = 0x0;
        u64 val64 = 0x0;
        u64 addr  = 0x0;

        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Check the communication with the MDIO slave */
        addr = 0x0000;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
        /* All-ones or all-zeroes means the slave did not respond. */
        if((val64 == 0xFFFF) || (val64 == 0x0000))
        {
                DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
                          "Returned %llx\n", (unsigned long long)val64);
                return;
        }

        /* Check for the expected value of 0x2040 at PMA address 0x0000 */
        if(val64 != 0x2040)
        {
                DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
                DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
                          (unsigned long long)val64);
                return;
        }

        /* Loading the DOM register to MDIO register */
        addr = 0xA100;
        s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Reading the Alarm flags */
        addr = 0xA070;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        /* Alarm bit 7: transceiver temperature high (type 1, field 0). */
        flag = CHECKBIT(val64, 0x7);
        type = 1;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x0, flag, type);

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.alarm_transceiver_temp_low++;

        /* Alarm bit 3: laser bias current high (type 2, field 2). */
        flag = CHECKBIT(val64, 0x3);
        type = 2;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x2, flag, type);

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.alarm_laser_bias_current_low++;

        /* Alarm bit 1: laser output power high (type 3, field 4). */
        flag = CHECKBIT(val64, 0x1);
        type = 3;
        s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
                                &stat_info->xpak_stat.xpak_regs_stat,
                                0x4, flag, type);

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.alarm_laser_output_power_low++;

        /* Reading the Warning flags */
        addr = 0xA074;
        val64 = 0x0;
        val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

        if(CHECKBIT(val64, 0x7))
                stat_info->xpak_stat.warn_transceiver_temp_high++;

        if(CHECKBIT(val64, 0x6))
                stat_info->xpak_stat.warn_transceiver_temp_low++;

        if(CHECKBIT(val64, 0x3))
                stat_info->xpak_stat.warn_laser_bias_current_high++;

        if(CHECKBIT(val64, 0x2))
                stat_info->xpak_stat.warn_laser_bias_current_low++;

        if(CHECKBIT(val64, 0x1))
                stat_info->xpak_stat.warn_laser_output_power_high++;

        if(CHECKBIT(val64, 0x0))
                stat_info->xpak_stat.warn_laser_output_power_low++;
}
3389
3390 /**
3391  *  wait_for_cmd_complete - waits for a command to complete.
 *  @addr : address of the register to poll.
 *  @busy_bit : bit mask identifying the busy/ready bit in that register.
 *  @bit_state : S2IO_BIT_RESET to wait for the bit to clear,
 *  S2IO_BIT_SET to wait for it to become set.
3394  *  Description: Function that waits for a command to Write into RMAC
3395  *  ADDR DATA registers to be completed and returns either success or
3396  *  error depending on whether the command was complete or not.
3397  *  Return value:
3398  *   SUCCESS on success and FAILURE on failure.
3399  */
3400
3401 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3402                                 int bit_state)
3403 {
3404         int ret = FAILURE, cnt = 0, delay = 1;
3405         u64 val64;
3406
3407         if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3408                 return FAILURE;
3409
3410         do {
3411                 val64 = readq(addr);
3412                 if (bit_state == S2IO_BIT_RESET) {
3413                         if (!(val64 & busy_bit)) {
3414                                 ret = SUCCESS;
3415                                 break;
3416                         }
3417                 } else {
3418                         if (!(val64 & busy_bit)) {
3419                                 ret = SUCCESS;
3420                                 break;
3421                         }
3422                 }
3423
3424                 if(in_interrupt())
3425                         mdelay(delay);
3426                 else
3427                         msleep(delay);
3428
3429                 if (++cnt >= 10)
3430                         delay = 50;
3431         } while (cnt < 20);
3432         return ret;
3433 }
3434 /*
3435  * check_pci_device_id - Checks if the device id is supported
3436  * @id : device id
3437  * Description: Function to check if the pci device id is supported by driver.
3438  * Return value: Actual device id if supported else PCI_ANY_ID
3439  */
3440 static u16 check_pci_device_id(u16 id)
3441 {
3442         switch (id) {
3443         case PCI_DEVICE_ID_HERC_WIN:
3444         case PCI_DEVICE_ID_HERC_UNI:
3445                 return XFRAME_II_DEVICE;
3446         case PCI_DEVICE_ID_S2IO_UNI:
3447         case PCI_DEVICE_ID_S2IO_WIN:
3448                 return XFRAME_I_DEVICE;
3449         default:
3450                 return PCI_ANY_ID;
3451         }
3452 }
3453
3454 /**
3455  *  s2io_reset - Resets the card.
3456  *  @sp : private member of the device structure.
3457  *  Description: Function to Reset the card. This function then also
3458  *  restores the previously saved PCI configuration space registers as
3459  *  the card reset also resets the configuration space.
3460  *  Return value:
3461  *  void.
3462  */
3463
static void s2io_reset(struct s2io_nic * sp)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        u16 subid, pci_cmd;
        int i;
        u16 val16;
        unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
        unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

        DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
                        __FUNCTION__, sp->dev->name);

        /* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
        pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

        /* Issue the software reset; CX4 cards need extra settle time. */
        val64 = SW_RESET_ALL;
        writeq(val64, &bar0->sw_reset);
        if (strstr(sp->product_name, "CX4")) {
                msleep(750);
        }
        msleep(250);
        /* Re-init the PCI config space until the device responds with a
         * recognised device id, or give up after a fixed number of tries. */
        for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

                /* Restore the PCI state saved during initialization. */
                pci_restore_state(sp->pdev);
                pci_read_config_word(sp->pdev, 0x2, &val16);
                if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
                        break;
                msleep(200);
        }

        if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
                DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
        }

        /* Restore the PCI-X command register backed up above. */
        pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

        s2io_init_pci(sp);

        /* Set swapper to enable I/O register access */
        s2io_set_swapper(sp);

        /* restore mac_addr entries */
        do_s2io_restore_unicast_mc(sp);

        /* Restore the MSIX table entries from local variables */
        restore_xmsi_data(sp);

        /* Clear certain PCI/PCI-X fields after reset */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Clear "detected parity error" bit */
                pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

                /* Clearing PCIX Ecc status register */
                pci_write_config_dword(sp->pdev, 0x68, 0x7C);

                /* Clearing PCI_STATUS error reflected here */
                writeq(s2BIT(62), &bar0->txpic_int_reg);
        }

        /* Reset device statistics maintained by OS */
        memset(&sp->stats, 0, sizeof (struct net_device_stats));

        /* Snapshot the counters that must survive the stat-block wipe. */
        up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
        down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
        up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
        down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
        reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
        mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
        mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
        watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
        /* save link up/down time/cnt, reset/memory/watchdog cnt */
        memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
        /* restore link up/down time/cnt, reset/memory/watchdog cnt */
        sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
        sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
        sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
        sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
        sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
        sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
        sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
        sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

        /* SXE-002: Configure link and activity LED to turn it off */
        subid = sp->pdev->subsystem_device;
        if (((subid & 0xFF) >= 0x07) &&
            (sp->device_type == XFRAME_I_DEVICE)) {
                val64 = readq(&bar0->gpio_control);
                val64 |= 0x0000800000000000ULL;
                writeq(val64, &bar0->gpio_control);
                val64 = 0x0411040400000000ULL;
                writeq(val64, (void __iomem *)bar0 + 0x2700);
        }

        /*
         * Clear spurious ECC interrupts that would have occurred on
         * XFRAME II cards after reset.
         */
        if (sp->device_type == XFRAME_II_DEVICE) {
                val64 = readq(&bar0->pcc_err_reg);
                writeq(val64, &bar0->pcc_err_reg);
        }

        sp->device_enabled_once = FALSE;
}
3570
3571 /**
 *  s2io_set_swapper - to set the swapper control on the card
3573  *  @sp : private member of the device structure,
3574  *  pointer to the s2io_nic structure.
3575  *  Description: Function to set the swapper control on the card
3576  *  correctly depending on the 'endianness' of the system.
3577  *  Return value:
3578  *  SUCCESS on success and FAILURE on failure.
3579  */
3580
3581 static int s2io_set_swapper(struct s2io_nic * sp)
3582 {
3583         struct net_device *dev = sp->dev;
3584         struct XENA_dev_config __iomem *bar0 = sp->bar0;
3585         u64 val64, valt, valr;
3586
3587         /*
3588          * Set proper endian settings and verify the same by reading
3589          * the PIF Feed-back register.
3590          */
3591
3592         val64 = readq(&bar0->pif_rd_swapper_fb);
3593         if (val64 != 0x0123456789ABCDEFULL) {
3594                 int i = 0;
3595                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
3596                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
3597                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
3598                                 0};                     /* FE=0, SE=0 */
3599
3600                 while(i<4) {
3601                         writeq(value[i], &bar0->swapper_ctrl);
3602                         val64 = readq(&bar0->pif_rd_swapper_fb);
3603                         if (val64 == 0x0123456789ABCDEFULL)
3604                                 break;
3605                         i++;
3606                 }
3607                 if (i == 4) {
3608                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3609                                 dev->name);
3610                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3611                                 (unsigned long long) val64);
3612                         return FAILURE;
3613                 }
3614                 valr = value[i];
3615         } else {
3616                 valr = readq(&bar0->swapper_ctrl);
3617         }
3618
3619         valt = 0x0123456789ABCDEFULL;
3620         writeq(valt, &bar0->xmsi_address);
3621         val64 = readq(&bar0->xmsi_address);
3622
3623         if(val64 != valt) {
3624                 int i = 0;
3625                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
3626                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
3627                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
3628                                 0};                     /* FE=0, SE=0 */
3629
3630                 while(i<4) {
3631                         writeq((value[i] | valr), &bar0->swapper_ctrl);
3632                         writeq(valt, &bar0->xmsi_address);
3633                         val64 = readq(&bar0->xmsi_address);
3634                         if(val64 == valt)
3635                                 break;
3636                         i++;
3637                 }
3638                 if(i == 4) {
3639                         unsigned long long x = val64;
3640                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3641                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3642                         return FAILURE;
3643                 }
3644         }
3645         val64 = readq(&bar0->swapper_ctrl);
3646         val64 &= 0xFFFF000000000000ULL;
3647
3648 #ifdef  __BIG_ENDIAN
3649         /*
3650          * The device by default set to a big endian format, so a
3651          * big endian driver need not set anything.
3652          */
3653         val64 |= (SWAPPER_CTRL_TXP_FE |
3654                  SWAPPER_CTRL_TXP_SE |
3655                  SWAPPER_CTRL_TXD_R_FE |
3656                  SWAPPER_CTRL_TXD_W_FE |
3657                  SWAPPER_CTRL_TXF_R_FE |
3658                  SWAPPER_CTRL_RXD_R_FE |
3659                  SWAPPER_CTRL_RXD_W_FE |
3660                  SWAPPER_CTRL_RXF_W_FE |
3661                  SWAPPER_CTRL_XMSI_FE |
3662                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3663         if (sp->config.intr_type == INTA)
3664                 val64 |= SWAPPER_CTRL_XMSI_SE;
3665         writeq(val64, &bar0->swapper_ctrl);
3666 #else
3667         /*
3668          * Initially we enable all bits to make it accessible by the
3669          * driver, then we selectively enable only those bits that
3670          * we want to set.
3671          */
3672         val64 |= (SWAPPER_CTRL_TXP_FE |
3673                  SWAPPER_CTRL_TXP_SE |
3674                  SWAPPER_CTRL_TXD_R_FE |
3675                  SWAPPER_CTRL_TXD_R_SE |
3676                  SWAPPER_CTRL_TXD_W_FE |
3677                  SWAPPER_CTRL_TXD_W_SE |
3678                  SWAPPER_CTRL_TXF_R_FE |
3679                  SWAPPER_CTRL_RXD_R_FE |
3680                  SWAPPER_CTRL_RXD_R_SE |
3681                  SWAPPER_CTRL_RXD_W_FE |
3682                  SWAPPER_CTRL_RXD_W_SE |
3683                  SWAPPER_CTRL_RXF_W_FE |
3684                  SWAPPER_CTRL_XMSI_FE |
3685                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3686         if (sp->config.intr_type == INTA)
3687                 val64 |= SWAPPER_CTRL_XMSI_SE;
3688         writeq(val64, &bar0->swapper_ctrl);
3689 #endif
3690         val64 = readq(&bar0->swapper_ctrl);
3691
3692         /*
3693          * Verifying if endian settings are accurate by reading a
3694          * feedback register.
3695          */
3696         val64 = readq(&bar0->pif_rd_swapper_fb);
3697         if (val64 != 0x0123456789ABCDEFULL) {
3698                 /* Endian settings are incorrect, calls for another dekko. */
3699                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3700                           dev->name);
3701                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3702                           (unsigned long long) val64);
3703                 return FAILURE;
3704         }
3705
3706         return SUCCESS;
3707 }
3708
3709 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3710 {
3711         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3712         u64 val64;
3713         int ret = 0, cnt = 0;
3714
3715         do {
3716                 val64 = readq(&bar0->xmsi_access);
3717                 if (!(val64 & s2BIT(15)))
3718                         break;
3719                 mdelay(1);
3720                 cnt++;
3721         } while(cnt < 5);
3722         if (cnt == 5) {
3723                 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3724                 ret = 1;
3725         }
3726
3727         return ret;
3728 }
3729
3730 static void restore_xmsi_data(struct s2io_nic *nic)
3731 {
3732         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3733         u64 val64;
3734         int i;
3735
3736         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3737                 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3738                 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3739                 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
3740                 writeq(val64, &bar0->xmsi_access);
3741                 if (wait_for_msix_trans(nic, i)) {
3742                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3743                         continue;
3744                 }
3745         }
3746 }
3747
3748 static void store_xmsi_data(struct s2io_nic *nic)
3749 {
3750         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3751         u64 val64, addr, data;
3752         int i;
3753
3754         /* Store and display */
3755         for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3756                 val64 = (s2BIT(15) | vBIT(i, 26, 6));
3757                 writeq(val64, &bar0->xmsi_access);
3758                 if (wait_for_msix_trans(nic, i)) {
3759                         DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3760                         continue;
3761                 }
3762                 addr = readq(&bar0->xmsi_address);
3763                 data = readq(&bar0->xmsi_data);
3764                 if (addr && data) {
3765                         nic->msix_info[i].addr = addr;
3766                         nic->msix_info[i].data = data;
3767                 }
3768         }
3769 }
3770
3771 static int s2io_enable_msi_x(struct s2io_nic *nic)
3772 {
3773         struct XENA_dev_config __iomem *bar0 = nic->bar0;
3774         u64 tx_mat, rx_mat;
3775         u16 msi_control; /* Temp variable */
3776         int ret, i, j, msix_indx = 1;
3777
3778         nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
3779                                GFP_KERNEL);
3780         if (!nic->entries) {
3781                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3782                         __FUNCTION__);
3783                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3784                 return -ENOMEM;
3785         }
3786         nic->mac_control.stats_info->sw_stat.mem_allocated
3787                 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3788
3789         nic->s2io_entries =
3790                 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
3791                                    GFP_KERNEL);
3792         if (!nic->s2io_entries) {
3793                 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3794                         __FUNCTION__);
3795                 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3796                 kfree(nic->entries);
3797                 nic->mac_control.stats_info->sw_stat.mem_freed
3798                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3799                 return -ENOMEM;
3800         }
3801          nic->mac_control.stats_info->sw_stat.mem_allocated
3802                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3803
3804         for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3805                 nic->entries[i].entry = i;
3806                 nic->s2io_entries[i].entry = i;
3807                 nic->s2io_entries[i].arg = NULL;
3808                 nic->s2io_entries[i].in_use = 0;
3809         }
3810
3811         tx_mat = readq(&bar0->tx_mat0_n[0]);
3812         for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3813                 tx_mat |= TX_MAT_SET(i, msix_indx);
3814                 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3815                 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3816                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3817         }
3818         writeq(tx_mat, &bar0->tx_mat0_n[0]);
3819
3820         rx_mat = readq(&bar0->rx_mat);
3821         for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
3822                 rx_mat |= RX_MAT_SET(j, msix_indx);
3823                 nic->s2io_entries[msix_indx].arg
3824                         = &nic->mac_control.rings[j];
3825                 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3826                 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3827         }
3828         writeq(rx_mat, &bar0->rx_mat);
3829
3830         nic->avail_msix_vectors = 0;
3831         ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3832         /* We fail init if error or we get less vectors than min required */
3833         if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
3834                 nic->avail_msix_vectors = ret;
3835                 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3836         }
3837         if (ret) {
3838                 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3839                 kfree(nic->entries);
3840                 nic->mac_control.stats_info->sw_stat.mem_freed
3841                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3842                 kfree(nic->s2io_entries);
3843                 nic->mac_control.stats_info->sw_stat.mem_freed
3844                 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3845                 nic->entries = NULL;
3846                 nic->s2io_entries = NULL;
3847                 nic->avail_msix_vectors = 0;
3848                 return -ENOMEM;
3849         }
3850         if (!nic->avail_msix_vectors)
3851                 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3852
3853         /*
3854          * To enable MSI-X, MSI also needs to be enabled, due to a bug
3855          * in the herc NIC. (Temp change, needs to be removed later)
3856          */
3857         pci_read_config_word(nic->pdev, 0x42, &msi_control);
3858         msi_control |= 0x1; /* Enable MSI */
3859         pci_write_config_word(nic->pdev, 0x42, msi_control);
3860
3861         return 0;
3862 }
3863
3864 /* Handle software interrupt used during MSI(X) test */
3865 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3866 {
3867         struct s2io_nic *sp = dev_id;
3868
3869         sp->msi_detected = 1;
3870         wake_up(&sp->msi_wait);
3871
3872         return IRQ_HANDLED;
3873 }
3874
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook the throwaway test handler onto MSI-X vector 1 */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
		       sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head (&sp->msi_wait);
	sp->msi_detected = 0;

	/*
	 * Arm the scheduled-interrupt timer as a one-shot routed to MSI
	 * vector 1; s2io_test_intr() should fire shortly afterwards.
	 * The pre-test register value is saved for restoration below.
	 */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Give the interrupt up to HZ/10 jiffies (100ms) to arrive */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			"using MSI(X) during test\n", sp->dev->name,
			pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the interrupt control register to its pre-test value */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
3917
3918 static void remove_msix_isr(struct s2io_nic *sp)
3919 {
3920         int i;
3921         u16 msi_control;
3922
3923         for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3924                 if (sp->s2io_entries[i].in_use ==
3925                         MSIX_REGISTERED_SUCCESS) {
3926                         int vector = sp->entries[i].vector;
3927                         void *arg = sp->s2io_entries[i].arg;
3928                         free_irq(vector, arg);
3929                 }
3930         }
3931
3932         kfree(sp->entries);
3933         kfree(sp->s2io_entries);
3934         sp->entries = NULL;
3935         sp->s2io_entries = NULL;
3936
3937         pci_read_config_word(sp->pdev, 0x42, &msi_control);
3938         msi_control &= 0xFFFE; /* Disable MSI */
3939         pci_write_config_word(sp->pdev, 0x42, msi_control);
3940
3941         pci_disable_msix(sp->pdev);
3942 }
3943
3944 static void remove_inta_isr(struct s2io_nic *sp)
3945 {
3946         struct net_device *dev = sp->dev;
3947
3948         free_irq(sp->pdev->irq, dev);
3949 }
3950
3951 /* ********************************************************* *
3952  * Functions defined below concern the OS part of the driver *
3953  * ********************************************************* */
3954
3955 /**
3956  *  s2io_open - open entry point of the driver
3957  *  @dev : pointer to the device structure.
3958  *  Description:
3959  *  This function is the open entry point of the driver. It mainly calls a
3960  *  function to allocate Rx buffers and inserts them into the buffer
3961  *  descriptors and then enables the Rx part of the NIC.
3962  *  Return value:
3963  *  0 on success and an appropriate (-)ve integer as defined in errno.h
3964  *   file on failure.
3965  */
3966
3967 static int s2io_open(struct net_device *dev)
3968 {
3969         struct s2io_nic *sp = dev->priv;
3970         int err = 0;
3971
3972         /*
3973          * Make sure you have link off by default every time
3974          * Nic is initialized
3975          */
3976         netif_carrier_off(dev);
3977         sp->last_link_state = 0;
3978
3979         if (sp->config.intr_type == MSI_X) {
3980                 int ret = s2io_enable_msi_x(sp);
3981
3982                 if (!ret) {
3983                         ret = s2io_test_msi(sp);
3984                         /* rollback MSI-X, will re-enable during add_isr() */
3985                         remove_msix_isr(sp);
3986                 }
3987                 if (ret) {
3988
3989                         DBG_PRINT(ERR_DBG,
3990                           "%s: MSI-X requested but failed to enable\n",
3991                           dev->name);
3992                         sp->config.intr_type = INTA;
3993                 }
3994         }
3995
3996         /* NAPI doesn't work well with MSI(X) */
3997          if (sp->config.intr_type != INTA) {
3998                 if(sp->config.napi)
3999                         sp->config.napi = 0;
4000         }
4001
4002         /* Initialize H/W and enable interrupts */
4003         err = s2io_card_up(sp);
4004         if (err) {
4005                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4006                           dev->name);
4007                 goto hw_init_failed;
4008         }
4009
4010         if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4011                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4012                 s2io_card_down(sp);
4013                 err = -ENODEV;
4014                 goto hw_init_failed;
4015         }
4016         s2io_start_all_tx_queue(sp);
4017         return 0;
4018
4019 hw_init_failed:
4020         if (sp->config.intr_type == MSI_X) {
4021                 if (sp->entries) {
4022                         kfree(sp->entries);
4023                         sp->mac_control.stats_info->sw_stat.mem_freed
4024                         += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
4025                 }
4026                 if (sp->s2io_entries) {
4027                         kfree(sp->s2io_entries);
4028                         sp->mac_control.stats_info->sw_stat.mem_freed
4029                         += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
4030                 }
4031         }
4032         return err;
4033 }
4034
4035 /**
4036  *  s2io_close -close entry point of the driver
4037  *  @dev : device pointer.
4038  *  Description:
4039  *  This is the stop entry point of the driver. It needs to undo exactly
4040  *  whatever was done by the open entry point,thus it's usually referred to
4041  *  as the close function.Among other things this function mainly stops the
4042  *  Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4043  *  Return value:
4044  *  0 on success and an appropriate (-)ve integer as defined in errno.h
4045  *  file on failure.
4046  */
4047
4048 static int s2io_close(struct net_device *dev)
4049 {
4050         struct s2io_nic *sp = dev->priv;
4051         struct config_param *config = &sp->config;
4052         u64 tmp64;
4053         int offset;
4054
4055         /* Return if the device is already closed               *
4056         *  Can happen when s2io_card_up failed in change_mtu    *
4057         */
4058         if (!is_s2io_card_up(sp))
4059                 return 0;
4060
4061         s2io_stop_all_tx_queue(sp);
4062         /* delete all populated mac entries */
4063         for (offset = 1; offset < config->max_mc_addr; offset++) {
4064                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4065                 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4066                         do_s2io_delete_unicast_mc(sp, tmp64);
4067         }
4068
4069         s2io_card_down(sp);
4070
4071         return 0;
4072 }
4073
4074 /**
4075  *  s2io_xmit - Tx entry point of te driver
4076  *  @skb : the socket buffer containing the Tx data.
4077  *  @dev : device pointer.
4078  *  Description :
4079  *  This function is the Tx entry point of the driver. S2IO NIC supports
4080  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
4081  *  NOTE: when device cant queue the pkt,just the trans_start variable will
4082  *  not be upadted.
4083  *  Return value:
4084  *  0 on success & 1 on failure.
4085  */
4086
static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Drop zero-length packets outright */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Card is going down for reset: drop the packet silently */
	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	if (sp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	/*
	 * Default steering: hash the TCP/UDP port pair to pick a Tx FIFO,
	 * keeping each flow on one FIFO.  UDP traffic additionally uses
	 * trylock and enables per-list interrupts for packets > 1024 bytes.
	 */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			/* Only an unfragmented/first fragment carries L4 */
			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
					[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		/* UDP path: don't spin, punt back to the stack instead */
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else
#endif
	if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16) fifo->tx_curr_put_info.offset;
	get_off = (u16) fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return 0;
	}

	/* Set LSO/checksum-offload control bits in the first descriptor */
	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	/* UFO: descriptor 0 carries the in-band fragmentation ID */
	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		/* both variants do cpu_to_be64(be32_to_cpu(...)) */
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
				(__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					fifo->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the completed descriptor list to the hardware FIFO */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	mmiowb();

	/* Advance the put pointer with wrap-around */
	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	/* NOTE(review): with MSI-X, completed TxDs are reaped inline here --
	 * presumably to compensate for interrupt coalescing; confirm */
	if (sp->config.intr_type == MSI_X)
		tx_intr_handler(fifo);

	return 0;
pci_map_failed:
	/* DMA mapping failed: count it, stop the queue, drop the packet */
	stats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return 0;
}
4320
4321 static void
4322 s2io_alarm_handle(unsigned long data)
4323 {
4324         struct s2io_nic *sp = (struct s2io_nic *)data;
4325         struct net_device *dev = sp->dev;
4326
4327         s2io_handle_errors(dev);
4328         mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4329 }
4330
4331 static int s2io_chk_rx_buffers(struct ring_info *ring)
4332 {
4333         if (fill_rx_buffers(ring) == -ENOMEM) {
4334                 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
4335                 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
4336         }
4337         return 0;
4338 }
4339
4340 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4341 {
4342         struct ring_info *ring = (struct ring_info *)dev_id;
4343         struct s2io_nic *sp = ring->nic;
4344
4345         if (!is_s2io_card_up(sp))
4346                 return IRQ_HANDLED;
4347
4348         rx_intr_handler(ring);
4349         s2io_chk_rx_buffers(ring);
4350
4351         return IRQ_HANDLED;
4352 }
4353
4354 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4355 {
4356         struct fifo_info *fifo = (struct fifo_info *)dev_id;
4357         struct s2io_nic *sp = fifo->nic;
4358
4359         if (!is_s2io_card_up(sp))
4360                 return IRQ_HANDLED;
4361
4362         tx_intr_handler(fifo);
4363         return IRQ_HANDLED;
4364 }
/*
 * Service a TxPIC (GPIO) interrupt: detect link up/down transitions,
 * report them via s2io_link(), and flip the GPIO interrupt masks so
 * only the opposite transition remains unmasked.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |=  GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			/* NOTE(review): result discarded -- possibly a
			 * required dummy read of adapter_status; confirm */
			val64 = readq(&bar0->adapter_status);
				/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			/* NOTE(review): this read's result is also unused */
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmask link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* NOTE(review): trailing mask read with result discarded --
	 * presumably a PCI read to flush the posted writes; confirm */
	val64 = readq(&bar0->gpio_int_mask);
}
4425
4426 /**
4427  *  do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4428  *  @value: alarm bits
4429  *  @addr: address value
4430  *  @cnt: counter variable
4431  *  Description: Check for alarm and increment the counter
4432  *  Return Value:
4433  *  1 - if alarm bit set
4434  *  0 - if alarm bit is not set
4435  */
4436 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4437                           unsigned long long *cnt)
4438 {
4439         u64 val64;
4440         val64 = readq(addr);
4441         if ( val64 & value ) {
4442                 writeq(val64, addr);
4443                 (*cnt)++;
4444                 return 1;
4445         }
4446         return 0;
4447
4448 }
4449
4450 /**
4451  *  s2io_handle_errors - Xframe error indication handler
4452  *  @nic: device private variable
4453  *  Description: Handle alarms such as loss of link, single or
4454  *  double ECC errors, critical and serious errors.
4455  *  Return Value:
4456  *  NONE
4457  */
static void s2io_handle_errors(void * dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 temp64 = 0,val64=0;
        int i = 0;

        struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
        struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;

        /* Nothing to do if the card is down or has been disconnected */
        if (!is_s2io_card_up(sp))
                return;

        if (pci_channel_offline(sp->pdev))
                return;

        /* Restart the per-ring "ring full" tally for this pass */
        memset(&sw_stat->ring_full_cnt, 0,
                sizeof(sw_stat->ring_full_cnt));

        /* Handling the XPAK counters update */
        if(stats->xpak_timer_count < 72000) {
                /* waiting for an hour */
                stats->xpak_timer_count++;
        } else {
                s2io_updt_xpak_counter(dev);
                /* reset the count to zero */
                stats->xpak_timer_count = 0;
        }

        /* Handling link status change error Intr */
        if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
                val64 = readq(&bar0->mac_rmac_err_reg);
                writeq(val64, &bar0->mac_rmac_err_reg);
                if (val64 & RMAC_LINK_STATE_CHANGE_INT)
                        schedule_work(&sp->set_link_task);
        }

        /* In case of a serious error, the device will be Reset. */
        if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
                                &sw_stat->serious_err_cnt))
                goto reset;

        /* Check for data parity error */
        if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
                                &sw_stat->parity_err_cnt))
                goto reset;

        /* Check for ring full counter */
        if (sp->device_type == XFRAME_II_DEVICE) {
                /* Each counter register packs four 16-bit ring counters */
                val64 = readq(&bar0->ring_bump_counter1);
                for (i=0; i<4; i++) {
                        temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
                        temp64 >>= 64 - ((i+1)*16);
                        sw_stat->ring_full_cnt[i] += temp64;
                }

                val64 = readq(&bar0->ring_bump_counter2);
                for (i=0; i<4; i++) {
                        temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
                        temp64 >>= 64 - ((i+1)*16);
                         sw_stat->ring_full_cnt[i+4] += temp64;
                }
        }

        /*
         * For each TxDMA sub-block: fatal alarms reset the device,
         * recoverable ones are only counted.
         */
        val64 = readq(&bar0->txdma_int_status);
        /*check for pfc_err*/
        if (val64 & TXDMA_PFC_INT) {
                if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
                                PFC_MISC_0_ERR | PFC_MISC_1_ERR|
                                PFC_PCIX_ERR, &bar0->pfc_err_reg,
                                &sw_stat->pfc_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
                                &sw_stat->pfc_err_cnt);
        }

        /*check for tda_err*/
        if (val64 & TXDMA_TDA_INT) {
                if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
                                TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
                                &sw_stat->tda_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
                                &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
        }
        /*check for pcc_err*/
        if (val64 & TXDMA_PCC_INT) {
                if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
                                | PCC_N_SERR | PCC_6_COF_OV_ERR
                                | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
                                | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
                                | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
                                &sw_stat->pcc_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
                                &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
        }

        /*check for tti_err*/
        if (val64 & TXDMA_TTI_INT) {
                if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
                                &sw_stat->tti_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
                                &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
        }

        /*check for lso_err*/
        if (val64 & TXDMA_LSO_INT) {
                if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
                                | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
                                &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
                                &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
        }

        /*check for tpa_err*/
        if (val64 & TXDMA_TPA_INT) {
                if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
                        &sw_stat->tpa_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
                        &sw_stat->tpa_err_cnt);
        }

        /*check for sm_err*/
        if (val64 & TXDMA_SM_INT) {
                if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
                        &sw_stat->sm_err_cnt))
                        goto reset;
        }

        /* TMAC (transmit MAC) alarms */
        val64 = readq(&bar0->mac_int_status);
        if (val64 & MAC_INT_STATUS_TMAC_INT) {
                if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
                                &bar0->mac_tmac_err_reg,
                                &sw_stat->mac_tmac_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
                                | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
                                &bar0->mac_tmac_err_reg,
                                &sw_stat->mac_tmac_err_cnt);
        }

        /* Tx XGXS (10G serdes) alarms */
        val64 = readq(&bar0->xgxs_int_status);
        if (val64 & XGXS_INT_STATUS_TXGXS) {
                if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
                                &bar0->xgxs_txgxs_err_reg,
                                &sw_stat->xgxs_txgxs_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
                                &bar0->xgxs_txgxs_err_reg,
                                &sw_stat->xgxs_txgxs_err_cnt);
        }

        /* RxDMA sub-block alarms */
        val64 = readq(&bar0->rxdma_int_status);
        if (val64 & RXDMA_INT_RC_INT_M) {
                if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
                                | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
                                &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
                                | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
                                &sw_stat->rc_err_cnt);
                if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
                                | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
                                &sw_stat->prc_pcix_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
                                | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
                                &sw_stat->prc_pcix_err_cnt);
        }

        if (val64 & RXDMA_INT_RPA_INT_M) {
                if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
                                &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
                                &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
        }

        if (val64 & RXDMA_INT_RDA_INT_M) {
                if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
                                | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
                                | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
                                &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
                                | RDA_MISC_ERR | RDA_PCIX_ERR,
                                &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
        }

        if (val64 & RXDMA_INT_RTI_INT_M) {
                if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
                                &sw_stat->rti_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
                                &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
        }

        /* RMAC (receive MAC) alarms */
        val64 = readq(&bar0->mac_int_status);
        if (val64 & MAC_INT_STATUS_RMAC_INT) {
                if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
                                &bar0->mac_rmac_err_reg,
                                &sw_stat->mac_rmac_err_cnt))
                        goto reset;
                do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
                                RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
                                &sw_stat->mac_rmac_err_cnt);
        }

        /* Rx XGXS alarms */
        val64 = readq(&bar0->xgxs_int_status);
        if (val64 & XGXS_INT_STATUS_RXGXS) {
                if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
                                &bar0->xgxs_rxgxs_err_reg,
                                &sw_stat->xgxs_rxgxs_err_cnt))
                        goto reset;
        }

        /* Memory controller alarms */
        val64 = readq(&bar0->mc_int_status);
        if(val64 & MC_INT_STATUS_MC_INT) {
                if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
                                &sw_stat->mc_err_cnt))
                        goto reset;

                /* Handling Ecc errors */
                if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
                        writeq(val64, &bar0->mc_err_reg);
                        if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
                                sw_stat->double_ecc_errs++;
                                if (sp->device_type != XFRAME_II_DEVICE) {
                                        /*
                                         * Reset XframeI only if critical error
                                         */
                                        if (val64 &
                                                (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
                                                MC_ERR_REG_MIRI_ECC_DB_ERR_1))
                                                                goto reset;
                                        }
                        } else
                                sw_stat->single_ecc_errs++;
                }
        }
        return;

reset:
        /* Fatal error path: stop Tx, count it and schedule a soft reset */
        s2io_stop_all_tx_queue(sp);
        schedule_work(&sp->rst_timer_task);
        sw_stat->soft_reset_cnt++;
        return;
}
4711
4712 /**
4713  *  s2io_isr - ISR handler of the device .
4714  *  @irq: the irq of the device.
4715  *  @dev_id: a void pointer to the dev structure of the NIC.
4716  *  Description:  This function is the ISR handler of the device. It
4717  *  identifies the reason for the interrupt and calls the relevant
4718  *  service routines. As a contingency measure, this ISR allocates the
4719  *  recv buffers, if their numbers are below the panic value which is
4720  *  presently set to 25% of the original number of rcv buffers allocated.
4721  *  Return value:
4722  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
4723  *   IRQ_NONE: will be returned if interrupt is not from our device
4724  */
static irqreturn_t s2io_isr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        int i;
        u64 reason = 0;
        struct mac_info *mac_control;
        struct config_param *config;

        /* Pretend we handled any irq's from a disconnected card */
        if (pci_channel_offline(sp->pdev))
                return IRQ_NONE;

        if (!is_s2io_card_up(sp))
                return IRQ_NONE;

        mac_control = &sp->mac_control;
        config = &sp->config;

        /*
         * Identify the cause for interrupt and call the appropriate
         * interrupt handler. Causes for the interrupt could be;
         * 1. Rx of packet.
         * 2. Tx complete.
         * 3. Link down.
         */
        reason = readq(&bar0->general_int_status);

        /* An all-ones read: nothing meaningful can be decoded from it */
        if (unlikely(reason == S2IO_MINUS_ONE) ) {
                /* Nothing much can be done. Get out */
                return IRQ_HANDLED;
        }

        if (reason & (GEN_INTR_RXTRAFFIC |
                GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
        {
                /* Mask all interrupts while this ISR services the causes */
                writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

                if (config->napi) {
                        if (reason & GEN_INTR_RXTRAFFIC) {
                                /* Hand Rx work to the NAPI poll routine */
                                if (likely(netif_rx_schedule_prep(dev,
                                                        &sp->napi))) {
                                        __netif_rx_schedule(dev, &sp->napi);
                                        writeq(S2IO_MINUS_ONE,
                                               &bar0->rx_traffic_mask);
                                } else
                                        writeq(S2IO_MINUS_ONE,
                                               &bar0->rx_traffic_int);
                        }
                } else {
                        /*
                         * rx_traffic_int reg is an R1 register, writing all 1's
                         * will ensure that the actual interrupt causing bit
                         * get's cleared and hence a read can be avoided.
                         */
                        if (reason & GEN_INTR_RXTRAFFIC)
                                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);

                        for (i = 0; i < config->rx_ring_num; i++)
                                rx_intr_handler(&mac_control->rings[i]);
                }

                /*
                 * tx_traffic_int reg is an R1 register, writing all 1's
                 * will ensure that the actual interrupt causing bit get's
                 * cleared and hence a read can be avoided.
                 */
                if (reason & GEN_INTR_TXTRAFFIC)
                        writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

                for (i = 0; i < config->tx_fifo_num; i++)
                        tx_intr_handler(&mac_control->fifos[i]);

                if (reason & GEN_INTR_TXPIC)
                        s2io_txpic_intr_handle(sp);

                /*
                 * Reallocate the buffers from the interrupt handler itself.
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++)
                                s2io_chk_rx_buffers(&mac_control->rings[i]);
                }
                /* Restore the saved interrupt mask and flush with a read */
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);

                return IRQ_HANDLED;

        }
        else if (!reason) {
                /* The interrupt was not raised by us */
                return IRQ_NONE;
        }

        return IRQ_HANDLED;
}
4822
4823 /**
4824  * s2io_updt_stats -
4825  */
4826 static void s2io_updt_stats(struct s2io_nic *sp)
4827 {
4828         struct XENA_dev_config __iomem *bar0 = sp->bar0;
4829         u64 val64;
4830         int cnt = 0;
4831
4832         if (is_s2io_card_up(sp)) {
4833                 /* Apprx 30us on a 133 MHz bus */
4834                 val64 = SET_UPDT_CLICKS(10) |
4835                         STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4836                 writeq(val64, &bar0->stat_cfg);
4837                 do {
4838                         udelay(100);
4839                         val64 = readq(&bar0->stat_cfg);
4840                         if (!(val64 & s2BIT(0)))
4841                                 break;
4842                         cnt++;
4843                         if (cnt == 5)
4844                                 break; /* Updt failed */
4845                 } while(1);
4846         }
4847 }
4848
4849 /**
4850  *  s2io_get_stats - Updates the device statistics structure.
4851  *  @dev : pointer to the device structure.
4852  *  Description:
4853  *  This function updates the device statistics structure in the s2io_nic
4854  *  structure and returns a pointer to the same.
4855  *  Return value:
4856  *  pointer to the updated net_device_stats structure.
4857  */
4858
4859 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4860 {
4861         struct s2io_nic *sp = dev->priv;
4862         struct mac_info *mac_control;
4863         struct config_param *config;
4864         int i;
4865
4866
4867         mac_control = &sp->mac_control;
4868         config = &sp->config;
4869
4870         /* Configure Stats for immediate updt */
4871         s2io_updt_stats(sp);
4872
4873         sp->stats.tx_packets =
4874                 le32_to_cpu(mac_control->stats_info->tmac_frms);
4875         sp->stats.tx_errors =
4876                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4877         sp->stats.rx_errors =
4878                 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4879         sp->stats.multicast =
4880                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4881         sp->stats.rx_length_errors =
4882                 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4883
4884         /* collect per-ring rx_packets and rx_bytes */
4885         sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4886         for (i = 0; i < config->rx_ring_num; i++) {
4887                 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4888                 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4889         }
4890
4891         return (&sp->stats);
4892 }
4893
4894 /**
4895  *  s2io_set_multicast - entry point for multicast address enable/disable.
4896  *  @dev : pointer to the device structure
4897  *  Description:
4898  *  This function is a driver entry point which gets called by the kernel
4899  *  whenever multicast addresses must be enabled/disabled. This also gets
4900  *  called to set/reset promiscuous mode. Depending on the device flag, we
4901  *  determine, if multicast address must be enabled or if promiscuous mode
4902  *  is to be disabled etc.
4903  *  Return value:
4904  *  void.
4905  */
4906
static void s2io_set_multicast(struct net_device *dev)
{
        int i, j, prev_cnt;
        struct dev_mc_list *mclist;
        struct s2io_nic *sp = dev->priv;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
            0xfeffffffffffULL;
        u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
        void __iomem *add;
        struct config_param *config = &sp->config;

        if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
                /*  Enable all Multicast addresses */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
                       &bar0->rmac_addr_data1_mem);
                /* Program the all-multi entry into the last CAM slot */
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 1;
                sp->all_multi_pos = config->max_mc_addr - 1;
        } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
                /*  Disable all Multicast addresses */
                writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                       &bar0->rmac_addr_data0_mem);
                writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
                       &bar0->rmac_addr_data1_mem);
                val64 = RMAC_ADDR_CMD_MEM_WE |
                    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
                writeq(val64, &bar0->rmac_addr_cmd_mem);
                /* Wait till command completes */
                wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET);

                sp->m_cast_flg = 0;
                sp->all_multi_pos = 0;
        }

        if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
                /*  Put the NIC into promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 |= MAC_CFG_RMAC_PROM_ENABLE;

                /* mac_cfg is key-protected: unlock before each 32-bit half */
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                if (vlan_tag_strip != 1) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 0;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 1;
                DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
                          dev->name);
        } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
                /*  Remove the NIC from promiscuous mode */
                add = &bar0->mac_cfg;
                val64 = readq(&bar0->mac_cfg);
                val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;

                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) val64, add);
                writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
                writel((u32) (val64 >> 32), (add + 4));

                if (vlan_tag_strip != 0) {
                        val64 = readq(&bar0->rx_pa_cfg);
                        val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
                        writeq(val64, &bar0->rx_pa_cfg);
                        vlan_strip_flag = 1;
                }

                val64 = readq(&bar0->mac_cfg);
                sp->promisc_flg = 0;
                DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
                          dev->name);
        }

        /*  Update individual M_CAST address list */
        if ((!sp->m_cast_flg) && dev->mc_count) {
                if (dev->mc_count >
                    (config->max_mc_addr - config->max_mac_addr)) {
                        DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
                                  dev->name);
                        DBG_PRINT(ERR_DBG, "can be added, please enable ");
                        DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
                        return;
                }

                prev_cnt = sp->mc_addr_count;
                sp->mc_addr_count = dev->mc_count;

                /* Clear out the previous list of Mc in the H/W. */
                for (i = 0; i < prev_cnt; i++) {
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (config->mc_start_offset + i);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }

                /* Create the new Rx filter list and update the same in H/W. */
                for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
                     i++, mclist = mclist->next) {
                        memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
                               ETH_ALEN);
                        mac_addr = 0;
                        /* Pack the 6-byte MAC into a u64, MSB first */
                        for (j = 0; j < ETH_ALEN; j++) {
                                mac_addr |= mclist->dmi_addr[j];
                                mac_addr <<= 8;
                        }
                        mac_addr >>= 8;
                        writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
                               &bar0->rmac_addr_data0_mem);
                        writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
                                &bar0->rmac_addr_data1_mem);
                        val64 = RMAC_ADDR_CMD_MEM_WE |
                            RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
                            RMAC_ADDR_CMD_MEM_OFFSET
                            (i + config->mc_start_offset);
                        writeq(val64, &bar0->rmac_addr_cmd_mem);

                        /* Wait for command completes */
                        if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
                                        RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
                                        S2IO_BIT_RESET)) {
                                DBG_PRINT(ERR_DBG, "%s: Adding ",
                                          dev->name);
                                DBG_PRINT(ERR_DBG, "Multicasts failed\n");
                                return;
                        }
                }
        }
}
5071
5072 /* read from CAM unicast & multicast addresses and store it in
5073  * def_mac_addr structure
5074  */
5075 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5076 {
5077         int offset;
5078         u64 mac_addr = 0x0;
5079         struct config_param *config = &sp->config;
5080
5081         /* store unicast & multicast mac addresses */
5082         for (offset = 0; offset < config->max_mc_addr; offset++) {
5083                 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5084                 /* if read fails disable the entry */
5085                 if (mac_addr == FAILURE)
5086                         mac_addr = S2IO_DISABLE_MAC_ENTRY;
5087                 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5088         }
5089 }
5090
5091 /* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5092 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5093 {
5094         int offset;
5095         struct config_param *config = &sp->config;
5096         /* restore unicast mac address */
5097         for (offset = 0; offset < config->max_mac_addr; offset++)
5098                 do_s2io_prog_unicast(sp->dev,
5099                         sp->def_mac_addr[offset].mac_addr);
5100
5101         /* restore multicast mac address */
5102         for (offset = config->mc_start_offset;
5103                 offset < config->max_mc_addr; offset++)
5104                 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5105 }
5106
5107 /* add a multicast MAC address to CAM */
5108 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5109 {
5110         int i;
5111         u64 mac_addr = 0;
5112         struct config_param *config = &sp->config;
5113
5114         for (i = 0; i < ETH_ALEN; i++) {
5115                 mac_addr <<= 8;
5116                 mac_addr |= addr[i];
5117         }
5118         if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5119                 return SUCCESS;
5120
5121         /* check if the multicast mac already preset in CAM */
5122         for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5123                 u64 tmp64;
5124                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5125                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5126                         break;
5127
5128                 if (tmp64 == mac_addr)
5129                         return SUCCESS;
5130         }
5131         if (i == config->max_mc_addr) {
5132                 DBG_PRINT(ERR_DBG,
5133                         "CAM full no space left for multicast MAC\n");
5134                 return FAILURE;
5135         }
5136         /* Update the internal structure with this new mac address */
5137         do_s2io_copy_mac_addr(sp, i, mac_addr);
5138
5139         return (do_s2io_add_mac(sp, mac_addr, i));
5140 }
5141
/* add MAC address to CAM at the given offset; returns SUCCESS/FAILURE */
static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
{
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Load the address into the CAM data register first ... */
	writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
		&bar0->rmac_addr_data0_mem);

	/* ... then issue a write-enable strobe for the target offset. */
	val64 =
		RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(off);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
		S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
		return FAILURE;
	}
	return SUCCESS;
}
5165 /* deletes a specified unicast/multicast mac entry from CAM */
5166 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5167 {
5168         int offset;
5169         u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5170         struct config_param *config = &sp->config;
5171
5172         for (offset = 1;
5173                 offset < config->max_mc_addr; offset++) {
5174                 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5175                 if (tmp64 == addr) {
5176                         /* disable the entry by writing  0xffffffffffffULL */
5177                         if (do_s2io_add_mac(sp, dis_addr, offset) ==  FAILURE)
5178                                 return FAILURE;
5179                         /* store the new mac list from CAM */
5180                         do_s2io_store_unicast_mc(sp);
5181                         return SUCCESS;
5182                 }
5183         }
5184         DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5185                         (unsigned long long)addr);
5186         return FAILURE;
5187 }
5188
/* read mac entries from CAM; returns the 48-bit address or FAILURE */
static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
{
	u64 tmp64 = 0xffffffffffff0000ULL, val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* read mac addr: issue a read strobe for the given CAM offset */
	val64 =
		RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
		RMAC_ADDR_CMD_MEM_OFFSET(offset);
	writeq(val64, &bar0->rmac_addr_cmd_mem);

	/* Wait till command completes */
	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
		S2IO_BIT_RESET)) {
		DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
		/* NOTE(review): FAILURE is an int error code widened to u64
		 * here; callers compare the full u64 — confirm the sentinel
		 * cannot collide with a real (shifted) address value. */
		return FAILURE;
	}
	/* The address occupies the upper bits of the data register
	 * (see the >> 16 below and the 0x...0000 init pattern). */
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	return (tmp64 >> 16);
}
5211
5212 /**
5213  * s2io_set_mac_addr driver entry point
5214  */
5215
5216 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5217 {
5218         struct sockaddr *addr = p;
5219
5220         if (!is_valid_ether_addr(addr->sa_data))
5221                 return -EINVAL;
5222
5223         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5224
5225         /* store the MAC address in CAM */
5226         return (do_s2io_prog_unicast(dev, dev->dev_addr));
5227 }
5228 /**
5229  *  do_s2io_prog_unicast - Programs the Xframe mac address
5230  *  @dev : pointer to the device structure.
5231  *  @addr: a uchar pointer to the new mac address which is to be set.
5232  *  Description : This procedure will program the Xframe to receive
5233  *  frames with new Mac Address
5234  *  Return value: SUCCESS on success and an appropriate (-)ve integer
5235  *  as defined in errno.h file on failure.
5236  */
5237
5238 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5239 {
5240         struct s2io_nic *sp = dev->priv;
5241         register u64 mac_addr = 0, perm_addr = 0;
5242         int i;
5243         u64 tmp64;
5244         struct config_param *config = &sp->config;
5245
5246         /*
5247         * Set the new MAC address as the new unicast filter and reflect this
5248         * change on the device address registered with the OS. It will be
5249         * at offset 0.
5250         */
5251         for (i = 0; i < ETH_ALEN; i++) {
5252                 mac_addr <<= 8;
5253                 mac_addr |= addr[i];
5254                 perm_addr <<= 8;
5255                 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5256         }
5257
5258         /* check if the dev_addr is different than perm_addr */
5259         if (mac_addr == perm_addr)
5260                 return SUCCESS;
5261
5262         /* check if the mac already preset in CAM */
5263         for (i = 1; i < config->max_mac_addr; i++) {
5264                 tmp64 = do_s2io_read_unicast_mc(sp, i);
5265                 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5266                         break;
5267
5268                 if (tmp64 == mac_addr) {
5269                         DBG_PRINT(INFO_DBG,
5270                         "MAC addr:0x%llx already present in CAM\n",
5271                         (unsigned long long)mac_addr);
5272                         return SUCCESS;
5273                 }
5274         }
5275         if (i == config->max_mac_addr) {
5276                 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5277                 return FAILURE;
5278         }
5279         /* Update the internal structure with this new mac address */
5280         do_s2io_copy_mac_addr(sp, i, mac_addr);
5281         return (do_s2io_add_mac(sp, mac_addr, i));
5282 }
5283
5284 /**
5285  * s2io_ethtool_sset - Sets different link parameters.
5286  * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
5287  * @info: pointer to the structure with parameters given by ethtool to set
5288  * link information.
5289  * Description:
5290  * The function sets different link parameters provided by the user onto
5291  * the NIC.
5292  * Return value:
5293  * 0 on success.
5294 */
5295
5296 static int s2io_ethtool_sset(struct net_device *dev,
5297                              struct ethtool_cmd *info)
5298 {
5299         struct s2io_nic *sp = dev->priv;
5300         if ((info->autoneg == AUTONEG_ENABLE) ||
5301             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5302                 return -EINVAL;
5303         else {
5304                 s2io_close(sp->dev);
5305                 s2io_open(sp->dev);
5306         }
5307
5308         return 0;
5309 }
5310
5311 /**
5312  * s2io_ethtol_gset - Return link specific information.
5313  * @sp : private member of the device structure, pointer to the
5314  *      s2io_nic structure.
5315  * @info : pointer to the structure with parameters given by ethtool
5316  * to return link information.
5317  * Description:
5318  * Returns link specific information like speed, duplex etc.. to ethtool.
5319  * Return value :
5320  * return 0 on success.
5321  */
5322
5323 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5324 {
5325         struct s2io_nic *sp = dev->priv;
5326         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5327         info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5328         info->port = PORT_FIBRE;
5329
5330         /* info->transceiver */
5331         info->transceiver = XCVR_EXTERNAL;
5332
5333         if (netif_carrier_ok(sp->dev)) {
5334                 info->speed = 10000;
5335                 info->duplex = DUPLEX_FULL;
5336         } else {
5337                 info->speed = -1;
5338                 info->duplex = -1;
5339         }
5340
5341         info->autoneg = AUTONEG_DISABLE;
5342         return 0;
5343 }
5344
5345 /**
5346  * s2io_ethtool_gdrvinfo - Returns driver specific information.
5347  * @sp : private member of the device structure, which is a pointer to the
5348  * s2io_nic structure.
5349  * @info : pointer to the structure with parameters given by ethtool to
5350  * return driver information.
5351  * Description:
5352  * Returns driver specefic information like name, version etc.. to ethtool.
5353  * Return value:
5354  *  void
5355  */
5356
static void s2io_ethtool_gdrvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *info)
{
	struct s2io_nic *sp = dev->priv;

	/* NOTE(review): strncpy does not NUL-terminate when the source
	 * fills the buffer; safe only if the driver name/version strings
	 * are shorter than the ethtool_drvinfo fields — confirm. */
	strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
	strncpy(info->version, s2io_driver_version, sizeof(info->version));
	strncpy(info->fw_version, "", sizeof(info->fw_version));
	strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
	/* Sizes ethtool uses to allocate regdump/EEPROM buffers. */
	info->regdump_len = XENA_REG_SPACE;
	info->eedump_len = XENA_EEPROM_SPACE;
}
5369
5370 /**
5371  *  s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5372  *  @sp: private member of the device structure, which is a pointer to the
5373  *  s2io_nic structure.
5374  *  @regs : pointer to the structure with parameters given by ethtool for
5375  *  dumping the registers.
5376  *  @reg_space: The input argumnet into which all the registers are dumped.
5377  *  Description:
5378  *  Dumps the entire register space of xFrame NIC into the user given
5379  *  buffer area.
5380  * Return value :
5381  * void .
5382 */
5383
static void s2io_ethtool_gregs(struct net_device *dev,
			       struct ethtool_regs *regs, void *space)
{
	int i;
	u64 reg;
	u8 *reg_space = (u8 *) space;
	struct s2io_nic *sp = dev->priv;

	regs->len = XENA_REG_SPACE;
	regs->version = sp->pdev->subsystem_device;

	/* Copy the whole BAR0 register space out 64 bits at a time;
	 * readq keeps each access properly sized for the device. */
	for (i = 0; i < regs->len; i += 8) {
		reg = readq(sp->bar0 + i);
		memcpy((reg_space + i), &reg, 8);
	}
}
5400
5401 /**
5402  *  s2io_phy_id  - timer function that alternates adapter LED.
5403  *  @data : address of the private member of the device structure, which
5404  *  is a pointer to the s2io_nic structure, provided as an u32.
5405  * Description: This is actually the timer function that alternates the
5406  * adapter LED bit of the adapter control bit to set/reset every time on
5407  * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
5408  *  once every second.
5409 */
static void s2io_phy_id(unsigned long data)
{
	struct s2io_nic *sp = (struct s2io_nic *) data;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0;
	u16 subid;

	/* Which register drives the LED depends on the board revision:
	 * Xframe II (and newer Xframe I subsystems) use gpio_control,
	 * older boards toggle the LED bit in adapter_control. */
	subid = sp->pdev->subsystem_device;
	if ((sp->device_type == XFRAME_II_DEVICE) ||
		   ((subid & 0xFF) >= 0x07)) {
		val64 = readq(&bar0->gpio_control);
		val64 ^= GPIO_CTRL_GPIO_0;	/* toggle LED state */
		writeq(val64, &bar0->gpio_control);
	} else {
		val64 = readq(&bar0->adapter_control);
		val64 ^= ADAPTER_LED_ON;	/* toggle LED state */
		writeq(val64, &bar0->adapter_control);
	}

	/* Re-arm to fire again in half a second (blink rate). */
	mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
5431
5432 /**
5433  * s2io_ethtool_idnic - To physically identify the nic on the system.
5434  * @sp : private member of the device structure, which is a pointer to the
5435  * s2io_nic structure.
5436  * @id : pointer to the structure with identification parameters given by
5437  * ethtool.
5438  * Description: Used to physically identify the NIC on the system.
5439  * The Link LED will blink for a time specified by the user for
5440  * identification.
5441  * NOTE: The Link has to be Up to be able to blink the LED. Hence
5442  * identification is possible only if it's link is up.
5443  * Return value:
5444  * int , returns 0 on success
5445  */
5446
static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
{
	u64 val64 = 0, last_gpio_ctrl_val;
	struct s2io_nic *sp = dev->priv;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u16 subid;

	subid = sp->pdev->subsystem_device;
	/* Save GPIO state so it can be restored after blinking. */
	last_gpio_ctrl_val = readq(&bar0->gpio_control);
	/* Older Xframe I boards can only blink while the adapter is
	 * enabled, so refuse when the link is down. */
	if ((sp->device_type == XFRAME_I_DEVICE) &&
		((subid & 0xFF) < 0x07)) {
		val64 = readq(&bar0->adapter_control);
		if (!(val64 & ADAPTER_CNTL_EN)) {
			printk(KERN_ERR
			       "Adapter Link down, cannot blink LED\n");
			return -EFAULT;
		}
	}
	/* Lazily initialize the blink timer on first use. */
	if (sp->id_timer.function == NULL) {
		init_timer(&sp->id_timer);
		sp->id_timer.function = s2io_phy_id;
		sp->id_timer.data = (unsigned long) sp;
	}
	mod_timer(&sp->id_timer, jiffies);
	/* Blink for the user-requested duration (0 => default). */
	if (data)
		msleep_interruptible(data * HZ);
	else
		msleep_interruptible(MAX_FLICKER_TIME);
	del_timer_sync(&sp->id_timer);

	/* Restore the original GPIO/LED state where needed. */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
		last_gpio_ctrl_val = readq(&bar0->gpio_control);
	}

	return 0;
}
5484
5485 static void s2io_ethtool_gringparam(struct net_device *dev,
5486                                     struct ethtool_ringparam *ering)
5487 {
5488         struct s2io_nic *sp = dev->priv;
5489         int i,tx_desc_count=0,rx_desc_count=0;
5490
5491         if (sp->rxd_mode == RXD_MODE_1)
5492                 ering->rx_max_pending = MAX_RX_DESC_1;
5493         else if (sp->rxd_mode == RXD_MODE_3B)
5494                 ering->rx_max_pending = MAX_RX_DESC_2;
5495
5496         ering->tx_max_pending = MAX_TX_DESC;
5497         for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5498                 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5499
5500         DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5501         ering->tx_pending = tx_desc_count;
5502         rx_desc_count = 0;
5503         for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5504                 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5505
5506         ering->rx_pending = rx_desc_count;
5507
5508         ering->rx_mini_max_pending = 0;
5509         ering->rx_mini_pending = 0;
5510         if(sp->rxd_mode == RXD_MODE_1)
5511                 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5512         else if (sp->rxd_mode == RXD_MODE_3B)
5513                 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5514         ering->rx_jumbo_pending = rx_desc_count;
5515 }
5516
5517 /**
5518  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
5519  * @sp : private member of the device structure, which is a pointer to the
5520  *      s2io_nic structure.
5521  * @ep : pointer to the structure with pause parameters given by ethtool.
5522  * Description:
5523  * Returns the Pause frame generation and reception capability of the NIC.
5524  * Return value:
5525  *  void
5526  */
5527 static void s2io_ethtool_getpause_data(struct net_device *dev,
5528                                        struct ethtool_pauseparam *ep)
5529 {
5530         u64 val64;
5531         struct s2io_nic *sp = dev->priv;
5532         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5533
5534         val64 = readq(&bar0->rmac_pause_cfg);
5535         if (val64 & RMAC_PAUSE_GEN_ENABLE)
5536                 ep->tx_pause = TRUE;
5537         if (val64 & RMAC_PAUSE_RX_ENABLE)
5538                 ep->rx_pause = TRUE;
5539         ep->autoneg = FALSE;
5540 }
5541
5542 /**
5543  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
5544  * @sp : private member of the device structure, which is a pointer to the
5545  *      s2io_nic structure.
5546  * @ep : pointer to the structure with pause parameters given by ethtool.
5547  * Description:
5548  * It can be used to set or reset Pause frame generation or reception
5549  * support of the NIC.
5550  * Return value:
5551  * int, returns 0 on Success
5552  */
5553
5554 static int s2io_ethtool_setpause_data(struct net_device *dev,
5555                                struct ethtool_pauseparam *ep)
5556 {
5557         u64 val64;
5558         struct s2io_nic *sp = dev->priv;
5559         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5560
5561         val64 = readq(&bar0->rmac_pause_cfg);
5562         if (ep->tx_pause)
5563                 val64 |= RMAC_PAUSE_GEN_ENABLE;
5564         else
5565                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5566         if (ep->rx_pause)
5567                 val64 |= RMAC_PAUSE_RX_ENABLE;
5568         else
5569                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5570         writeq(val64, &bar0->rmac_pause_cfg);
5571         return 0;
5572 }
5573
5574 /**
5575  * read_eeprom - reads 4 bytes of data from user given offset.
5576  * @sp : private member of the device structure, which is a pointer to the
5577  *      s2io_nic structure.
5578  * @off : offset at which the data must be written
5579  * @data : Its an output parameter where the data read at the given
5580  *      offset is stored.
5581  * Description:
5582  * Will read 4 bytes of data from the user given offset and return the
5583  * read data.
5584  * NOTE: Will allow to read only part of the EEPROM visible through the
5585  *   I2C bus.
5586  * Return value:
5587  *  -1 on failure and 0 on success.
5588  */
5589
#define S2IO_DEV_ID             5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: the EEPROM sits behind the I2C controller. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion, up to 5 tries 50 ms apart. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: the EEPROM sits behind the SPI controller. */
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE(review): NACK yields ret == 1, not -1;
				 * callers only test non-zero — confirm. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff; /* 3 bytes were requested */
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5640
5641 /**
5642  *  write_eeprom - actually writes the relevant part of the data value.
5643  *  @sp : private member of the device structure, which is a pointer to the
5644  *       s2io_nic structure.
5645  *  @off : offset at which the data must be written
5646  *  @data : The data that is to be written
5647  *  @cnt : Number of bytes of the data that are actually to be written into
5648  *  the Eeprom. (max of 3)
5649  * Description:
5650  *  Actually writes the relevant part of the data value into the Eeprom
5651  *  through the I2C bus.
5652  * Return value:
5653  *  0 on success, -1 on failure.
5654  */
5655
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	if (sp->device_type == XFRAME_I_DEVICE) {
		/* Xframe I: write goes over the I2C controller. */
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		/* Poll for completion, up to 5 tries 50 ms apart. */
		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Xframe II: write goes over the SPI controller.
		 * cnt == 8 maps to byte-count 0 in the command word —
		 * presumably the encoding for a full 8-byte write; confirm
		 * against the Xframe II register documentation. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE(review): NACK yields ret == 1, not -1;
				 * callers only test non-zero — confirm. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
/* Read the PCI VPD area to extract the adapter's product name and
 * serial number into nic->product_name / nic->serial_num. */
static void s2io_vpd_read(struct s2io_nic *nic)
{
	u8 *vpd_data;
	u8 data;
	int i=0, cnt, fail = 0;
	/* Config-space offset of the VPD capability; differs per board —
	 * presumably fixed per device type, confirm against the bindings. */
	int vpd_addr = 0x80;

	if (nic->device_type == XFRAME_II_DEVICE) {
		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
		vpd_addr = 0x80;
	}
	else {
		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
		vpd_addr = 0x50;
	}
	/* Fallback serial in case the VPD read fails below. */
	strcpy(nic->serial_num, "NOT AVAILABLE");

	vpd_data = kmalloc(256, GFP_KERNEL);
	if (!vpd_data) {
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated += 256;

	/* Read the 256-byte VPD area 4 bytes at a time: write the VPD
	 * address register, then poll the flag byte until the hardware
	 * signals data-ready (0x80). */
	for (i = 0; i < 256; i +=4 ) {
		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
		for (cnt = 0; cnt <5; cnt++) {
			msleep(2);
			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
			if (data == 0x80)
				break;
		}
		/* cnt reaching 5 means the flag never came up: timeout. */
		if (cnt >= 5) {
			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
			fail = 1;
			break;
		}
		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
				      (u32 *)&vpd_data[i]);
	}

	if(!fail) {
		/* read serial number of adapter: scan for an "SN" tag
		 * followed by a length byte, then copy the payload. */
		for (cnt = 0; cnt < 256; cnt++) {
		if ((vpd_data[cnt] == 'S') &&
			(vpd_data[cnt+1] == 'N') &&
			(vpd_data[cnt+2] < VPD_STRING_LEN)) {
				memset(nic->serial_num, 0, VPD_STRING_LEN);
				memcpy(nic->serial_num, &vpd_data[cnt + 3],
					vpd_data[cnt+2]);
				break;
			}
		}
	}

	/* vpd_data[1] is the product-name length; vpd_data[3..] the text. */
	if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
		memset(nic->product_name, 0, vpd_data[1]);
		memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
	}
	kfree(vpd_data);
	nic->mac_control.stats_info->sw_stat.mem_freed += 256;
}
5769
5770 /**
5771  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
5772  *  @sp : private member of the device structure, which is a pointer to the *       s2io_nic structure.
5773  *  @eeprom : pointer to the user level structure provided by ethtool,
5774  *  containing all relevant information.
5775  *  @data_buf : user defined value to be written into Eeprom.
5776  *  Description: Reads the values stored in the Eeprom at given offset
5777  *  for a given length. Stores these values int the input argument data
5778  *  buffer 'data_buf' and returns these to the caller (ethtool.)
5779  *  Return value:
5780  *  int  0 on success
5781  */
5782
static int s2io_ethtool_geeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 * data_buf)
{
	u32 i, valid;
	u64 data;
	struct s2io_nic *sp = dev->priv;

	/* Magic lets ethtool verify the dump matches this device. */
	eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);

	/* Clamp the request to the visible EEPROM space. */
	if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
		eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;

	/* Read 4 bytes at a time; INV() adjusts byte order before the
	 * word is copied into the user buffer. */
	for (i = 0; i < eeprom->len; i += 4) {
		if (read_eeprom(sp, (eeprom->offset + i), &data)) {
			DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
			return -EFAULT;
		}
		valid = INV(data);
		memcpy((data_buf + i), &valid, 4);
	}
	return 0;
}
5805
5806 /**
5807  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5808  *  @sp : private member of the device structure, which is a pointer to the
5809  *  s2io_nic structure.
5810  *  @eeprom : pointer to the user level structure provided by ethtool,
5811  *  containing all relevant information.
5812  *  @data_buf ; user defined value to be written into Eeprom.
5813  *  Description:
5814  *  Tries to write the user provided value in the Eeprom, at the offset
5815  *  given by the user.
5816  *  Return value:
5817  *  0 on success, -EFAULT on failure.
5818  */
5819
5820 static int s2io_ethtool_seeprom(struct net_device *dev,
5821                                 struct ethtool_eeprom *eeprom,
5822                                 u8 * data_buf)
5823 {
5824         int len = eeprom->len, cnt = 0;
5825         u64 valid = 0, data;
5826         struct s2io_nic *sp = dev->priv;
5827
5828         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5829                 DBG_PRINT(ERR_DBG,
5830                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5831                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5832                           eeprom->magic);
5833                 return -EFAULT;
5834         }
5835
5836         while (len) {
5837                 data = (u32) data_buf[cnt] & 0x000000FF;
5838                 if (data) {
5839                         valid = (u32) (data << 24);
5840                 } else
5841                         valid = data;
5842
5843                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5844                         DBG_PRINT(ERR_DBG,
5845                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5846                         DBG_PRINT(ERR_DBG,
5847                                   "write into the specified offset\n");
5848                         return -EFAULT;
5849                 }
5850                 cnt++;
5851                 len--;
5852         }
5853
5854         return 0;
5855 }
5856
5857 /**
5858  * s2io_register_test - reads and writes into all clock domains.
5859  * @sp : private member of the device structure, which is a pointer to the
5860  * s2io_nic structure.
5861  * @data : variable that returns the result of each of the test conducted b
5862  * by the driver.
5863  * Description:
5864  * Read and write into all clock domains. The NIC has 3 clock domains,
5865  * see that registers in all the three regions are accessible.
5866  * Return value:
5867  * 0 on success.
5868  */
5869
5870 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5871 {
5872         struct XENA_dev_config __iomem *bar0 = sp->bar0;
5873         u64 val64 = 0, exp_val;
5874         int fail = 0;
5875
5876         val64 = readq(&bar0->pif_rd_swapper_fb);
5877         if (val64 != 0x123456789abcdefULL) {
5878                 fail = 1;
5879                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5880         }
5881
5882         val64 = readq(&bar0->rmac_pause_cfg);
5883         if (val64 != 0xc000ffff00000000ULL) {
5884                 fail = 1;
5885                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5886         }
5887
5888         val64 = readq(&bar0->rx_queue_cfg);
5889         if (sp->device_type == XFRAME_II_DEVICE)
5890                 exp_val = 0x0404040404040404ULL;
5891         else
5892                 exp_val = 0x0808080808080808ULL;
5893         if (val64 != exp_val) {
5894                 fail = 1;
5895                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5896         }
5897
5898         val64 = readq(&bar0->xgxs_efifo_cfg);
5899         if (val64 != 0x000000001923141EULL) {
5900                 fail = 1;
5901                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5902         }
5903
5904         val64 = 0x5A5A5A5A5A5A5A5AULL;
5905         writeq(val64, &bar0->xmsi_data);
5906         val64 = readq(&bar0->xmsi_data);
5907         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5908                 fail = 1;
5909                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5910         }
5911
5912         val64 = 0xA5A5A5A5A5A5A5A5ULL;
5913         writeq(val64, &bar0->xmsi_data);
5914         val64 = readq(&bar0->xmsi_data);
5915         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5916                 fail = 1;
5917                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5918         }
5919
5920         *data = fail;
5921         return fail;
5922 }
5923
5924 /**
5925  * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5926  * @sp : private member of the device structure, which is a pointer to the
5927  * s2io_nic structure.
5928  * @data:variable that returns the result of each of the test conducted by
5929  * the driver.
5930  * Description:
5931  * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5932  * register.
5933  * Return value:
5934  * 0 on success.
5935  */
5936
5937 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5938 {
5939         int fail = 0;
5940         u64 ret_data, org_4F0, org_7F0;
5941         u8 saved_4F0 = 0, saved_7F0 = 0;
5942         struct net_device *dev = sp->dev;
5943
5944         /* Test Write Error at offset 0 */
5945         /* Note that SPI interface allows write access to all areas
5946          * of EEPROM. Hence doing all negative testing only for Xframe I.
5947          */
5948         if (sp->device_type == XFRAME_I_DEVICE)
5949                 if (!write_eeprom(sp, 0, 0, 3))
5950                         fail = 1;
5951
5952         /* Save current values at offsets 0x4F0 and 0x7F0 */
5953         if (!read_eeprom(sp, 0x4F0, &org_4F0))
5954                 saved_4F0 = 1;
5955         if (!read_eeprom(sp, 0x7F0, &org_7F0))
5956                 saved_7F0 = 1;
5957
5958         /* Test Write at offset 4f0 */
5959         if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5960                 fail = 1;
5961         if (read_eeprom(sp, 0x4F0, &ret_data))
5962                 fail = 1;
5963
5964         if (ret_data != 0x012345) {
5965                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5966                         "Data written %llx Data read %llx\n",
5967                         dev->name, (unsigned long long)0x12345,
5968                         (unsigned long long)ret_data);
5969                 fail = 1;
5970         }
5971
5972         /* Reset the EEPROM data go FFFF */
5973         write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5974
5975         /* Test Write Request Error at offset 0x7c */
5976         if (sp->device_type == XFRAME_I_DEVICE)
5977                 if (!write_eeprom(sp, 0x07C, 0, 3))
5978                         fail = 1;
5979
5980         /* Test Write Request at offset 0x7f0 */
5981         if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5982                 fail = 1;
5983         if (read_eeprom(sp, 0x7F0, &ret_data))
5984                 fail = 1;
5985
5986         if (ret_data != 0x012345) {
5987                 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5988                         "Data written %llx Data read %llx\n",
5989                         dev->name, (unsigned long long)0x12345,
5990                         (unsigned long long)ret_data);
5991                 fail = 1;
5992         }
5993
5994         /* Reset the EEPROM data go FFFF */
5995         write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5996
5997         if (sp->device_type == XFRAME_I_DEVICE) {
5998                 /* Test Write Error at offset 0x80 */
5999                 if (!write_eeprom(sp, 0x080, 0, 3))
6000                         fail = 1;
6001
6002                 /* Test Write Error at offset 0xfc */
6003                 if (!write_eeprom(sp, 0x0FC, 0, 3))
6004                         fail = 1;
6005
6006                 /* Test Write Error at offset 0x100 */
6007                 if (!write_eeprom(sp, 0x100, 0, 3))
6008                         fail = 1;
6009
6010                 /* Test Write Error at offset 4ec */
6011                 if (!write_eeprom(sp, 0x4EC, 0, 3))
6012                         fail = 1;
6013         }
6014
6015         /* Restore values at offsets 0x4F0 and 0x7F0 */
6016         if (saved_4F0)
6017                 write_eeprom(sp, 0x4F0, org_4F0, 3);
6018         if (saved_7F0)
6019                 write_eeprom(sp, 0x7F0, org_7F0, 3);
6020
6021         *data = fail;
6022         return fail;
6023 }
6024
6025 /**
6026  * s2io_bist_test - invokes the MemBist test of the card .
6027  * @sp : private member of the device structure, which is a pointer to the
6028  * s2io_nic structure.
6029  * @data:variable that returns the result of each of the test conducted by
6030  * the driver.
6031  * Description:
6032  * This invokes the MemBist test of the card. We give around
 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
6035  * Return value:
6036  * 0 on success and -1 on failure.
6037  */
6038
6039 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6040 {
6041         u8 bist = 0;
6042         int cnt = 0, ret = -1;
6043
6044         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6045         bist |= PCI_BIST_START;
6046         pci_write_config_word(sp->pdev, PCI_BIST, bist);
6047
6048         while (cnt < 20) {
6049                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6050                 if (!(bist & PCI_BIST_START)) {
6051                         *data = (bist & PCI_BIST_CODE_MASK);
6052                         ret = 0;
6053                         break;
6054                 }
6055                 msleep(100);
6056                 cnt++;
6057         }
6058
6059         return ret;
6060 }
6061
6062 /**
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
6065  * s2io_nic structure.
6066  * @data: variable that returns the result of each of the test conducted by
6067  * the driver.
6068  * Description:
6069  * The function verifies the link state of the NIC and updates the input
6070  * argument 'data' appropriately.
6071  * Return value:
6072  * 0 on success.
6073  */
6074
6075 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6076 {
6077         struct XENA_dev_config __iomem *bar0 = sp->bar0;
6078         u64 val64;
6079
6080         val64 = readq(&bar0->adapter_status);
6081         if(!(LINK_IS_UP(val64)))
6082                 *data = 1;
6083         else
6084                 *data = 0;
6085
6086         return *data;
6087 }
6088
6089 /**
6090  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6091  * @sp - private member of the device structure, which is a pointer to the
6092  * s2io_nic structure.
6093  * @data - variable that returns the result of each of the test
6094  * conducted by the driver.
6095  * Description:
6096  *  This is one of the offline test that tests the read and write
6097  *  access to the RldRam chip on the NIC.
6098  * Return value:
6099  *  0 on success.
6100  */
6101
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        u64 val64;
        int cnt, iteration = 0, test_fail = 0;

        /* Disable ECC so the raw test patterns are not corrected. */
        val64 = readq(&bar0->adapter_control);
        val64 &= ~ADAPTER_ECC_EN;
        writeq(val64, &bar0->adapter_control);

        /* Put the RLDRAM controller into test mode.  The LF/UF flags of
         * SPECIAL_REG_WRITE select which half of the 64-bit register is
         * written first; the exact sequence below must be preserved.
         */
        val64 = readq(&bar0->mc_rldram_test_ctrl);
        val64 |= MC_RLDRAM_TEST_MODE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

        val64 = readq(&bar0->mc_rldram_mrs);
        val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        val64 |= MC_RLDRAM_MRS_ENABLE;
        SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

        /* Two passes: the original patterns, then the same patterns with
         * the upper 48 bits inverted.
         */
        while (iteration < 2) {
                val64 = 0x55555555aaaa0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d0);

                val64 = 0xaaaa5a5555550000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d1);

                val64 = 0x55aaaaaaaa5a0000ULL;
                if (iteration == 1) {
                        val64 ^= 0xFFFFFFFFFFFF0000ULL;
                }
                writeq(val64, &bar0->mc_rldram_test_d2);

                /* Target address for the test transfer. */
                val64 = (u64) (0x0000003ffffe0100ULL);
                writeq(val64, &bar0->mc_rldram_test_add);

                /* Kick off the write pass of the test. */
                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
                        MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                /* Poll up to 1s (5 * 200ms) for the write pass to finish. */
                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(200);
                }

                /* Timed out waiting for the write pass — give up. */
                if (cnt == 5)
                        break;

                /* Kick off the read/compare pass. */
                val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
                SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

                /* Poll up to 2.5s (5 * 500ms) for the read pass to finish. */
                for (cnt = 0; cnt < 5; cnt++) {
                        val64 = readq(&bar0->mc_rldram_test_ctrl);
                        if (val64 & MC_RLDRAM_TEST_DONE)
                                break;
                        msleep(500);
                }

                /* Timed out waiting for the read pass — give up. */
                if (cnt == 5)
                        break;

                val64 = readq(&bar0->mc_rldram_test_ctrl);
                if (!(val64 & MC_RLDRAM_TEST_PASS))
                        test_fail = 1;

                iteration++;
        }

        *data = test_fail;

        /* Bring the adapter out of test mode */
        SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

        return test_fail;
}
6186
6187 /**
 *  s2io_ethtool_test - conducts 6 tests to determine the health of card.
6189  *  @sp : private member of the device structure, which is a pointer to the
6190  *  s2io_nic structure.
6191  *  @ethtest : pointer to a ethtool command specific structure that will be
6192  *  returned to the user.
6193  *  @data : variable that returns the result of each of the test
6194  * conducted by the driver.
6195  * Description:
6196  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
6197  *  the health of the card.
6198  * Return value:
6199  *  void
6200  */
6201
6202 static void s2io_ethtool_test(struct net_device *dev,
6203                               struct ethtool_test *ethtest,
6204                               uint64_t * data)
6205 {
6206         struct s2io_nic *sp = dev->priv;
6207         int orig_state = netif_running(sp->dev);
6208
6209         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6210                 /* Offline Tests. */
6211                 if (orig_state)
6212                         s2io_close(sp->dev);
6213
6214                 if (s2io_register_test(sp, &data[0]))
6215                         ethtest->flags |= ETH_TEST_FL_FAILED;
6216
6217                 s2io_reset(sp);
6218
6219                 if (s2io_rldram_test(sp, &data[3]))
6220                         ethtest->flags |= ETH_TEST_FL_FAILED;
6221
6222                 s2io_reset(sp);
6223
6224                 if (s2io_eeprom_test(sp, &data[1]))
6225                         ethtest->flags |= ETH_TEST_FL_FAILED;
6226
6227                 if (s2io_bist_test(sp, &data[4]))
6228                         ethtest->flags |= ETH_TEST_FL_FAILED;
6229
6230                 if (orig_state)
6231                         s2io_open(sp->dev);
6232
6233                 data[2] = 0;
6234         } else {
6235                 /* Online Tests. */
6236                 if (!orig_state) {
6237                         DBG_PRINT(ERR_DBG,
6238                                   "%s: is not up, cannot run test\n",
6239                                   dev->name);
6240                         data[0] = -1;
6241                         data[1] = -1;
6242                         data[2] = -1;
6243                         data[3] = -1;
6244                         data[4] = -1;
6245                 }
6246
6247                 if (s2io_link_test(sp, &data[2]))
6248                         ethtest->flags |= ETH_TEST_FL_FAILED;
6249
6250                 data[0] = 0;
6251                 data[1] = 0;
6252                 data[3] = 0;
6253                 data[4] = 0;
6254         }
6255 }
6256
/* Fill @tmp_stats with the adapter's statistics.  The order of entries is
 * fixed and must match the ETH_SS_STATS string table (see
 * s2io_ethtool_get_strings) — do not reorder any of the stores below.
 * Hardware counters live in little-endian shared memory; 32-bit counters
 * with a separate overflow word are combined into one 64-bit value.
 */
static void s2io_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats,
                                   u64 * tmp_stats)
{
        int i = 0, k;
        struct s2io_nic *sp = dev->priv;
        struct stat_block *stat_info = sp->mac_control.stats_info;

        /* Snapshot the hardware statistics block before copying. */
        s2io_updt_stats(sp);
        /* TMAC (transmit MAC) hardware counters. */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
                le32_to_cpu(stat_info->tmac_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_bcst_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_ttl_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_ucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_nucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_any_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_vld_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_icmp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_rst_tcp);
        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
        tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->tmac_udp);
        /* RMAC (receive MAC) hardware counters. */
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_data_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_mcst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_vld_bcst_frms);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_ttl_octets);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
                << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
                 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_discarded_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
                 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_usized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_osized_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_frag_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_jabber_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_ip);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_drop_ip);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_icmp);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_udp);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_err_drp_udp);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
        /* Per-receive-queue frame and queue-full counters. */
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_pause_cnt);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
        tmp_stats[i++] =
                (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
                le32_to_cpu(stat_info->rmac_accepted_ip);
        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
        /* Bus read/write request and retry counters. */
        tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
        tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);

        /* Enhanced statistics exist only for Hercules */
        if(sp->device_type == XFRAME_II_DEVICE) {
                tmp_stats[i++] =
                                le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
                tmp_stats[i++] =
                                le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
                tmp_stats[i++] =
                                le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
                tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
                tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
        }

        /* Driver-maintained software statistics (host byte order). */
        tmp_stats[i++] = 0;
        tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
        tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
        tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
        for (k = 0; k < MAX_RX_RINGS; k++)
                tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
        /* XPAK transceiver alarm/warning counters. */
        tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
        tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
        tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
        tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
        tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
        tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
        tmp_stats[i++] = stat_info->sw_stat.sending_both;
        tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
        tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
        if (stat_info->sw_stat.num_aggregations) {
                u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
                int count = 0;
                /*
                 * Since 64-bit divide does not work on all platforms,
                 * do repeated subtraction.
                 */
                while (tmp >= stat_info->sw_stat.num_aggregations) {
                        tmp -= stat_info->sw_stat.num_aggregations;
                        count++;
                }
                tmp_stats[i++] = count;
        }
        else
                tmp_stats[i++] = 0;
        tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
        tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
        tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
        tmp_stats[i++] = stat_info->sw_stat.mem_freed;
        tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
        tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
        tmp_stats[i++] = stat_info->sw_stat.link_up_time;
        tmp_stats[i++] = stat_info->sw_stat.link_down_time;

        /* Transmit-side error counters. */
        tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;

        /* Receive-side and per-block error counters. */
        tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
        tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
}
6532
6533 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6534 {
6535         return (XENA_REG_SPACE);
6536 }
6537
6538
6539 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6540 {
6541         struct s2io_nic *sp = dev->priv;
6542
6543         return (sp->rx_csum);
6544 }
6545
6546 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6547 {
6548         struct s2io_nic *sp = dev->priv;
6549
6550         if (data)
6551                 sp->rx_csum = 1;
6552         else
6553                 sp->rx_csum = 0;
6554
6555         return 0;
6556 }
6557
6558 static int s2io_get_eeprom_len(struct net_device *dev)
6559 {
6560         return (XENA_EEPROM_SPACE);
6561 }
6562
6563 static int s2io_get_sset_count(struct net_device *dev, int sset)
6564 {
6565         struct s2io_nic *sp = dev->priv;
6566
6567         switch (sset) {
6568         case ETH_SS_TEST:
6569                 return S2IO_TEST_LEN;
6570         case ETH_SS_STATS:
6571                 switch(sp->device_type) {
6572                 case XFRAME_I_DEVICE:
6573                         return XFRAME_I_STAT_LEN;
6574                 case XFRAME_II_DEVICE:
6575                         return XFRAME_II_STAT_LEN;
6576                 default:
6577                         return 0;
6578                 }
6579         default:
6580                 return -EOPNOTSUPP;
6581         }
6582 }
6583
6584 static void s2io_ethtool_get_strings(struct net_device *dev,
6585                                      u32 stringset, u8 * data)
6586 {
6587         int stat_size = 0;
6588         struct s2io_nic *sp = dev->priv;
6589
6590         switch (stringset) {
6591         case ETH_SS_TEST:
6592                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6593                 break;
6594         case ETH_SS_STATS:
6595                 stat_size = sizeof(ethtool_xena_stats_keys);
6596                 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6597                 if(sp->device_type == XFRAME_II_DEVICE) {
6598                         memcpy(data + stat_size,
6599                                 &ethtool_enhanced_stats_keys,
6600                                 sizeof(ethtool_enhanced_stats_keys));
6601                         stat_size += sizeof(ethtool_enhanced_stats_keys);
6602                 }
6603
6604                 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6605                         sizeof(ethtool_driver_stats_keys));
6606         }
6607 }
6608
6609 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6610 {
6611         if (data)
6612                 dev->features |= NETIF_F_IP_CSUM;
6613         else
6614                 dev->features &= ~NETIF_F_IP_CSUM;
6615
6616         return 0;
6617 }
6618
6619 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6620 {
6621         return (dev->features & NETIF_F_TSO) != 0;
6622 }
6623 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6624 {
6625         if (data)
6626                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6627         else
6628                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6629
6630         return 0;
6631 }
6632
/* ethtool entry points exported via dev->ethtool_ops. Handlers not defined
 * in this chunk (gset/sset, gregs, eeprom, pause, self-test, idnic, stats)
 * live elsewhere in this file. */
static const struct ethtool_ops netdev_ethtool_ops = {
        .get_settings = s2io_ethtool_gset,
        .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
        .get_link = ethtool_op_get_link,
        .get_eeprom_len = s2io_get_eeprom_len,
        .get_eeprom = s2io_ethtool_geeprom,
        .set_eeprom = s2io_ethtool_seeprom,
        .get_ringparam = s2io_ethtool_gringparam,
        .get_pauseparam = s2io_ethtool_getpause_data,
        .set_pauseparam = s2io_ethtool_setpause_data,
        .get_rx_csum = s2io_ethtool_get_rx_csum,
        .set_rx_csum = s2io_ethtool_set_rx_csum,
        .set_tx_csum = s2io_ethtool_op_set_tx_csum,
        .set_sg = ethtool_op_set_sg,
        .get_tso = s2io_ethtool_op_get_tso,
        .set_tso = s2io_ethtool_op_set_tso,
        .set_ufo = ethtool_op_set_ufo,
        .self_test = s2io_ethtool_test,
        .get_strings = s2io_ethtool_get_strings,
        .phys_id = s2io_ethtool_idnic,
        .get_ethtool_stats = s2io_get_ethtool_stats,
        .get_sset_count = s2io_get_sset_count,
};
6659
6660 /**
6661  *  s2io_ioctl - Entry point for the Ioctl
6662  *  @dev :  Device pointer.
 *  @ifr :  An IOCTL specific structure, that can contain a pointer to
6664  *  a proprietary structure used to pass information to the driver.
6665  *  @cmd :  This is used to distinguish between the different commands that
6666  *  can be passed to the IOCTL functions.
6667  *  Description:
 *  Currently there is no special functionality supported in IOCTL, hence
 *  the function always returns -EOPNOTSUPP.
6670  */
6671
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        /* No private ioctls are implemented. */
        return -EOPNOTSUPP;
}
6676
6677 /**
6678  *  s2io_change_mtu - entry point to change MTU size for the device.
6679  *   @dev : device pointer.
6680  *   @new_mtu : the new MTU size for the device.
6681  *   Description: A driver entry point to change MTU size for the device.
6682  *   Before changing the MTU the device must be stopped.
6683  *  Return value:
6684  *   0 on success and an appropriate (-)ve integer as defined in errno.h
6685  *   file on failure.
6686  */
6687
6688 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6689 {
6690         struct s2io_nic *sp = dev->priv;
6691         int ret = 0;
6692
6693         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6694                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6695                           dev->name);
6696                 return -EPERM;
6697         }
6698
6699         dev->mtu = new_mtu;
6700         if (netif_running(dev)) {
6701                 s2io_stop_all_tx_queue(sp);
6702                 s2io_card_down(sp);
6703                 ret = s2io_card_up(sp);
6704                 if (ret) {
6705                         DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6706                                   __FUNCTION__);
6707                         return ret;
6708                 }
6709                 s2io_wake_all_tx_queue(sp);
6710         } else { /* Device is down */
6711                 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6712                 u64 val64 = new_mtu;
6713
6714                 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6715         }
6716
6717         return ret;
6718 }
6719
/**
 * s2io_set_link - Set the link status
 * @work: work queue item containing the device private structure
 * Description: Sets the link status for the adapter
 */
6725
static void s2io_set_link(struct work_struct *work)
{
        struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
        struct net_device *dev = nic->dev;
        struct XENA_dev_config __iomem *bar0 = nic->bar0;
        register u64 val64;
        u16 subid;

        rtnl_lock();

        /* Interface was brought down before the work item ran: nothing to do. */
        if (!netif_running(dev))
                goto out_unlock;

        if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
                /* The card is being reset, no point doing anything */
                goto out_unlock;
        }

        subid = nic->pdev->subsystem_device;
        if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
                /*
                 * Allow a small delay for the NICs self initiated
                 * cleanup to complete.
                 */
                msleep(100);
        }

        val64 = readq(&bar0->adapter_status);
        if (LINK_IS_UP(val64)) {
                /* Link up: enable the adapter first if it is not enabled yet,
                 * which is only safe once the device is quiescent. */
                if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
                        if (verify_xena_quiescence(nic)) {
                                val64 = readq(&bar0->adapter_control);
                                val64 |= ADAPTER_CNTL_EN;
                                writeq(val64, &bar0->adapter_control);
                                /* Some subsystems drive the link LED via a
                                 * GPIO pin rather than the adapter_control
                                 * LED bit. */
                                if (CARDS_WITH_FAULTY_LINK_INDICATORS(
                                        nic->device_type, subid)) {
                                        val64 = readq(&bar0->gpio_control);
                                        val64 |= GPIO_CTRL_GPIO_0;
                                        writeq(val64, &bar0->gpio_control);
                                        val64 = readq(&bar0->gpio_control);
                                } else {
                                        val64 |= ADAPTER_LED_ON;
                                        writeq(val64, &bar0->adapter_control);
                                }
                                nic->device_enabled_once = TRUE;
                        } else {
                                DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
                                DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
                                s2io_stop_all_tx_queue(nic);
                        }
                }
                /* Turn the LED on and report link-up to the network stack. */
                val64 = readq(&bar0->adapter_control);
                val64 |= ADAPTER_LED_ON;
                writeq(val64, &bar0->adapter_control);
                s2io_link(nic, LINK_UP);
        } else {
                /* Link down: clear the GPIO-driven indicator where needed. */
                if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
                                                      subid)) {
                        val64 = readq(&bar0->gpio_control);
                        val64 &= ~GPIO_CTRL_GPIO_0;
                        writeq(val64, &bar0->gpio_control);
                        val64 = readq(&bar0->gpio_control);
                }
                /* turn off LED */
                val64 = readq(&bar0->adapter_control);
                val64 = val64 &(~ADAPTER_LED_ON);
                writeq(val64, &bar0->adapter_control);
                s2io_link(nic, LINK_DOWN);
        }
        clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
        rtnl_unlock();
}
6800
/*
 * set_rxd_buffer_pointer - attach (or re-attach) DMA buffers to an Rx
 * descriptor whose Host_Control is NULL.
 * @sp: device private structure.
 * @rxdp: the descriptor to populate.
 * @ba: dummy-buffer addresses; only used in 2-buffer (RXD_MODE_3B) mode.
 * @skb: in/out. If *skb is non-NULL the previously mapped DMA addresses in
 *       @temp0..@temp2 are reused (the frame will not be processed, so the
 *       same skb can back several descriptors); otherwise a fresh skb of
 *       @size bytes is allocated and mapped, and cached in the temps.
 * @temp0, @temp1, @temp2: cached DMA addresses shared across calls.
 * @size: skb allocation size for this ring mode.
 * Returns 0 on success, -ENOMEM on skb-allocation or PCI-mapping failure;
 * on the mapping-failure path all earlier mappings are undone, the skb is
 * freed and the pci_map_fail/mem_freed counters are updated.
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                                struct buffAdd *ba,
                                struct sk_buff **skb, u64 *temp0, u64 *temp1,
                                u64 *temp2, int size)
{
        struct net_device *dev = sp->dev;
        struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

        if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
                struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
                /* allocate skb */
                if (*skb) {
                        DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
                        /*
                         * As Rx frame are not going to be processed,
                         * using same mapped address for the Rxd
                         * buffer pointer
                         */
                        rxdp1->Buffer0_ptr = *temp0;
                } else {
                        *skb = dev_alloc_skb(size);
                        if (!(*skb)) {
                                DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
                                DBG_PRINT(INFO_DBG, "memory to allocate ");
                                DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
                                sp->mac_control.stats_info->sw_stat. \
                                        mem_alloc_fail_cnt++;
                                return -ENOMEM ;
                        }
                        sp->mac_control.stats_info->sw_stat.mem_allocated
                                += (*skb)->truesize;
                        /* storing the mapped addr in a temp variable
                         * such it will be used for next rxd whose
                         * Host Control is NULL
                         */
                        rxdp1->Buffer0_ptr = *temp0 =
                                pci_map_single( sp->pdev, (*skb)->data,
                                        size - NET_IP_ALIGN,
                                        PCI_DMA_FROMDEVICE);
                        if( (rxdp1->Buffer0_ptr == 0) ||
                                (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
                                goto memalloc_failed;
                        }
                        rxdp->Host_Control = (unsigned long) (*skb);
                }
        } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
                struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
                /* Two buffer Mode */
                if (*skb) {
                        rxdp3->Buffer2_ptr = *temp2;
                        rxdp3->Buffer0_ptr = *temp0;
                        rxdp3->Buffer1_ptr = *temp1;
                } else {
                        *skb = dev_alloc_skb(size);
                        if (!(*skb)) {
                                DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
                                DBG_PRINT(INFO_DBG, "memory to allocate ");
                                DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
                                sp->mac_control.stats_info->sw_stat. \
                                        mem_alloc_fail_cnt++;
                                return -ENOMEM;
                        }
                        sp->mac_control.stats_info->sw_stat.mem_allocated
                                += (*skb)->truesize;
                        /* Buffer 2 carries the payload (mtu + 4). */
                        rxdp3->Buffer2_ptr = *temp2 =
                                pci_map_single(sp->pdev, (*skb)->data,
                                               dev->mtu + 4,
                                               PCI_DMA_FROMDEVICE);
                        if( (rxdp3->Buffer2_ptr == 0) ||
                                (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
                                goto memalloc_failed;
                        }
                        /* Buffer 0 holds the header area (BUF0_LEN). */
                        rxdp3->Buffer0_ptr = *temp0 =
                                pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
                                                PCI_DMA_FROMDEVICE);
                        if( (rxdp3->Buffer0_ptr == 0) ||
                                (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
                                /* Undo the buffer-2 mapping before bailing. */
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer2_ptr,
                                        dev->mtu + 4, PCI_DMA_FROMDEVICE);
                                goto memalloc_failed;
                        }
                        rxdp->Host_Control = (unsigned long) (*skb);

                        /* Buffer-1 will be dummy buffer not used */
                        rxdp3->Buffer1_ptr = *temp1 =
                                pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
                        if( (rxdp3->Buffer1_ptr == 0) ||
                                (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
                                /* Undo both earlier mappings before bailing. */
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer0_ptr,
                                        BUF0_LEN, PCI_DMA_FROMDEVICE);
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer2_ptr,
                                        dev->mtu + 4, PCI_DMA_FROMDEVICE);
                                goto memalloc_failed;
                        }
                }
        }
        return 0;
        memalloc_failed:
                stats->pci_map_fail_cnt++;
                stats->mem_freed += (*skb)->truesize;
                dev_kfree_skb(*skb);
                return -ENOMEM;
}
6908
6909 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6910                                 int size)
6911 {
6912         struct net_device *dev = sp->dev;
6913         if (sp->rxd_mode == RXD_MODE_1) {
6914                 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6915         } else if (sp->rxd_mode == RXD_MODE_3B) {
6916                 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6917                 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6918                 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6919         }
6920 }
6921
6922 static  int rxd_owner_bit_reset(struct s2io_nic *sp)
6923 {
6924         int i, j, k, blk_cnt = 0, size;
6925         struct mac_info * mac_control = &sp->mac_control;
6926         struct config_param *config = &sp->config;
6927         struct net_device *dev = sp->dev;
6928         struct RxD_t *rxdp = NULL;
6929         struct sk_buff *skb = NULL;
6930         struct buffAdd *ba = NULL;
6931         u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6932
6933         /* Calculate the size based on ring mode */
6934         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6935                 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6936         if (sp->rxd_mode == RXD_MODE_1)
6937                 size += NET_IP_ALIGN;
6938         else if (sp->rxd_mode == RXD_MODE_3B)
6939                 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6940
6941         for (i = 0; i < config->rx_ring_num; i++) {
6942                 blk_cnt = config->rx_cfg[i].num_rxd /
6943                         (rxd_count[sp->rxd_mode] +1);
6944
6945                 for (j = 0; j < blk_cnt; j++) {
6946                         for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6947                                 rxdp = mac_control->rings[i].
6948                                         rx_blocks[j].rxds[k].virt_addr;
6949                                 if(sp->rxd_mode == RXD_MODE_3B)
6950                                         ba = &mac_control->rings[i].ba[j][k];
6951                                 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6952                                                        &skb,(u64 *)&temp0_64,
6953                                                        (u64 *)&temp1_64,
6954                                                        (u64 *)&temp2_64,
6955                                                         size) == ENOMEM) {
6956                                         return 0;
6957                                 }
6958
6959                                 set_rxd_buffer_size(sp, rxdp, size);
6960                                 wmb();
6961                                 /* flip the Ownership bit to Hardware */
6962                                 rxdp->Control_1 |= RXD_OWN_XENA;
6963                         }
6964                 }
6965         }
6966         return 0;
6967
6968 }
6969
/*
 * s2io_add_isr - register the driver's interrupt handler(s).
 * @sp: device private structure.
 * Description: Tries MSI-X first when configured; on MSI-X enable or
 * per-vector registration failure it falls back to INTA. Returns 0 on
 * success, -1 if even the INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
        int ret = 0;
        struct net_device *dev = sp->dev;
        int err = 0;

        if (sp->config.intr_type == MSI_X)
                ret = s2io_enable_msi_x(sp);
        if (ret) {
                /* MSI-X could not be enabled: fall back to legacy INTA. */
                DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
                sp->config.intr_type = INTA;
        }

        /* Store the values of the MSIX table in the struct s2io_nic structure */
        store_xmsi_data(sp);

        /* After proper initialization of H/W, register ISR */
        if (sp->config.intr_type == MSI_X) {
                int i, msix_tx_cnt=0,msix_rx_cnt=0;

                /* Register one handler per in-use MSI-X vector: FIFO (Tx)
                 * vectors get the fifo handler, the rest the ring (Rx)
                 * handler. */
                for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
                        if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
                                sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
                                        dev->name, i);
                                err = request_irq(sp->entries[i].vector,
                                          s2io_msix_fifo_handle, 0, sp->desc[i],
                                                  sp->s2io_entries[i].arg);
                                /* If either data or addr is zero print it */
                                if(!(sp->msix_info[i].addr &&
                                        sp->msix_info[i].data)) {
                                        DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
                                                "Data:0x%llx\n",sp->desc[i],
                                                (unsigned long long)
                                                sp->msix_info[i].addr,
                                                (unsigned long long)
                                                sp->msix_info[i].data);
                                } else {
                                        msix_tx_cnt++;
                                }
                        } else {
                                sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
                                        dev->name, i);
                                err = request_irq(sp->entries[i].vector,
                                          s2io_msix_ring_handle, 0, sp->desc[i],
                                                  sp->s2io_entries[i].arg);
                                /* If either data or addr is zero print it */
                                if(!(sp->msix_info[i].addr &&
                                        sp->msix_info[i].data)) {
                                        DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
                                                "Data:0x%llx\n",sp->desc[i],
                                                (unsigned long long)
                                                sp->msix_info[i].addr,
                                                (unsigned long long)
                                                sp->msix_info[i].data);
                                } else {
                                        msix_rx_cnt++;
                                }
                        }
                        if (err) {
                                /* One vector failed: undo all MSI-X
                                 * registrations and fall back to INTA. */
                                remove_msix_isr(sp);
                                DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
                                          "failed\n", dev->name, i);
                                DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
                                                 dev->name);
                                sp->config.intr_type = INTA;
                                break;
                        }
                        sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
                }
                if (!err) {
                        printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
                                msix_tx_cnt);
                        printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
                                msix_rx_cnt);
                }
        }
        /* Reached either by configuration or by MSI-X fallback above. */
        if (sp->config.intr_type == INTA) {
                err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
                                sp->name, dev);
                if (err) {
                        DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
                                  dev->name);
                        return -1;
                }
        }
        return 0;
}
7057 static void s2io_rem_isr(struct s2io_nic * sp)
7058 {
7059         if (sp->config.intr_type == MSI_X)
7060                 remove_msix_isr(sp);
7061         else
7062                 remove_inta_isr(sp);
7063 }
7064
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: device private structure.
 * @do_io: when non-zero, also touch the hardware (stop the NIC, wait for
 *         quiescence, reset); pass 0 when register I/O must be avoided.
 * Description: stops the alarm timer, waits for a concurrent
 * s2io_set_link() to finish, disables NAPI, removes the ISR and frees all
 * Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
        int cnt = 0;
        struct XENA_dev_config __iomem *bar0 = sp->bar0;
        register u64 val64 = 0;
        struct config_param *config;
        config = &sp->config;

        if (!is_s2io_card_up(sp))
                return;

        del_timer_sync(&sp->alarm_timer);
        /* If s2io_set_link task is executing, wait till it completes. */
        while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
                msleep(50);
        }
        clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

        /* Disable napi */
        if (config->napi)
                napi_disable(&sp->napi);

        /* disable Tx and Rx traffic on the NIC */
        if (do_io)
                stop_nic(sp);

        s2io_rem_isr(sp);

        /* Check if the device is Quiescent and then Reset the NIC */
        while(do_io) {
                /* As per the HW requirement we need to replenish the
                 * receive buffer to avoid the ring bump. Since there is
                 * no intention of processing the Rx frame at this pointwe are
                 * just settting the ownership bit of rxd in Each Rx
                 * ring to HW and set the appropriate buffer size
                 * based on the ring mode
                 */
                rxd_owner_bit_reset(sp);

                val64 = readq(&bar0->adapter_status);
                if (verify_xena_quiescence(sp)) {
                        /* NOTE: the break belongs to the inner if despite
                         * its indentation — the loop only exits once BOTH
                         * quiescence checks pass (or cnt hits 10 below). */
                        if(verify_pcc_quiescent(sp, sp->device_enabled_once))
                        break;
                }

                msleep(50);
                cnt++;
                if (cnt == 10) {
                        DBG_PRINT(ERR_DBG,
                                  "s2io_close:Device not Quiescent ");
                        DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
                                  (unsigned long long) val64);
                        break;
                }
        }
        if (do_io)
                s2io_reset(sp);

        /* Free all Tx buffers */
        free_tx_buffers(sp);

        /* Free all Rx buffers */
        free_rx_buffers(sp);

        clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7131
/* Bring the adapter fully down, including hardware I/O (do_io = 1). */
static void s2io_card_down(struct s2io_nic * sp)
{
        do_s2io_card_down(sp, 1);
}
7136
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure.
 * Description: initializes the hardware, fills the Rx rings, enables NAPI,
 * restores the receive mode, starts the NIC, registers the ISR, arms the
 * alarm timer and enables interrupts. Returns 0 on success or a negative
 * errno; on any failure the device is reset and already-filled Rx buffers
 * are released.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
        int i, ret = 0;
        struct mac_info *mac_control;
        struct config_param *config;
        struct net_device *dev = (struct net_device *) sp->dev;
        u16 interruptible;

        /* Initialize the H/W I/O registers */
        ret = init_nic(sp);
        if (ret != 0) {
                DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
                          dev->name);
                /* -EIO means register access itself failed; skip the reset. */
                if (ret != -EIO)
                        s2io_reset(sp);
                return ret;
        }

        /*
         * Initializing the Rx buffers. For now we are considering only 1
         * Rx ring and initializing buffers into 30 Rx blocks
         */
        mac_control = &sp->mac_control;
        config = &sp->config;

        for (i = 0; i < config->rx_ring_num; i++) {
                mac_control->rings[i].mtu = dev->mtu;
                ret = fill_rx_buffers(&mac_control->rings[i]);
                if (ret) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                                  dev->name);
                        s2io_reset(sp);
                        free_rx_buffers(sp);
                        return -ENOMEM;
                }
                DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
                          mac_control->rings[i].rx_bufs_left);
        }

        /* Initialise napi */
        if (config->napi)
                napi_enable(&sp->napi);

        /* Maintain the state prior to the open */
        if (sp->promisc_flg)
                sp->promisc_flg = 0;
        if (sp->m_cast_flg) {
                sp->m_cast_flg = 0;
                sp->all_multi_pos= 0;
        }

        /* Setting its receive mode */
        s2io_set_multicast(dev);

        if (sp->lro) {
                /* Initialize max aggregatable pkts per session based on MTU */
                sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
                /* Check if we can use(if specified) user provided value */
                if (lro_max_pkts < sp->lro_max_aggr_per_sess)
                        sp->lro_max_aggr_per_sess = lro_max_pkts;
        }

        /* Enable Rx Traffic and interrupts on the NIC */
        if (start_nic(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
                s2io_reset(sp);
                free_rx_buffers(sp);
                return -ENODEV;
        }

        /* Add interrupt service routine */
        if (s2io_add_isr(sp) != 0) {
                /* s2io_add_isr() may have partially registered MSI-X
                 * vectors before failing; remove them. */
                if (sp->config.intr_type == MSI_X)
                        s2io_rem_isr(sp);
                s2io_reset(sp);
                free_rx_buffers(sp);
                return -ENODEV;
        }

        /* Arm the periodic alarm handler (every HZ/2). */
        S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

        /*  Enable select interrupts */
        en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
        if (sp->config.intr_type != INTA)
                en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
        else {
                interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
                interruptible |= TX_PIC_INTR;
                en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
        }

        set_bit(__S2IO_STATE_CARD_UP, &sp->state);
        return 0;
}
7231
7232 /**
7233  * s2io_restart_nic - Resets the NIC.
7234  * @data : long pointer to the device private structure
7235  * Description:
7236  * This function is scheduled to be run by the s2io_tx_watchdog
7237  * function after 0.5 secs to reset the NIC. The idea is to reduce
7238  * the run time of the watch dog routine which is run holding a
7239  * spin lock.
7240  */
7241
7242 static void s2io_restart_nic(struct work_struct *work)
7243 {
7244         struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7245         struct net_device *dev = sp->dev;
7246
7247         rtnl_lock();
7248
7249         if (!netif_running(dev))
7250                 goto out_unlock;
7251
7252         s2io_card_down(sp);
7253         if (s2io_card_up(sp)) {
7254                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7255                           dev->name);
7256         }
7257         s2io_wake_all_tx_queue(sp);
7258         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7259                   dev->name);
7260 out_unlock:
7261         rtnl_unlock();
7262 }
7263
7264 /**
7265  *  s2io_tx_watchdog - Watchdog for transmit side.
7266  *  @dev : Pointer to net device structure
7267  *  Description:
7268  *  This function is triggered if the Tx Queue is stopped
7269  *  for a pre-defined amount of time when the Interface is still up.
7270  *  If the Interface is jammed in such a situation, the hardware is
7271  *  reset (by s2io_close) and restarted again (by s2io_open) to
7272  *  overcome any problem that might have been caused in the hardware.
7273  *  Return value:
7274  *  void
7275  */
7276
7277 static void s2io_tx_watchdog(struct net_device *dev)
7278 {
7279         struct s2io_nic *sp = dev->priv;
7280
7281         if (netif_carrier_ok(dev)) {
7282                 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7283                 schedule_work(&sp->rst_timer_task);
7284                 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7285         }
7286 }
7287
7288 /**
7289  *   rx_osm_handler - To perform some OS related operations on SKB.
7290  *   @sp: private member of the device structure,pointer to s2io_nic structure.
7291  *   @skb : the socket buffer pointer.
7292  *   @len : length of the packet
7293  *   @cksum : FCS checksum of the frame.
7294  *   @ring_no : the ring from which this RxD was extracted.
7295  *   Description:
7296  *   This function is called by the Rx interrupt service routine to perform
7297  *   some OS related operations on the SKB before passing it to the upper
7298  *   layers. It mainly checks if the checksum is OK, if so adds it to the
7299  *   SKBs cksum variable, increments the Rx packet count and passes the SKB
7300  *   to the upper layer. If the checksum is wrong, it increments the Rx
7301  *   packet error count, frees the SKB and returns error.
7302  *   Return value:
7303  *   SUCCESS on success and -1 on failure.
7304  */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) ring_data->dev;
	/* The skb for this descriptor was stashed in Host_Control at fill time. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* Transfer code field sits in the upper bits of Control_1;
		 * bucket each code into its per-cause software counter. */
		err_mask = err >> 48;
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		* Drop the packet if bad transfer code. Exception being
		* 0x5, which could be due to unsupported IPv6 extension header.
		* In this case, we let stack handle the packet.
		* Note that in this case, since checksum will be incorrect,
		* stack will validate the same.
		*/
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	ring_data->rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* Single-buffer mode: the whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		ring_data->rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 3-buffer mode: copy the buffer-0 header in front of the
		 * buffer-2 payload already DMAed into the skb. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		ring_data->rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Checksum / LRO handling only for TCP/UDP frames that are not IP
	 * fragments (when LRO is on) and only if Rx checksum is enabled. */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
	    (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Try to club this segment into an existing
				 * LRO session; ret selects the action below. */
				ret = s2io_club_tcp_session(ring_data,
					skb->data, &tcp, &tcp_len, &lro,
					rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						/* current skb still goes up
						 * on its own via send_up */
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else
		skb->ip_summed = CHECKSUM_NONE;

	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
send_up:
	/* Hand the frame to the stack (with its VLAN tag, if any). */
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
	dev->last_rx = jiffies;
aggregate:
	/* One Rx buffer consumed on this ring in every outcome. */
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7497
7498 /**
7499  *  s2io_link - stops/starts the Tx queue.
7500  *  @sp : private member of the device structure, which is a pointer to the
7501  *  s2io_nic structure.
7502  *  @link : indicates whether link is UP/DOWN.
7503  *  Description:
7504  *  This function stops/starts the Tx queue depending on whether the link
7505  *  status of the NIC is down or up. This is called by the Alarm
7506  *  interrupt handler whenever a link change interrupt comes up.
7507  *  Return value:
7508  *  void.
7509  */
7510
7511 static void s2io_link(struct s2io_nic * sp, int link)
7512 {
7513         struct net_device *dev = (struct net_device *) sp->dev;
7514
7515         if (link != sp->last_link_state) {
7516                 init_tti(sp, link);
7517                 if (link == LINK_DOWN) {
7518                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7519                         s2io_stop_all_tx_queue(sp);
7520                         netif_carrier_off(dev);
7521                         if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7522                         sp->mac_control.stats_info->sw_stat.link_up_time =
7523                                 jiffies - sp->start_time;
7524                         sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7525                 } else {
7526                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
7527                         if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7528                         sp->mac_control.stats_info->sw_stat.link_down_time =
7529                                 jiffies - sp->start_time;
7530                         sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7531                         netif_carrier_on(dev);
7532                         s2io_wake_all_tx_queue(sp);
7533                 }
7534         }
7535         sp->last_link_state = link;
7536         sp->start_time = jiffies;
7537 }
7538
7539 /**
7540  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7541  *  @sp : private member of the device structure, which is a pointer to the
7542  *  s2io_nic structure.
7543  *  Description:
7544  *  This function initializes a few of the PCI and PCI-X configuration registers
7545  *  with recommended values.
7546  *  Return value:
7547  *  void
7548  */
7549
7550 static void s2io_init_pci(struct s2io_nic * sp)
7551 {
7552         u16 pci_cmd = 0, pcix_cmd = 0;
7553
7554         /* Enable Data Parity Error Recovery in PCI-X command register. */
7555         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7556                              &(pcix_cmd));
7557         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7558                               (pcix_cmd | 1));
7559         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7560                              &(pcix_cmd));
7561
7562         /* Set the PErr Response bit in PCI command register. */
7563         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7564         pci_write_config_word(sp->pdev, PCI_COMMAND,
7565                               (pci_cmd | PCI_COMMAND_PARITY));
7566         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7567 }
7568
7569 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7570         u8 *dev_multiq)
7571 {
7572         if ((tx_fifo_num > MAX_TX_FIFOS) ||
7573                 (tx_fifo_num < 1)) {
7574                 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7575                         "(%d) not supported\n", tx_fifo_num);
7576
7577                 if (tx_fifo_num < 1)
7578                         tx_fifo_num = 1;
7579                 else
7580                         tx_fifo_num = MAX_TX_FIFOS;
7581
7582                 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7583                 DBG_PRINT(ERR_DBG, "tx fifos\n");
7584         }
7585
7586 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
7587         if (multiq) {
7588                 DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
7589                 multiq = 0;
7590         }
7591 #endif
7592         if (multiq)
7593                 *dev_multiq = multiq;
7594
7595         if (tx_steering_type && (1 == tx_fifo_num)) {
7596                 if (tx_steering_type != TX_DEFAULT_STEERING)
7597                         DBG_PRINT(ERR_DBG,
7598                                 "s2io: Tx steering is not supported with "
7599                                 "one fifo. Disabling Tx steering.\n");
7600                 tx_steering_type = NO_STEERING;
7601         }
7602
7603         if ((tx_steering_type < NO_STEERING) ||
7604                 (tx_steering_type > TX_DEFAULT_STEERING)) {
7605                 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7606                          "supported\n");
7607                 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7608                 tx_steering_type = NO_STEERING;
7609         }
7610
7611         if (rx_ring_num > MAX_RX_RINGS) {
7612                 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
7613                          "supported\n");
7614                 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7615                         MAX_RX_RINGS);
7616                 rx_ring_num = MAX_RX_RINGS;
7617         }
7618
7619         if (*dev_intr_type != INTA)
7620                 napi = 0;
7621
7622         if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7623                 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7624                           "Defaulting to INTA\n");
7625                 *dev_intr_type = INTA;
7626         }
7627
7628         if ((*dev_intr_type == MSI_X) &&
7629                         ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7630                         (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7631                 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7632                                         "Defaulting to INTA\n");
7633                 *dev_intr_type = INTA;
7634         }
7635
7636         if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7637                 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7638                 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7639                 rx_ring_mode = 1;
7640         }
7641         return SUCCESS;
7642 }
7643
7644 /**
7645  * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7646  * or Traffic class respectively.
7647  * @nic: device private variable
7648  * Description: The function configures the receive steering to
7649  * desired receive ring.
7650  * Return Value:  SUCCESS on success and
7651  * '-1' on failure (endian settings incorrect).
7652  */
7653 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7654 {
7655         struct XENA_dev_config __iomem *bar0 = nic->bar0;
7656         register u64 val64 = 0;
7657
7658         if (ds_codepoint > 63)
7659                 return FAILURE;
7660
7661         val64 = RTS_DS_MEM_DATA(ring);
7662         writeq(val64, &bar0->rts_ds_mem_data);
7663
7664         val64 = RTS_DS_MEM_CTRL_WE |
7665                 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7666                 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7667
7668         writeq(val64, &bar0->rts_ds_mem_ctrl);
7669
7670         return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7671                                 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7672                                 S2IO_BIT_RESET);
7673 }
7674
7675 /**
7676  *  s2io_init_nic - Initialization of the adapter .
7677  *  @pdev : structure containing the PCI related information of the device.
7678  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7679  *  Description:
7680  *  The function initializes an adapter identified by the pci_dev structure.
7681  *  All OS related initialization including memory and device structure and
7682  *  initialization of the device private variable is done. Also the swapper
7683  *  control register is initialized to enable read and write into the I/O
7684  *  registers of the device.
7685  *  Return value:
7686  *  returns 0 on success and negative on failure.
7687  */
7688
7689 static int __devinit
7690 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7691 {
7692         struct s2io_nic *sp;
7693         struct net_device *dev;
7694         int i, j, ret;
7695         int dma_flag = FALSE;
7696         u32 mac_up, mac_down;
7697         u64 val64 = 0, tmp64 = 0;
7698         struct XENA_dev_config __iomem *bar0 = NULL;
7699         u16 subid;
7700         struct mac_info *mac_control;
7701         struct config_param *config;
7702         int mode;
7703         u8 dev_intr_type = intr_type;
7704         u8 dev_multiq = 0;
7705         DECLARE_MAC_BUF(mac);
7706
7707         ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7708         if (ret)
7709                 return ret;
7710
7711         if ((ret = pci_enable_device(pdev))) {
7712                 DBG_PRINT(ERR_DBG,
7713                           "s2io_init_nic: pci_enable_device failed\n");
7714                 return ret;
7715         }
7716
7717         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7718                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7719                 dma_flag = TRUE;
7720                 if (pci_set_consistent_dma_mask
7721                     (pdev, DMA_64BIT_MASK)) {
7722                         DBG_PRINT(ERR_DBG,
7723                                   "Unable to obtain 64bit DMA for \
7724                                         consistent allocations\n");
7725                         pci_disable_device(pdev);
7726                         return -ENOMEM;
7727                 }
7728         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7729                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7730         } else {
7731                 pci_disable_device(pdev);
7732                 return -ENOMEM;
7733         }
7734         if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7735                 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7736                 pci_disable_device(pdev);
7737                 return -ENODEV;
7738         }
7739 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
7740         if (dev_multiq)
7741                 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7742         else
7743 #endif
7744         dev = alloc_etherdev(sizeof(struct s2io_nic));
7745         if (dev == NULL) {
7746                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7747                 pci_disable_device(pdev);
7748                 pci_release_regions(pdev);
7749                 return -ENODEV;
7750         }
7751
7752         pci_set_master(pdev);
7753         pci_set_drvdata(pdev, dev);
7754         SET_NETDEV_DEV(dev, &pdev->dev);
7755
7756         /*  Private member variable initialized to s2io NIC structure */
7757         sp = dev->priv;
7758         memset(sp, 0, sizeof(struct s2io_nic));
7759         sp->dev = dev;
7760         sp->pdev = pdev;
7761         sp->high_dma_flag = dma_flag;
7762         sp->device_enabled_once = FALSE;
7763         if (rx_ring_mode == 1)
7764                 sp->rxd_mode = RXD_MODE_1;
7765         if (rx_ring_mode == 2)
7766                 sp->rxd_mode = RXD_MODE_3B;
7767
7768         sp->config.intr_type = dev_intr_type;
7769
7770         if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7771                 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7772                 sp->device_type = XFRAME_II_DEVICE;
7773         else
7774                 sp->device_type = XFRAME_I_DEVICE;
7775
7776         sp->lro = lro_enable;
7777
7778         /* Initialize some PCI/PCI-X fields of the NIC. */
7779         s2io_init_pci(sp);
7780
7781         /*
7782          * Setting the device configuration parameters.
7783          * Most of these parameters can be specified by the user during
7784          * module insertion as they are module loadable parameters. If
7785          * these parameters are not not specified during load time, they
7786          * are initialized with default values.
7787          */
7788         mac_control = &sp->mac_control;
7789         config = &sp->config;
7790
7791         config->napi = napi;
7792         config->tx_steering_type = tx_steering_type;
7793
7794         /* Tx side parameters. */
7795         if (config->tx_steering_type == TX_PRIORITY_STEERING)
7796                 config->tx_fifo_num = MAX_TX_FIFOS;
7797         else
7798                 config->tx_fifo_num = tx_fifo_num;
7799
7800         /* Initialize the fifos used for tx steering */
7801         if (config->tx_fifo_num < 5) {
7802                         if (config->tx_fifo_num  == 1)
7803                                 sp->total_tcp_fifos = 1;
7804                         else
7805                                 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7806                         sp->udp_fifo_idx = config->tx_fifo_num - 1;
7807                         sp->total_udp_fifos = 1;
7808                         sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7809         } else {
7810                 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7811                                                 FIFO_OTHER_MAX_NUM);
7812                 sp->udp_fifo_idx = sp->total_tcp_fifos;
7813                 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7814                 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7815         }
7816
7817         config->multiq = dev_multiq;
7818         for (i = 0; i < config->tx_fifo_num; i++) {
7819                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7820                 config->tx_cfg[i].fifo_priority = i;
7821         }
7822
7823         /* mapping the QoS priority to the configured fifos */
7824         for (i = 0; i < MAX_TX_FIFOS; i++)
7825                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7826
7827         /* map the hashing selector table to the configured fifos */
7828         for (i = 0; i < config->tx_fifo_num; i++)
7829                 sp->fifo_selector[i] = fifo_selector[i];
7830
7831
7832         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7833         for (i = 0; i < config->tx_fifo_num; i++) {
7834                 config->tx_cfg[i].f_no_snoop =
7835                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
7836                 if (config->tx_cfg[i].fifo_len < 65) {
7837                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7838                         break;
7839                 }
7840         }
7841         /* + 2 because one Txd for skb->data and one Txd for UFO */
7842         config->max_txds = MAX_SKB_FRAGS + 2;
7843
7844         /* Rx side parameters. */
7845         config->rx_ring_num = rx_ring_num;
7846         for (i = 0; i < config->rx_ring_num; i++) {
7847                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7848                     (rxd_count[sp->rxd_mode] + 1);
7849                 config->rx_cfg[i].ring_priority = i;
7850                 mac_control->rings[i].rx_bufs_left = 0;
7851                 mac_control->rings[i].rxd_mode = sp->rxd_mode;
7852                 mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
7853                 mac_control->rings[i].pdev = sp->pdev;
7854                 mac_control->rings[i].dev = sp->dev;
7855         }
7856
7857         for (i = 0; i < rx_ring_num; i++) {
7858                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7859                 config->rx_cfg[i].f_no_snoop =
7860                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7861         }
7862
7863         /*  Setting Mac Control parameters */
7864         mac_control->rmac_pause_time = rmac_pause_time;
7865         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7866         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7867
7868
7869         /*  initialize the shared memory used by the NIC and the host */
7870         if (init_shared_mem(sp)) {
7871                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7872                           dev->name);
7873                 ret = -ENOMEM;
7874                 goto mem_alloc_failed;
7875         }
7876
7877         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7878                                      pci_resource_len(pdev, 0));
7879         if (!sp->bar0) {
7880                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7881                           dev->name);
7882                 ret = -ENOMEM;
7883                 goto bar0_remap_failed;
7884         }
7885
7886         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7887                                      pci_resource_len(pdev, 2));
7888         if (!sp->bar1) {
7889                 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7890                           dev->name);
7891                 ret = -ENOMEM;
7892                 goto bar1_remap_failed;
7893         }
7894
7895         dev->irq = pdev->irq;
7896         dev->base_addr = (unsigned long) sp->bar0;
7897
7898         /* Initializing the BAR1 address as the start of the FIFO pointer. */
7899         for (j = 0; j < MAX_TX_FIFOS; j++) {
7900                 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7901                     (sp->bar1 + (j * 0x00020000));
7902         }
7903
7904         /*  Driver entry points */
7905         dev->open = &s2io_open;
7906         dev->stop = &s2io_close;
7907         dev->hard_start_xmit = &s2io_xmit;
7908         dev->get_stats = &s2io_get_stats;
7909         dev->set_multicast_list = &s2io_set_multicast;
7910         dev->do_ioctl = &s2io_ioctl;
7911         dev->set_mac_address = &s2io_set_mac_addr;
7912         dev->change_mtu = &s2io_change_mtu;
7913         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7914         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7915         dev->vlan_rx_register = s2io_vlan_rx_register;
7916         dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7917
7918         /*
7919          * will use eth_mac_addr() for  dev->set_mac_address
7920          * mac address will be set every time dev->open() is called
7921          */
7922         netif_napi_add(dev, &sp->napi, s2io_poll, 32);
7923
7924 #ifdef CONFIG_NET_POLL_CONTROLLER
7925         dev->poll_controller = s2io_netpoll;
7926 #endif
7927
7928         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7929         if (sp->high_dma_flag == TRUE)
7930                 dev->features |= NETIF_F_HIGHDMA;
7931         dev->features |= NETIF_F_TSO;
7932         dev->features |= NETIF_F_TSO6;
7933         if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
7934                 dev->features |= NETIF_F_UFO;
7935                 dev->features |= NETIF_F_HW_CSUM;
7936         }
7937 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
7938         if (config->multiq)
7939                 dev->features |= NETIF_F_MULTI_QUEUE;
7940 #endif
7941         dev->tx_timeout = &s2io_tx_watchdog;
7942         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7943         INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7944         INIT_WORK(&sp->set_link_task, s2io_set_link);
7945
7946         pci_save_state(sp->pdev);
7947
7948         /* Setting swapper control on the NIC, for proper reset operation */
7949         if (s2io_set_swapper(sp)) {
7950                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7951                           dev->name);
7952                 ret = -EAGAIN;
7953                 goto set_swap_failed;
7954         }
7955
7956         /* Verify if the Herc works on the slot its placed into */
7957         if (sp->device_type & XFRAME_II_DEVICE) {
7958                 mode = s2io_verify_pci_mode(sp);
7959                 if (mode < 0) {
7960                         DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7961                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7962                         ret = -EBADSLT;
7963                         goto set_swap_failed;
7964                 }
7965         }
7966
7967         /* Not needed for Herc */
7968         if (sp->device_type & XFRAME_I_DEVICE) {
7969                 /*
7970                  * Fix for all "FFs" MAC address problems observed on
7971                  * Alpha platforms
7972                  */
7973                 fix_mac_address(sp);
7974                 s2io_reset(sp);
7975         }
7976
7977         /*
7978          * MAC address initialization.
7979          * For now only one mac address will be read and used.
7980          */
7981         bar0 = sp->bar0;
7982         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7983             RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
7984         writeq(val64, &bar0->rmac_addr_cmd_mem);
7985         wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7986                       RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7987         tmp64 = readq(&bar0->rmac_addr_data0_mem);
7988         mac_down = (u32) tmp64;
7989         mac_up = (u32) (tmp64 >> 32);
7990
7991         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7992         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7993         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7994         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7995         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7996         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7997
7998         /*  Set the factory defined MAC address initially   */
7999         dev->addr_len = ETH_ALEN;
8000         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8001         memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8002
8003         /* initialize number of multicast & unicast MAC entries variables */
8004         if (sp->device_type == XFRAME_I_DEVICE) {
8005                 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8006                 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8007                 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8008         } else if (sp->device_type == XFRAME_II_DEVICE) {
8009                 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8010                 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8011                 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8012         }
8013
8014         /* store mac addresses from CAM to s2io_nic structure */
8015         do_s2io_store_unicast_mc(sp);
8016
8017          /* Store the values of the MSIX table in the s2io_nic structure */
8018         store_xmsi_data(sp);
8019         /* reset Nic and bring it to known state */
8020         s2io_reset(sp);
8021
8022         /*
8023          * Initialize link state flags
8024          * and the card state parameter
8025          */
8026         sp->state = 0;
8027
8028         /* Initialize spinlocks */
8029         for (i = 0; i < sp->config.tx_fifo_num; i++)
8030                 spin_lock_init(&mac_control->fifos[i].tx_lock);
8031
8032         /*
8033          * SXE-002: Configure link and activity LED to init state
8034          * on driver load.
8035          */
8036         subid = sp->pdev->subsystem_device;
8037         if ((subid & 0xFF) >= 0x07) {
8038                 val64 = readq(&bar0->gpio_control);
8039                 val64 |= 0x0000800000000000ULL;
8040                 writeq(val64, &bar0->gpio_control);
8041                 val64 = 0x0411040400000000ULL;
8042                 writeq(val64, (void __iomem *) bar0 + 0x2700);
8043                 val64 = readq(&bar0->gpio_control);
8044         }
8045
8046         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
8047
8048         if (register_netdev(dev)) {
8049                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8050                 ret = -ENODEV;
8051                 goto register_failed;
8052         }
8053         s2io_vpd_read(sp);
8054         DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8055         DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8056                   sp->product_name, pdev->revision);
8057         DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8058                   s2io_driver_version);
8059         DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8060                   dev->name, print_mac(mac, dev->dev_addr));
8061         DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8062         if (sp->device_type & XFRAME_II_DEVICE) {
8063                 mode = s2io_print_pci_mode(sp);
8064                 if (mode < 0) {
8065                         DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8066                         ret = -EBADSLT;
8067                         unregister_netdev(dev);
8068                         goto set_swap_failed;
8069                 }
8070         }
8071         switch(sp->rxd_mode) {
8072                 case RXD_MODE_1:
8073                     DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8074                                                 dev->name);
8075                     break;
8076                 case RXD_MODE_3B:
8077                     DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8078                                                 dev->name);
8079                     break;
8080         }
8081
8082         if (napi)
8083                 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8084
8085         DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8086                 sp->config.tx_fifo_num);
8087
8088         DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
8089                   sp->config.rx_ring_num);
8090
8091         switch(sp->config.intr_type) {
8092                 case INTA:
8093                     DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8094                     break;
8095                 case MSI_X:
8096                     DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8097                     break;
8098         }
8099         if (sp->config.multiq) {
8100         for (i = 0; i < sp->config.tx_fifo_num; i++)
8101                 mac_control->fifos[i].multiq = config->multiq;
8102                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8103                         dev->name);
8104         } else
8105                 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8106                         dev->name);
8107
8108         switch (sp->config.tx_steering_type) {
8109         case NO_STEERING:
8110                 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8111                         " transmit\n", dev->name);
8112                         break;
8113         case TX_PRIORITY_STEERING:
8114                 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8115                         " transmit\n", dev->name);
8116                 break;
8117         case TX_DEFAULT_STEERING:
8118                 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8119                         " transmit\n", dev->name);
8120         }
8121
8122         if (sp->lro)
8123                 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8124                           dev->name);
8125         if (ufo)
8126                 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8127                                         " enabled\n", dev->name);
8128         /* Initialize device name */
8129         sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8130
8131         /*
8132          * Make Link state as off at this point, when the Link change
8133          * interrupt comes the state will be automatically changed to
8134          * the right state.
8135          */
8136         netif_carrier_off(dev);
8137
8138         return 0;
8139
8140       register_failed:
8141       set_swap_failed:
8142         iounmap(sp->bar1);
8143       bar1_remap_failed:
8144         iounmap(sp->bar0);
8145       bar0_remap_failed:
8146       mem_alloc_failed:
8147         free_shared_mem(sp);
8148         pci_disable_device(pdev);
8149         pci_release_regions(pdev);
8150         pci_set_drvdata(pdev, NULL);
8151         free_netdev(dev);
8152
8153         return ret;
8154 }
8155
8156 /**
8157  * s2io_rem_nic - Free the PCI device
8158  * @pdev: structure containing the PCI related information of the device.
8159  * Description: This function is called by the Pci subsystem to release a
8160  * PCI device and free up all resource held up by the device. This could
8161  * be in response to a Hot plug event or when the driver is to be removed
8162  * from memory.
8163  */
8164
8165 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8166 {
8167         struct net_device *dev =
8168             (struct net_device *) pci_get_drvdata(pdev);
8169         struct s2io_nic *sp;
8170
8171         if (dev == NULL) {
8172                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8173                 return;
8174         }
8175
8176         flush_scheduled_work();
8177
8178         sp = dev->priv;
8179         unregister_netdev(dev);
8180
8181         free_shared_mem(sp);
8182         iounmap(sp->bar0);
8183         iounmap(sp->bar1);
8184         pci_release_regions(pdev);
8185         pci_set_drvdata(pdev, NULL);
8186         free_netdev(dev);
8187         pci_disable_device(pdev);
8188 }
8189
8190 /**
8191  * s2io_starter - Entry point for the driver
8192  * Description: This function is the entry point for the driver. It verifies
8193  * the module loadable parameters and initializes PCI configuration space.
8194  */
8195
static int __init s2io_starter(void)
{
	/* Register the PCI driver; device probing is driven by the PCI core */
	return pci_register_driver(&s2io_driver);
}
8200
8201 /**
8202  * s2io_closer - Cleanup routine for the driver
8203  * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
8204  */
8205
static __exit void s2io_closer(void)
{
	/* Unregistering triggers s2io_rem_nic() for every bound device */
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
8211
8212 module_init(s2io_starter);
8213 module_exit(s2io_closer);
8214
8215 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8216                 struct tcphdr **tcp, struct RxD_t *rxdp,
8217                 struct s2io_nic *sp)
8218 {
8219         int ip_off;
8220         u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8221
8222         if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8223                 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8224                           __FUNCTION__);
8225                 return -1;
8226         }
8227
8228         /* Checking for DIX type or DIX type with VLAN */
8229         if ((l2_type == 0)
8230                 || (l2_type == 4)) {
8231                 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8232                 /*
8233                  * If vlan stripping is disabled and the frame is VLAN tagged,
8234                  * shift the offset by the VLAN header size bytes.
8235                  */
8236                 if ((!vlan_strip_flag) &&
8237                         (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8238                         ip_off += HEADER_VLAN_SIZE;
8239         } else {
8240                 /* LLC, SNAP etc are considered non-mergeable */
8241                 return -1;
8242         }
8243
8244         *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8245         ip_len = (u8)((*ip)->ihl);
8246         ip_len <<= 2;
8247         *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8248
8249         return 0;
8250 }
8251
8252 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8253                                   struct tcphdr *tcp)
8254 {
8255         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8256         if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8257            (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8258                 return -1;
8259         return 0;
8260 }
8261
8262 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8263 {
8264         return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8265 }
8266
8267 static void initiate_new_session(struct lro *lro, u8 *l2h,
8268         struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8269 {
8270         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8271         lro->l2h = l2h;
8272         lro->iph = ip;
8273         lro->tcph = tcp;
8274         lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8275         lro->tcp_ack = tcp->ack_seq;
8276         lro->sg_num = 1;
8277         lro->total_len = ntohs(ip->tot_len);
8278         lro->frags_len = 0;
8279         lro->vlan_tag = vlan_tag;
8280         /*
8281          * check if we saw TCP timestamp. Other consistency checks have
8282          * already been done.
8283          */
8284         if (tcp->doff == 8) {
8285                 __be32 *ptr;
8286                 ptr = (__be32 *)(tcp+1);
8287                 lro->saw_ts = 1;
8288                 lro->cur_tsval = ntohl(*(ptr+1));
8289                 lro->cur_tsecr = *(ptr+2);
8290         }
8291         lro->in_use = 1;
8292 }
8293
/*
 * update_L3L4_header - finalize the headers of an aggregated LRO frame
 * @sp: device private structure (used only for LRO statistics here)
 * @lro: session whose head-segment headers are patched in place
 *
 * Called when a session is flushed: rewrites the head segment's IP total
 * length and checksum, and the TCP ack/window (plus tsecr when timestamps
 * are in use), so the merged super-frame looks like one valid TCP segment.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct stat_block *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	/* checksum field must be zero while it is being recomputed */
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		/* timestamp option starts right after the base TCP header */
		__be32 *ptr = (__be32 *)(tcp + 1);
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}
8324
8325 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8326                 struct tcphdr *tcp, u32 l4_pyld)
8327 {
8328         DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8329         lro->total_len += l4_pyld;
8330         lro->frags_len += l4_pyld;
8331         lro->tcp_next_seq += l4_pyld;
8332         lro->sg_num++;
8333
8334         /* Update ack seq no. and window ad(from this pkt) in LRO object */
8335         lro->tcp_ack = tcp->ack_seq;
8336         lro->window = tcp->window;
8337
8338         if (lro->saw_ts) {
8339                 __be32 *ptr;
8340                 /* Update tsecr and tsval from this packet */
8341                 ptr = (__be32 *)(tcp+1);
8342                 lro->cur_tsval = ntohl(*(ptr+1));
8343                 lro->cur_tsecr = *(ptr + 2);
8344         }
8345 }
8346
/*
 * verify_l3_l4_lro_capable - decide whether a segment may be LRO-merged
 * @l_lro: existing session to check timestamp monotonicity against, or
 *         NULL when considering the first segment of a new session
 * @ip: IP header of the candidate segment
 * @tcp: TCP header of the candidate segment
 * @tcp_pyld_len: TCP payload length of the candidate segment
 *
 * Returns 0 if the segment is mergeable, -1 if the session must be
 * flushed / the packet sent up unmerged.
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
				    struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
				    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		/* Skip any NOP padding before the timestamp option */
		ptr = (u8 *)(tcp + 1);
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8403
/*
 * s2io_club_tcp_session - classify a received TCP segment for LRO
 * @ring_data: receive ring owning the per-ring LRO session table
 * @buffer: start of the received frame (L2 header)
 * @tcp: out - points at the segment's TCP header on success
 * @tcp_len: out - TCP payload length of the segment
 * @lro: out - the LRO session chosen for this segment (NULL if none)
 * @rxdp: receive descriptor of the frame
 * @sp: device private structure
 *
 * Return codes (consumed by the caller's flush logic):
 *   0 - no free session available (packet sent up unmerged)
 *   1 - segment aggregated into an existing session
 *   2 - flush the matched session (out-of-order or not mergeable)
 *   3 - new session begun for this flow
 *   4 - aggregated and session reached max size; flush it
 *   5 - packet itself not LRO-capable; send up as-is
 *   negative - frame's L2/L3 framing not LRO-capable
 */
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
	u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
	struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;

	/* Locate the IP/TCP headers; bail out if framing is not mergeable */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp, sp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else
		return ret;

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching this 4-tuple */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
				   sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim a free session slot for a new flow */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Act on the classification decided above */
	switch (ret) {
		case 3:
			initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
								vlan_tag);
			break;
		case 2:
			update_L3L4_header(sp, *lro);
			break;
		case 1:
			aggregate_new_rx(*lro, ip, tcph, *tcp_len);
			if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
				update_L3L4_header(sp, *lro);
				ret = 4; /* Flush the LRO */
			}
			break;
		default:
			DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
				__FUNCTION__);
			break;
	}

	return ret;
}
8502
8503 static void clear_lro_session(struct lro *lro)
8504 {
8505         static u16 lro_struct_size = sizeof(struct lro);
8506
8507         memset(lro, 0, lro_struct_size);
8508 }
8509
8510 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8511 {
8512         struct net_device *dev = skb->dev;
8513         struct s2io_nic *sp = dev->priv;
8514
8515         skb->protocol = eth_type_trans(skb, dev);
8516         if (sp->vlgrp && vlan_tag
8517                 && (vlan_strip_flag)) {
8518                 /* Queueing the vlan frame to the upper layer */
8519                 if (sp->config.napi)
8520                         vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8521                 else
8522                         vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8523         } else {
8524                 if (sp->config.napi)
8525                         netif_receive_skb(skb);
8526                 else
8527                         netif_rx(skb);
8528         }
8529 }
8530
8531 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8532                            struct sk_buff *skb,
8533                            u32 tcp_len)
8534 {
8535         struct sk_buff *first = lro->parent;
8536
8537         first->len += tcp_len;
8538         first->data_len = lro->frags_len;
8539         skb_pull(skb, (skb->len - tcp_len));
8540         if (skb_shinfo(first)->frag_list)
8541                 lro->last_frag->next = skb;
8542         else
8543                 skb_shinfo(first)->frag_list = skb;
8544         first->truesize += skb->truesize;
8545         lro->last_frag = skb;
8546         sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8547         return;
8548 }
8549
8550 /**
8551  * s2io_io_error_detected - called when PCI error is detected
8552  * @pdev: Pointer to PCI device
8553  * @state: The current pci connection state
8554  *
8555  * This function is called after a PCI bus error affecting
8556  * this device has been detected.
8557  */
8558 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8559                                                pci_channel_state_t state)
8560 {
8561         struct net_device *netdev = pci_get_drvdata(pdev);
8562         struct s2io_nic *sp = netdev->priv;
8563
8564         netif_device_detach(netdev);
8565
8566         if (netif_running(netdev)) {
8567                 /* Bring down the card, while avoiding PCI I/O */
8568                 do_s2io_card_down(sp, 0);
8569         }
8570         pci_disable_device(pdev);
8571
8572         return PCI_ERS_RESULT_NEED_RESET;
8573 }
8574
8575 /**
8576  * s2io_io_slot_reset - called after the pci bus has been reset.
8577  * @pdev: Pointer to PCI device
8578  *
8579  * Restart the card from scratch, as if from a cold-boot.
8580  * At this point, the card has exprienced a hard reset,
8581  * followed by fixups by BIOS, and has its config space
8582  * set up identically to what it was at cold boot.
8583  */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev->priv;

	/* Re-enable PCI I/O; if that fails the device cannot be recovered */
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "s2io: "
		       "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Restore bus mastering and put the NIC back into a known state */
	pci_set_master(pdev);
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
8600
8601 /**
8602  * s2io_io_resume - called when traffic can start flowing again.
8603  * @pdev: Pointer to PCI device
8604  *
8605  * This callback is called when the error recovery driver tells
8606  * us that its OK to resume normal operation.
8607  */
8608 static void s2io_io_resume(struct pci_dev *pdev)
8609 {
8610         struct net_device *netdev = pci_get_drvdata(pdev);
8611         struct s2io_nic *sp = netdev->priv;
8612
8613         if (netif_running(netdev)) {
8614                 if (s2io_card_up(sp)) {
8615                         printk(KERN_ERR "s2io: "
8616                                "Can't bring device back up after reset.\n");
8617                         return;
8618                 }
8619
8620                 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8621                         s2io_card_down(sp);
8622                         printk(KERN_ERR "s2io: "
8623                                "Can't resetore mac addr after reset.\n");
8624                         return;
8625                 }
8626         }
8627
8628         netif_device_attach(netdev);
8629         netif_wake_queue(netdev);
8630 }