1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2007 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 2(MSI_X). Default value is '2(MSI_X)'
41 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 * multiq: This parameter used to enable/disable MULTIQUEUE support.
54 * Possible values '1' for enable and '0' for disable. Default is '0'
55 ************************************************************************/
57 #include <linux/module.h>
58 #include <linux/types.h>
59 #include <linux/errno.h>
60 #include <linux/ioport.h>
61 #include <linux/pci.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/kernel.h>
64 #include <linux/netdevice.h>
65 #include <linux/etherdevice.h>
66 #include <linux/skbuff.h>
67 #include <linux/init.h>
68 #include <linux/delay.h>
69 #include <linux/stddef.h>
70 #include <linux/ioctl.h>
71 #include <linux/timex.h>
72 #include <linux/ethtool.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
76 #include <linux/tcp.h>
79 #include <asm/system.h>
80 #include <asm/uaccess.h>
82 #include <asm/div64.h>
87 #include "s2io-regs.h"
89 #define DRV_VERSION "2.0.26.15-2"
91 /* S2io Driver name & version. */
92 static char s2io_driver_name[] = "Neterion";
93 static char s2io_driver_version[] = DRV_VERSION;
/*
 * Per rxd_mode tables (index 0 = 1-buffer mode, index 1 = 3B mode —
 * presumably; confirm against full source).  rxd_size is the RxD size in
 * bytes; rxd_count is used elsewhere as "RxDs per block minus one"
 * (init_shared_mem checks num_rxd % (rxd_count + 1)).
 */
95 static int rxd_size[2] = {32,48};
96 static int rxd_count[2] = {127,85};
/*
 * RXD_IS_UP2DT - check whether a Rx descriptor has been handed back to
 * the host: the OWN bit in Control_1 must be clear and the marker field
 * in Control_2 must not carry THE_RXD_MARK.
 * NOTE(review): the braces/return of this function are on lines elided
 * from this listing; confirm against the full source.
 */
98 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
102 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
109 * Cards with following subsystem_id have a link state indication
110 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
111 * macro below identifies these cards given the subsystem_id.
/*
 * Cards with subsystem ids 0x600B..0x600D and 0x640B..0x640D have a
 * faulty link-state indication (Xframe I devices only).  Evaluates to
 * 1 for such cards, 0 otherwise.
 * Fully parenthesized: the original expansion began with an unbracketed
 * ternary and used raw arguments, so expression arguments or a
 * surrounding operator could mis-associate.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
	(((dev_type) == XFRAME_I_DEVICE) ? \
	 (((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	   (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0) : 0)
/* Link is up when neither the remote- nor the local-fault RMAC bits are set. */
118 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
119 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; relies on a local 'sp' at the expansion site. */
120 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how full ring @ring's Rx buffer pool is,
 * given @rxb_size buffers currently available.  Presumably returns
 * PANIC/LOW/0 levels — the return statements are on lines elided from
 * this listing; confirm against the full source.
 */
123 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
125 struct mac_info *mac_control;
127 mac_control = &sp->mac_control;
/* Fewer buffers than one block's worth of RxDs -> critical. */
128 if (rxb_size <= rxd_count[sp->rxd_mode])
130 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
/* True when the __S2IO_STATE_CARD_UP bit is set in the device state word. */
135 static inline int is_s2io_card_up(const struct s2io_nic * sp)
137 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
140 /* Ethtool related variables and Macros. */
/* Self-test names reported via ethtool; (offline)/(online) marks when each may run. */
141 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
142 "Register test\t(offline)",
143 "Eeprom test\t(offline)",
144 "Link test\t(online)",
145 "RLDRAM test\t(offline)",
146 "BIST Test\t(offline)"
/* Hardware statistics keys common to Xframe I and II.
 * NOTE(review): many entries and the closing brace are elided from this listing. */
149 static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
151 {"tmac_data_octets"},
155 {"tmac_pause_ctrl_frms"},
159 {"tmac_any_err_frms"},
160 {"tmac_ttl_less_fb_octets"},
161 {"tmac_vld_ip_octets"},
169 {"rmac_data_octets"},
170 {"rmac_fcs_err_frms"},
172 {"rmac_vld_mcst_frms"},
173 {"rmac_vld_bcst_frms"},
174 {"rmac_in_rng_len_err_frms"},
175 {"rmac_out_rng_len_err_frms"},
177 {"rmac_pause_ctrl_frms"},
178 {"rmac_unsup_ctrl_frms"},
180 {"rmac_accepted_ucst_frms"},
181 {"rmac_accepted_nucst_frms"},
182 {"rmac_discarded_frms"},
183 {"rmac_drop_events"},
184 {"rmac_ttl_less_fb_octets"},
186 {"rmac_usized_frms"},
187 {"rmac_osized_frms"},
189 {"rmac_jabber_frms"},
190 {"rmac_ttl_64_frms"},
191 {"rmac_ttl_65_127_frms"},
192 {"rmac_ttl_128_255_frms"},
193 {"rmac_ttl_256_511_frms"},
194 {"rmac_ttl_512_1023_frms"},
195 {"rmac_ttl_1024_1518_frms"},
203 {"rmac_err_drp_udp"},
204 {"rmac_xgmii_err_sym"},
222 {"rmac_xgmii_data_err_cnt"},
223 {"rmac_xgmii_ctrl_err_cnt"},
224 {"rmac_accepted_ip"},
228 {"new_rd_req_rtry_cnt"},
230 {"wr_rtry_rd_ack_cnt"},
233 {"new_wr_req_rtry_cnt"},
236 {"rd_rtry_wr_ack_cnt"},
/* Extra hardware statistics available only on Xframe II. */
246 static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
247 {"rmac_ttl_1519_4095_frms"},
248 {"rmac_ttl_4096_8191_frms"},
249 {"rmac_ttl_8192_max_frms"},
250 {"rmac_ttl_gt_max_frms"},
251 {"rmac_osized_alt_frms"},
252 {"rmac_jabber_alt_frms"},
253 {"rmac_gt_max_alt_frms"},
255 {"rmac_len_discard"},
256 {"rmac_fcs_discard"},
259 {"rmac_red_discard"},
260 {"rmac_rts_discard"},
261 {"rmac_ingm_full_discard"},
/* Software statistics maintained by the driver itself (sw_stat). */
265 static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
266 {"\n DRIVER STATISTICS"},
267 {"single_bit_ecc_errs"},
268 {"double_bit_ecc_errs"},
281 {"alarm_transceiver_temp_high"},
282 {"alarm_transceiver_temp_low"},
283 {"alarm_laser_bias_current_high"},
284 {"alarm_laser_bias_current_low"},
285 {"alarm_laser_output_power_high"},
286 {"alarm_laser_output_power_low"},
287 {"warn_transceiver_temp_high"},
288 {"warn_transceiver_temp_low"},
289 {"warn_laser_bias_current_high"},
290 {"warn_laser_bias_current_low"},
291 {"warn_laser_output_power_high"},
292 {"warn_laser_output_power_low"},
293 {"lro_aggregated_pkts"},
294 {"lro_flush_both_count"},
295 {"lro_out_of_sequence_pkts"},
296 {"lro_flush_due_to_max_pkts"},
297 {"lro_avg_aggr_pkts"},
298 {"mem_alloc_fail_cnt"},
299 {"pci_map_fail_cnt"},
300 {"watchdog_timer_cnt"},
307 {"tx_tcode_buf_abort_cnt"},
308 {"tx_tcode_desc_abort_cnt"},
309 {"tx_tcode_parity_err_cnt"},
310 {"tx_tcode_link_loss_cnt"},
311 {"tx_tcode_list_proc_err_cnt"},
312 {"rx_tcode_parity_err_cnt"},
313 {"rx_tcode_abort_cnt"},
314 {"rx_tcode_parity_abort_cnt"},
315 {"rx_tcode_rda_fail_cnt"},
316 {"rx_tcode_unkn_prot_cnt"},
317 {"rx_tcode_fcs_err_cnt"},
318 {"rx_tcode_buf_size_err_cnt"},
319 {"rx_tcode_rxd_corrupt_cnt"},
320 {"rx_tcode_unkn_err_cnt"},
328 {"mac_tmac_err_cnt"},
329 {"mac_rmac_err_cnt"},
330 {"xgxs_txgxs_err_cnt"},
331 {"xgxs_rxgxs_err_cnt"},
333 {"prc_pcix_err_cnt"},
/* Lengths of the ethtool statistics/self-test string tables above. */
#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN)
#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN)
#define XFRAME_I_STAT_STRINGS_LEN (XFRAME_I_STAT_LEN * ETH_GSTRING_LEN)
#define XFRAME_II_STAT_STRINGS_LEN (XFRAME_II_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
/* Parenthesized: the original expanded to LEN * ETH_GSTRING_LEN without
 * brackets, which mis-associates in expressions such as
 * x / S2IO_STRINGS_LEN. */
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * S2IO_TIMER_CONF - initialize and arm a kernel timer in one statement.
 * @timer:  struct timer_list lvalue
 * @handle: timeout callback
 * @arg:    value stored in timer.data (passed to the callback)
 * @exp:    expiry delta in jiffies
 * Wrapped in do { } while (0) so the expansion behaves as a single
 * statement (safe after an unbraced if/else); the trailing semicolon is
 * supplied by the call site, exactly as before.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
359 /* copy mac addr to def_mac_addr array */
/*
 * Unpacks the 48-bit MAC from the low 6 bytes of @mac_addr into
 * def_mac_addr[offset], byte-reversed: bits 40..47 land in mac_addr[0]
 * and bits 0..7 in mac_addr[5] (network byte order).
 */
360 static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
362 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
363 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
364 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
365 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
366 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
367 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
/*
 * s2io_vlan_rx_register - record the VLAN group for the device while all
 * Tx FIFO locks are held, so no transmit runs concurrently with the
 * update.  Locks are taken in ascending FIFO order and released in
 * descending order, each with its own saved flags word.
 * NOTE(review): the statement storing @grp is on a line elided from this
 * listing; confirm against the full source.
 */
370 static void s2io_vlan_rx_register(struct net_device *dev,
371 struct vlan_group *grp)
374 struct s2io_nic *nic = dev->priv;
375 unsigned long flags[MAX_TX_FIFOS];
376 struct mac_info *mac_control = &nic->mac_control;
377 struct config_param *config = &nic->config;
379 for (i = 0; i < config->tx_fifo_num; i++)
380 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
383 for (i = config->tx_fifo_num - 1; i >= 0; i--)
384 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
388 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
389 static int vlan_strip_flag;
392 * Constants to be programmed into the Xena's registers, to configure
/* DTX configuration values for Xframe II (Herc). */
397 static const u64 herc_act_dtx_cfg[] = {
399 0x8000051536750000ULL, 0x80000515367500E0ULL,
401 0x8000051536750004ULL, 0x80000515367500E4ULL,
403 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
405 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
407 0x801205150D440000ULL, 0x801205150D4400E0ULL,
409 0x801205150D440004ULL, 0x801205150D4400E4ULL,
411 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
413 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* DTX configuration values for Xframe I (Xena). */
418 static const u64 xena_dtx_cfg[] = {
420 0x8000051500000000ULL, 0x80000515000000E0ULL,
422 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
424 0x8001051500000000ULL, 0x80010515000000E0ULL,
426 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
428 0x8002051500000000ULL, 0x80020515000000E0ULL,
430 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* Register write sequence used to work around the MAC-address problem. */
435 * Constants for Fixing the MacAddress problem seen mostly on
438 static const u64 fix_mac[] = {
439 0x0060000000000000ULL, 0x0060600000000000ULL,
440 0x0040600000000000ULL, 0x0000600000000000ULL,
441 0x0020600000000000ULL, 0x0060600000000000ULL,
442 0x0020600000000000ULL, 0x0060600000000000ULL,
443 0x0020600000000000ULL, 0x0060600000000000ULL,
444 0x0020600000000000ULL, 0x0060600000000000ULL,
445 0x0020600000000000ULL, 0x0060600000000000ULL,
446 0x0020600000000000ULL, 0x0060600000000000ULL,
447 0x0020600000000000ULL, 0x0060600000000000ULL,
448 0x0020600000000000ULL, 0x0060600000000000ULL,
449 0x0020600000000000ULL, 0x0060600000000000ULL,
450 0x0020600000000000ULL, 0x0060600000000000ULL,
451 0x0020600000000000ULL, 0x0000600000000000ULL,
452 0x0040600000000000ULL, 0x0060600000000000ULL,
456 MODULE_LICENSE("GPL");
457 MODULE_VERSION(DRV_VERSION);
460 /* Module Loadable parameters. */
461 S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
462 S2IO_PARM_INT(rx_ring_num, 1);
463 S2IO_PARM_INT(multiq, 0);
464 S2IO_PARM_INT(rx_ring_mode, 1);
465 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
466 S2IO_PARM_INT(rmac_pause_time, 0x100);
467 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
468 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
469 S2IO_PARM_INT(shared_splits, 0);
470 S2IO_PARM_INT(tmac_util_period, 5);
471 S2IO_PARM_INT(rmac_util_period, 5);
472 S2IO_PARM_INT(l3l4hdr_size, 128);
473 /* 0 is no steering, 1 is Priority steering, 2 is Default steering */
474 S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
475 /* Frequency of Rx desc syncs expressed as power of 2 */
476 S2IO_PARM_INT(rxsync_frequency, 3);
477 /* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
478 S2IO_PARM_INT(intr_type, 2);
479 /* Large receive offload feature */
480 static unsigned int lro_enable;
481 module_param_named(lro, lro_enable, uint, 0);
483 /* Max pkts to be aggregated by LRO at one time. If not specified,
484 * aggregation happens until we hit max IP pkt size(64K)
486 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
487 S2IO_PARM_INT(indicate_max_pkts, 0);
489 S2IO_PARM_INT(napi, 1);
490 S2IO_PARM_INT(ufo, 0);
491 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
/* Per-FIFO/per-ring array parameters; defaults set via designated
 * range initializers, overridable on the module command line below. */
493 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
494 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
495 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
496 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
497 static unsigned int rts_frm_len[MAX_RX_RINGS] =
498 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
500 module_param_array(tx_fifo_len, uint, NULL, 0);
501 module_param_array(rx_ring_sz, uint, NULL, 0);
502 module_param_array(rts_frm_len, uint, NULL, 0);
506 * This table lists all the devices that this driver supports.
/* PCI IDs matched by this driver: Xframe I (WIN/UNI) and Xframe II (HERC). */
508 static struct pci_device_id s2io_tbl[] __devinitdata = {
509 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
510 PCI_ANY_ID, PCI_ANY_ID},
511 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
512 PCI_ANY_ID, PCI_ANY_ID},
513 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
514 PCI_ANY_ID, PCI_ANY_ID},
515 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
516 PCI_ANY_ID, PCI_ANY_ID},
520 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* AER/EEH recovery callbacks wired into the pci_driver below. */
522 static struct pci_error_handlers s2io_err_handler = {
523 .error_detected = s2io_io_error_detected,
524 .slot_reset = s2io_io_slot_reset,
525 .resume = s2io_io_resume,
528 static struct pci_driver s2io_driver = {
530 .id_table = s2io_tbl,
531 .probe = s2io_init_nic,
532 .remove = __devexit_p(s2io_rem_nic),
533 .err_handler = &s2io_err_handler,
/*
 * TXD_MEM_PAGE_CNT - number of memory pages needed for @len TxD lists
 * when @per_each lists fit in one page, i.e. ceil(len / per_each).
 * Used by both init_shared_mem() and free_shared_mem().
 * Arguments are parenthesized so expression arguments (e.g. a product)
 * expand with the intended precedence; the original raw 'per_each'
 * divisor broke for arguments like '2 * 2'.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
539 /* netqueue manipulation helper functions */
/*
 * Stop every Tx queue: per-subqueue in multiqueue builds, otherwise mark
 * all FIFOs FIFO_QUEUE_STOP and stop the single net queue.
 * NOTE(review): the #else/#endif of the CONFIG_NETDEVICES_MULTIQUEUE
 * conditional are on lines elided from this listing.
 */
540 static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
543 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
544 if (sp->config.multiq) {
545 for (i = 0; i < sp->config.tx_fifo_num; i++)
546 netif_stop_subqueue(sp->dev, i);
550 for (i = 0; i < sp->config.tx_fifo_num; i++)
551 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
552 netif_stop_queue(sp->dev);
/*
 * Stop a single Tx queue (@fifo_no): its subqueue in multiqueue builds,
 * otherwise flag the FIFO stopped and stop the shared net queue.
 */
556 static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
558 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
559 if (sp->config.multiq)
560 netif_stop_subqueue(sp->dev, fifo_no);
564 sp->mac_control.fifos[fifo_no].queue_state =
566 netif_stop_queue(sp->dev);
/*
 * Start every Tx queue: per-subqueue in multiqueue builds, otherwise mark
 * all FIFOs FIFO_QUEUE_START and start the single net queue.
 */
570 static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
573 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
574 if (sp->config.multiq) {
575 for (i = 0; i < sp->config.tx_fifo_num; i++)
576 netif_start_subqueue(sp->dev, i);
580 for (i = 0; i < sp->config.tx_fifo_num; i++)
581 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
582 netif_start_queue(sp->dev);
/*
 * Start a single Tx queue (@fifo_no): its subqueue in multiqueue builds,
 * otherwise flag the FIFO started and start the shared net queue.
 */
586 static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
588 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
589 if (sp->config.multiq)
590 netif_start_subqueue(sp->dev, fifo_no);
594 sp->mac_control.fifos[fifo_no].queue_state =
596 netif_start_queue(sp->dev);
/*
 * Wake every Tx queue (restart queues that were stopped): per-subqueue in
 * multiqueue builds, otherwise mark all FIFOs started and wake the single
 * net queue.
 */
600 static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
603 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
604 if (sp->config.multiq) {
605 for (i = 0; i < sp->config.tx_fifo_num; i++)
606 netif_wake_subqueue(sp->dev, i);
610 for (i = 0; i < sp->config.tx_fifo_num; i++)
611 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
612 netif_wake_queue(sp->dev);
/*
 * Wake @fifo's queue after @cnt descriptors were freed by Tx completion.
 * Only wakes when something was actually freed (cnt != 0) and the queue
 * was stopped, avoiding spurious wakeups.  @multiq selects the
 * per-subqueue path in CONFIG_NETDEVICES_MULTIQUEUE builds.
 */
616 static inline void s2io_wake_tx_queue(
617 struct fifo_info *fifo, int cnt, u8 multiq)
620 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
622 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
623 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
626 if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
627 if (netif_queue_stopped(fifo->dev)) {
628 fifo->queue_state = FIFO_QUEUE_START;
629 netif_wake_queue(fifo->dev);
635 * init_shared_mem - Allocation and Initialization of Memory
636 * @nic: Device private variable.
637 * Description: The function allocates all the memory areas shared
638 * between the NIC and the driver. This includes Tx descriptors,
639 * Rx descriptors and the statistics block.
/*
 * NOTE(review): many lines of this function (error returns, closing
 * braces, some DBG_PRINT arguments) are elided from this listing;
 * confirm any change against the full source.
 */
642 static int init_shared_mem(struct s2io_nic *nic)
645 void *tmp_v_addr, *tmp_v_addr_next;
646 dma_addr_t tmp_p_addr, tmp_p_addr_next;
647 struct RxD_block *pre_rxd_blk = NULL;
649 int lst_size, lst_per_page;
650 struct net_device *dev = nic->dev;
654 struct mac_info *mac_control;
655 struct config_param *config;
656 unsigned long long mem_allocated = 0;
658 mac_control = &nic->mac_control;
659 config = &nic->config;
662 /* Allocation and initialization of TXDLs in FIOFs */
/* Reject configurations whose total TxD demand exceeds the hardware cap. */
664 for (i = 0; i < config->tx_fifo_num; i++) {
665 size += config->tx_cfg[i].fifo_len;
667 if (size > MAX_AVAILABLE_TXDS) {
668 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
669 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
674 for (i = 0; i < config->tx_fifo_num; i++) {
675 size = config->tx_cfg[i].fifo_len;
677 * Legal values are from 2 to 8192
680 DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
681 DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
682 DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
688 lst_size = (sizeof(struct TxD) * config->max_txds);
689 lst_per_page = PAGE_SIZE / lst_size;
/* Per-FIFO bookkeeping array: one list_info_hold per TxD list. */
691 for (i = 0; i < config->tx_fifo_num; i++) {
692 int fifo_len = config->tx_cfg[i].fifo_len;
693 int list_holder_size = fifo_len * sizeof(struct list_info_hold);
694 mac_control->fifos[i].list_info = kzalloc(list_holder_size,
696 if (!mac_control->fifos[i].list_info) {
698 "Malloc failed for list_info\n");
701 mem_allocated += list_holder_size;
/* DMA-coherent pages for the TxD lists themselves, page by page. */
703 for (i = 0; i < config->tx_fifo_num; i++) {
704 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
706 mac_control->fifos[i].tx_curr_put_info.offset = 0;
707 mac_control->fifos[i].tx_curr_put_info.fifo_len =
708 config->tx_cfg[i].fifo_len - 1;
709 mac_control->fifos[i].tx_curr_get_info.offset = 0;
710 mac_control->fifos[i].tx_curr_get_info.fifo_len =
711 config->tx_cfg[i].fifo_len - 1;
712 mac_control->fifos[i].fifo_no = i;
713 mac_control->fifos[i].nic = nic;
714 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
715 mac_control->fifos[i].dev = dev;
717 for (j = 0; j < page_num; j++) {
721 tmp_v = pci_alloc_consistent(nic->pdev,
725 "pci_alloc_consistent ");
726 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
729 /* If we got a zero DMA address(can happen on
730 * certain platforms like PPC), reallocate.
731 * Store virtual address of page we don't want,
735 mac_control->zerodma_virt_addr = tmp_v;
737 "%s: Zero DMA address for TxDL. ", dev->name);
739 "Virtual address %p\n", tmp_v);
740 tmp_v = pci_alloc_consistent(nic->pdev,
744 "pci_alloc_consistent ");
745 DBG_PRINT(INFO_DBG, "failed for TxDL\n");
748 mem_allocated += PAGE_SIZE;
/* Carve the page into lst_size slices; stop at the fifo's end. */
750 while (k < lst_per_page) {
751 int l = (j * lst_per_page) + k;
752 if (l == config->tx_cfg[i].fifo_len)
754 mac_control->fifos[i].list_info[l].list_virt_addr =
755 tmp_v + (k * lst_size);
756 mac_control->fifos[i].list_info[l].list_phy_addr =
757 tmp_p + (k * lst_size);
/* Per-FIFO UFO in-band descriptor shadow array. */
763 for (i = 0; i < config->tx_fifo_num; i++) {
764 size = config->tx_cfg[i].fifo_len;
765 mac_control->fifos[i].ufo_in_band_v
766 = kcalloc(size, sizeof(u64), GFP_KERNEL);
767 if (!mac_control->fifos[i].ufo_in_band_v)
769 mem_allocated += (size * sizeof(u64));
772 /* Allocation and initialization of RXDs in Rings */
/* num_rxd must be a whole number of blocks (rxd_count + 1 per block). */
774 for (i = 0; i < config->rx_ring_num; i++) {
775 if (config->rx_cfg[i].num_rxd %
776 (rxd_count[nic->rxd_mode] + 1)) {
777 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
778 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
780 DBG_PRINT(ERR_DBG, "RxDs per Block");
783 size += config->rx_cfg[i].num_rxd;
784 mac_control->rings[i].block_count =
785 config->rx_cfg[i].num_rxd /
786 (rxd_count[nic->rxd_mode] + 1 );
787 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
788 mac_control->rings[i].block_count;
790 if (nic->rxd_mode == RXD_MODE_1)
791 size = (size * (sizeof(struct RxD1)));
793 size = (size * (sizeof(struct RxD3)));
795 for (i = 0; i < config->rx_ring_num; i++) {
796 mac_control->rings[i].rx_curr_get_info.block_index = 0;
797 mac_control->rings[i].rx_curr_get_info.offset = 0;
798 mac_control->rings[i].rx_curr_get_info.ring_len =
799 config->rx_cfg[i].num_rxd - 1;
800 mac_control->rings[i].rx_curr_put_info.block_index = 0;
801 mac_control->rings[i].rx_curr_put_info.offset = 0;
802 mac_control->rings[i].rx_curr_put_info.ring_len =
803 config->rx_cfg[i].num_rxd - 1;
804 mac_control->rings[i].nic = nic;
805 mac_control->rings[i].ring_no = i;
807 blk_cnt = config->rx_cfg[i].num_rxd /
808 (rxd_count[nic->rxd_mode] + 1);
809 /* Allocating all the Rx blocks */
810 for (j = 0; j < blk_cnt; j++) {
811 struct rx_block_info *rx_blocks;
814 rx_blocks = &mac_control->rings[i].rx_blocks[j];
815 size = SIZE_OF_BLOCK; //size is always page size
816 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
818 if (tmp_v_addr == NULL) {
820 * In case of failure, free_shared_mem()
821 * is called, which should free any
822 * memory that was alloced till the
825 rx_blocks->block_virt_addr = tmp_v_addr;
828 mem_allocated += size;
829 memset(tmp_v_addr, 0, size);
830 rx_blocks->block_virt_addr = tmp_v_addr;
831 rx_blocks->block_dma_addr = tmp_p_addr;
832 rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
833 rxd_count[nic->rxd_mode],
835 if (!rx_blocks->rxds)
838 (sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
/* Record the virt/DMA address of every RxD inside this block. */
839 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
840 rx_blocks->rxds[l].virt_addr =
841 rx_blocks->block_virt_addr +
842 (rxd_size[nic->rxd_mode] * l);
843 rx_blocks->rxds[l].dma_addr =
844 rx_blocks->block_dma_addr +
845 (rxd_size[nic->rxd_mode] * l);
848 /* Interlinking all Rx Blocks */
/* Each block's trailer points at the next block (circularly, via %). */
849 for (j = 0; j < blk_cnt; j++) {
851 mac_control->rings[i].rx_blocks[j].block_virt_addr;
853 mac_control->rings[i].rx_blocks[(j + 1) %
854 blk_cnt].block_virt_addr;
856 mac_control->rings[i].rx_blocks[j].block_dma_addr;
858 mac_control->rings[i].rx_blocks[(j + 1) %
859 blk_cnt].block_dma_addr;
861 pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
862 pre_rxd_blk->reserved_2_pNext_RxD_block =
863 (unsigned long) tmp_v_addr_next;
864 pre_rxd_blk->pNext_RxD_Blk_physical =
865 (u64) tmp_p_addr_next;
866 if (nic->rxd_mode == RXD_MODE_3B) {
870 * Allocation of Storages for buffer addresses in 2BUFF mode
871 * and the buffers as well.
873 for (i = 0; i < config->rx_ring_num; i++) {
874 blk_cnt = config->rx_cfg[i].num_rxd /
875 (rxd_count[nic->rxd_mode]+ 1);
876 mac_control->rings[i].ba =
877 kmalloc((sizeof(struct buffAdd *) * blk_cnt),
879 if (!mac_control->rings[i].ba)
881 mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
882 for (j = 0; j < blk_cnt; j++) {
884 mac_control->rings[i].ba[j] =
885 kmalloc((sizeof(struct buffAdd) *
886 (rxd_count[nic->rxd_mode] + 1)),
888 if (!mac_control->rings[i].ba[j])
890 mem_allocated += (sizeof(struct buffAdd) * \
891 (rxd_count[nic->rxd_mode] + 1));
892 while (k != rxd_count[nic->rxd_mode]) {
893 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE, then round the pointer up to alignment. */
895 ba->ba_0_org = (void *) kmalloc
896 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
900 (BUF0_LEN + ALIGN_SIZE);
901 tmp = (unsigned long)ba->ba_0_org;
903 tmp &= ~((unsigned long) ALIGN_SIZE);
904 ba->ba_0 = (void *) tmp;
906 ba->ba_1_org = (void *) kmalloc
907 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
911 += (BUF1_LEN + ALIGN_SIZE);
912 tmp = (unsigned long) ba->ba_1_org;
914 tmp &= ~((unsigned long) ALIGN_SIZE);
915 ba->ba_1 = (void *) tmp;
922 /* Allocation and initialization of Statistics block */
923 size = sizeof(struct stat_block);
924 mac_control->stats_mem = pci_alloc_consistent
925 (nic->pdev, size, &mac_control->stats_mem_phy);
927 if (!mac_control->stats_mem) {
929 * In case of failure, free_shared_mem() is called, which
930 * should free any memory that was alloced till the
935 mem_allocated += size;
936 mac_control->stats_mem_sz = size;
938 tmp_v_addr = mac_control->stats_mem;
939 mac_control->stats_info = (struct stat_block *) tmp_v_addr;
940 memset(tmp_v_addr, 0, size);
941 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
942 (unsigned long long) tmp_p_addr);
943 mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
948 * free_shared_mem - Free the allocated Memory
949 * @nic: Device private variable.
950 * Description: This function is to free all memory locations allocated by
951 * the init_shared_mem() function and return it to the kernel.
/*
 * NOTE(review): several lines (early-exit checks, continuation arguments,
 * closing braces) are elided from this listing; confirm any change
 * against the full source.  Safe to call on a partially-initialized nic:
 * each stage checks its pointer before freeing.
 */
954 static void free_shared_mem(struct s2io_nic *nic)
956 int i, j, blk_cnt, size;
958 dma_addr_t tmp_p_addr;
959 struct mac_info *mac_control;
960 struct config_param *config;
961 int lst_size, lst_per_page;
962 struct net_device *dev;
970 mac_control = &nic->mac_control;
971 config = &nic->config;
973 lst_size = (sizeof(struct TxD) * config->max_txds);
974 lst_per_page = PAGE_SIZE / lst_size;
/* Release the DMA pages backing each FIFO's TxD lists. */
976 for (i = 0; i < config->tx_fifo_num; i++) {
977 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
979 for (j = 0; j < page_num; j++) {
980 int mem_blks = (j * lst_per_page);
981 if (!mac_control->fifos[i].list_info)
983 if (!mac_control->fifos[i].list_info[mem_blks].
986 pci_free_consistent(nic->pdev, PAGE_SIZE,
987 mac_control->fifos[i].
990 mac_control->fifos[i].
993 nic->mac_control.stats_info->sw_stat.mem_freed
996 /* If we got a zero DMA address during allocation,
999 if (mac_control->zerodma_virt_addr) {
1000 pci_free_consistent(nic->pdev, PAGE_SIZE,
1001 mac_control->zerodma_virt_addr,
1004 "%s: Freeing TxDL with zero DMA addr. ",
1006 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
1007 mac_control->zerodma_virt_addr);
1008 nic->mac_control.stats_info->sw_stat.mem_freed
1011 kfree(mac_control->fifos[i].list_info);
1012 nic->mac_control.stats_info->sw_stat.mem_freed +=
1013 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
/* Release every ring's Rx block pages and per-block rxd arrays. */
1016 size = SIZE_OF_BLOCK;
1017 for (i = 0; i < config->rx_ring_num; i++) {
1018 blk_cnt = mac_control->rings[i].block_count;
1019 for (j = 0; j < blk_cnt; j++) {
1020 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
1022 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
1024 if (tmp_v_addr == NULL)
1026 pci_free_consistent(nic->pdev, size,
1027 tmp_v_addr, tmp_p_addr);
1028 nic->mac_control.stats_info->sw_stat.mem_freed += size;
1029 kfree(mac_control->rings[i].rx_blocks[j].rxds);
1030 nic->mac_control.stats_info->sw_stat.mem_freed +=
1031 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
1035 if (nic->rxd_mode == RXD_MODE_3B) {
1036 /* Freeing buffer storage addresses in 2BUFF mode. */
1037 for (i = 0; i < config->rx_ring_num; i++) {
1038 blk_cnt = config->rx_cfg[i].num_rxd /
1039 (rxd_count[nic->rxd_mode] + 1);
1040 for (j = 0; j < blk_cnt; j++) {
1042 if (!mac_control->rings[i].ba[j])
1044 while (k != rxd_count[nic->rxd_mode]) {
1045 struct buffAdd *ba =
1046 &mac_control->rings[i].ba[j][k];
1047 kfree(ba->ba_0_org);
1048 nic->mac_control.stats_info->sw_stat.\
1049 mem_freed += (BUF0_LEN + ALIGN_SIZE);
1050 kfree(ba->ba_1_org);
1051 nic->mac_control.stats_info->sw_stat.\
1052 mem_freed += (BUF1_LEN + ALIGN_SIZE);
1055 kfree(mac_control->rings[i].ba[j]);
1056 nic->mac_control.stats_info->sw_stat.mem_freed +=
1057 (sizeof(struct buffAdd) *
1058 (rxd_count[nic->rxd_mode] + 1));
1060 kfree(mac_control->rings[i].ba);
1061 nic->mac_control.stats_info->sw_stat.mem_freed +=
1062 (sizeof(struct buffAdd *) * blk_cnt);
1066 for (i = 0; i < nic->config.tx_fifo_num; i++) {
1067 if (mac_control->fifos[i].ufo_in_band_v) {
1068 nic->mac_control.stats_info->sw_stat.mem_freed
1069 += (config->tx_cfg[i].fifo_len * sizeof(u64));
1070 kfree(mac_control->fifos[i].ufo_in_band_v);
1074 if (mac_control->stats_mem) {
1075 nic->mac_control.stats_info->sw_stat.mem_freed +=
1076 mac_control->stats_mem_sz;
1077 pci_free_consistent(nic->pdev,
1078 mac_control->stats_mem_sz,
1079 mac_control->stats_mem,
1080 mac_control->stats_mem_phy);
1085 * s2io_verify_pci_mode -
/*
 * Reads the adapter's pci_mode register and returns -1 when the mode is
 * unknown; presumably returns the decoded mode otherwise (the remaining
 * return is on a line elided from this listing).
 */
1088 static int s2io_verify_pci_mode(struct s2io_nic *nic)
1090 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1091 register u64 val64 = 0;
1094 val64 = readq(&bar0->pci_mode);
1095 mode = (u8)GET_PCI_MODE(val64);
1097 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1098 return -1; /* Unknown PCI mode */
1102 #define NEC_VENID 0x1033
1103 #define NEC_DEVID 0x0125
/*
 * Scans all PCI devices for an NEC bridge that is the parent bus of the
 * adapter (used to detect a PCI-E attachment).
 * NOTE(review): pci_get_device() returns a referenced device; if the
 * elided body returns from inside this loop without pci_dev_put(tdev),
 * a reference is leaked — confirm against the full source.
 */
1104 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1106 struct pci_dev *tdev = NULL;
1107 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1108 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
1109 if (tdev->bus == s2io_pdev->bus->parent)
/* Bus speed in MHz, indexed by the decoded PCI mode value. */
1117 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
1119 * s2io_print_pci_mode -
/*
 * Decodes and logs the bus type/width/speed the adapter is plugged into,
 * caches the speed in config->bus_speed, and returns -1 for an unknown
 * mode or unsupported speed.
 */
1121 static int s2io_print_pci_mode(struct s2io_nic *nic)
1123 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1124 register u64 val64 = 0;
1126 struct config_param *config = &nic->config;
1128 val64 = readq(&bar0->pci_mode);
1129 mode = (u8)GET_PCI_MODE(val64);
1131 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1132 return -1; /* Unknown PCI mode */
1134 config->bus_speed = bus_speed[mode];
1136 if (s2io_on_nec_bridge(nic->pdev)) {
1137 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1142 if (val64 & PCI_MODE_32_BITS) {
1143 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1145 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1149 case PCI_MODE_PCI_33:
1150 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
1152 case PCI_MODE_PCI_66:
1153 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
1155 case PCI_MODE_PCIX_M1_66:
1156 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
1158 case PCI_MODE_PCIX_M1_100:
1159 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
1161 case PCI_MODE_PCIX_M1_133:
1162 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
1164 case PCI_MODE_PCIX_M2_66:
1165 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
1167 case PCI_MODE_PCIX_M2_100:
1168 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
1170 case PCI_MODE_PCIX_M2_133:
1171 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
1174 return -1; /* Unsupported bus speed */
1181 * init_tti - Initialization transmit traffic interrupt scheme
1182 * @nic: device private variable
1183 * @link: link status (UP/DOWN) used to enable/disable continuous
1184 * transmit interrupts
1185 * Description: The function configures transmit traffic interrupts
1186 * Return Value: SUCCESS on success and
/*
 * NOTE(review): the tail of this function (final return, closing braces)
 * is on lines elided from this listing.
 */
1190 static int init_tti(struct s2io_nic *nic, int link)
1192 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1193 register u64 val64 = 0;
1195 struct config_param *config;
1197 config = &nic->config;
/* Program TTI DATA1/DATA2 and issue the command for every Tx FIFO. */
1199 for (i = 0; i < config->tx_fifo_num; i++) {
1201 * TTI Initialization. Default Tx timer gets us about
1202 * 250 interrupts per sec. Continuous interrupts are enabled
/* Xframe II: scale the timer from the measured bus speed; Xframe I uses a fixed value. */
1205 if (nic->device_type == XFRAME_II_DEVICE) {
1206 int count = (nic->config.bus_speed * 125)/2;
1207 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1209 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1211 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1212 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1213 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1214 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1216 if (use_continuous_tx_intrs && (link == LINK_UP))
1217 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1218 writeq(val64, &bar0->tti_data1_mem);
1220 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1221 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1222 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1223 TTI_DATA2_MEM_TX_UFC_D(0x80);
1225 writeq(val64, &bar0->tti_data2_mem);
1227 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
1228 TTI_CMD_MEM_OFFSET(i);
1229 writeq(val64, &bar0->tti_command_mem);
1231 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1232 TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
1240 * init_nic - Initialization of hardware
1241 * @nic: device private variable
1242 * Description: The function sequentially configures every block
1243 * of the H/W from their reset values.
1244 * Return Value: SUCCESS on success and
1245 * '-1' on failure (endian settings incorrect).
/*
 * init_nic - sequentially configure every H/W block from reset values:
 * swapper, XGXS/DTX, Tx FIFO partitions, Rx queues and QoS round-robin,
 * frame-length steering, statistics memory, TTI/RTI, pause thresholds
 * and misc PCI-X tuning.  NOTE(review): non-contiguous listing; elided
 * lines (braces, breaks, intermediate statements) are not reproduced.
 */
1248 static int init_nic(struct s2io_nic *nic)
1250 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1251 struct net_device *dev = nic->dev;
1252 register u64 val64 = 0;
1256 struct mac_info *mac_control;
1257 struct config_param *config;
1259 unsigned long long mem_share;
1262 mac_control = &nic->mac_control;
1263 config = &nic->config;
1265 /* to set the swapper control on the card */
1266 if(s2io_set_swapper(nic)) {
1267 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
1272 * Herc requires EOI to be removed from reset before XGXS, so..
1274 if (nic->device_type & XFRAME_II_DEVICE) {
1275 val64 = 0xA500000000ULL;
1276 writeq(val64, &bar0->sw_reset);
/* Read back to flush the posted write before proceeding. */
1278 val64 = readq(&bar0->sw_reset);
1281 /* Remove XGXS from reset state */
1283 writeq(val64, &bar0->sw_reset);
1285 val64 = readq(&bar0->sw_reset);
1287 /* Ensure that it's safe to access registers by checking
1288 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1290 if (nic->device_type == XFRAME_II_DEVICE) {
1291 for (i = 0; i < 50; i++) {
1292 val64 = readq(&bar0->adapter_status);
1293 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1301 /* Enable Receiving broadcasts */
/* mac_cfg is key-protected: each 32-bit half-write must be preceded
 * by writing the 0x4C0D unlock key to rmac_cfg_key. */
1302 add = &bar0->mac_cfg;
1303 val64 = readq(&bar0->mac_cfg);
1304 val64 |= MAC_RMAC_BCAST_ENABLE;
1305 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1306 writel((u32) val64, add);
1307 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1308 writel((u32) (val64 >> 32), (add + 4));
1310 /* Read registers in all blocks */
1311 val64 = readq(&bar0->mac_int_mask);
1312 val64 = readq(&bar0->mc_int_mask);
1313 val64 = readq(&bar0->xgxs_int_mask);
1317 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
/* Bring up the transceiver: device-specific DTX sequences terminated
 * by END_SIGN sentinels. */
1319 if (nic->device_type & XFRAME_II_DEVICE) {
1320 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1321 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1322 &bar0->dtx_control, UF);
1324 msleep(1); /* Necessary!! */
1328 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1329 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1330 &bar0->dtx_control, UF);
1331 val64 = readq(&bar0->dtx_control);
1336 /* Tx DMA Initialization */
1338 writeq(val64, &bar0->tx_fifo_partition_0);
1339 writeq(val64, &bar0->tx_fifo_partition_1);
1340 writeq(val64, &bar0->tx_fifo_partition_2);
1341 writeq(val64, &bar0->tx_fifo_partition_3);
/* Pack each FIFO's length and priority into the partition registers;
 * two FIFOs per 64-bit register (j selects the 32-bit half). */
1344 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1346 vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1347 13) | vBIT(config->tx_cfg[i].fifo_priority,
1350 if (i == (config->tx_fifo_num - 1)) {
1357 writeq(val64, &bar0->tx_fifo_partition_0);
1362 writeq(val64, &bar0->tx_fifo_partition_1);
1367 writeq(val64, &bar0->tx_fifo_partition_2);
1372 writeq(val64, &bar0->tx_fifo_partition_3);
1383 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1384 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1386 if ((nic->device_type == XFRAME_I_DEVICE) &&
1387 (nic->pdev->revision < 4))
1388 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1390 val64 = readq(&bar0->tx_fifo_partition_0);
1391 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1392 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1395 * Initialization of Tx_PA_CONFIG register to ignore packet
1396 * integrity checking.
1398 val64 = readq(&bar0->tx_pa_cfg);
1399 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1400 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1401 writeq(val64, &bar0->tx_pa_cfg);
1403 /* Rx DMA initialization. */
1405 for (i = 0; i < config->rx_ring_num; i++) {
1407 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1410 writeq(val64, &bar0->rx_queue_priority);
1413 * Allocating equal share of memory to all the
1417 if (nic->device_type & XFRAME_II_DEVICE)
/* Queue 0 absorbs the division remainder so the whole of mem_size
 * is distributed across the configured rings. */
1422 for (i = 0; i < config->rx_ring_num; i++) {
1425 mem_share = (mem_size / config->rx_ring_num +
1426 mem_size % config->rx_ring_num);
1427 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1430 mem_share = (mem_size / config->rx_ring_num);
1431 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1434 mem_share = (mem_size / config->rx_ring_num);
1435 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1438 mem_share = (mem_size / config->rx_ring_num);
1439 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1442 mem_share = (mem_size / config->rx_ring_num);
1443 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1446 mem_share = (mem_size / config->rx_ring_num);
1447 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1450 mem_share = (mem_size / config->rx_ring_num);
1451 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1454 mem_share = (mem_size / config->rx_ring_num);
1455 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1459 writeq(val64, &bar0->rx_queue_cfg);
1462 * Filling Tx round robin registers
1463 * as per the number of FIFOs for equal scheduling priority
/* Each byte of the five round-robin registers names the FIFO served
 * in that slot; the patterns below give equal weight per FIFO count. */
1465 switch (config->tx_fifo_num) {
1468 writeq(val64, &bar0->tx_w_round_robin_0);
1469 writeq(val64, &bar0->tx_w_round_robin_1);
1470 writeq(val64, &bar0->tx_w_round_robin_2);
1471 writeq(val64, &bar0->tx_w_round_robin_3);
1472 writeq(val64, &bar0->tx_w_round_robin_4);
1475 val64 = 0x0001000100010001ULL;
1476 writeq(val64, &bar0->tx_w_round_robin_0);
1477 writeq(val64, &bar0->tx_w_round_robin_1);
1478 writeq(val64, &bar0->tx_w_round_robin_2);
1479 writeq(val64, &bar0->tx_w_round_robin_3);
1480 val64 = 0x0001000100000000ULL;
1481 writeq(val64, &bar0->tx_w_round_robin_4);
1484 val64 = 0x0001020001020001ULL;
1485 writeq(val64, &bar0->tx_w_round_robin_0);
1486 val64 = 0x0200010200010200ULL;
1487 writeq(val64, &bar0->tx_w_round_robin_1);
1488 val64 = 0x0102000102000102ULL;
1489 writeq(val64, &bar0->tx_w_round_robin_2);
1490 val64 = 0x0001020001020001ULL;
1491 writeq(val64, &bar0->tx_w_round_robin_3);
1492 val64 = 0x0200010200000000ULL;
1493 writeq(val64, &bar0->tx_w_round_robin_4);
1496 val64 = 0x0001020300010203ULL;
1497 writeq(val64, &bar0->tx_w_round_robin_0);
1498 writeq(val64, &bar0->tx_w_round_robin_1);
1499 writeq(val64, &bar0->tx_w_round_robin_2);
1500 writeq(val64, &bar0->tx_w_round_robin_3);
1501 val64 = 0x0001020300000000ULL;
1502 writeq(val64, &bar0->tx_w_round_robin_4);
1505 val64 = 0x0001020304000102ULL;
1506 writeq(val64, &bar0->tx_w_round_robin_0);
1507 val64 = 0x0304000102030400ULL;
1508 writeq(val64, &bar0->tx_w_round_robin_1);
1509 val64 = 0x0102030400010203ULL;
1510 writeq(val64, &bar0->tx_w_round_robin_2);
1511 val64 = 0x0400010203040001ULL;
1512 writeq(val64, &bar0->tx_w_round_robin_3);
1513 val64 = 0x0203040000000000ULL;
1514 writeq(val64, &bar0->tx_w_round_robin_4);
1517 val64 = 0x0001020304050001ULL;
1518 writeq(val64, &bar0->tx_w_round_robin_0);
1519 val64 = 0x0203040500010203ULL;
1520 writeq(val64, &bar0->tx_w_round_robin_1);
1521 val64 = 0x0405000102030405ULL;
1522 writeq(val64, &bar0->tx_w_round_robin_2);
1523 val64 = 0x0001020304050001ULL;
1524 writeq(val64, &bar0->tx_w_round_robin_3);
1525 val64 = 0x0203040500000000ULL;
1526 writeq(val64, &bar0->tx_w_round_robin_4);
1529 val64 = 0x0001020304050600ULL;
1530 writeq(val64, &bar0->tx_w_round_robin_0);
1531 val64 = 0x0102030405060001ULL;
1532 writeq(val64, &bar0->tx_w_round_robin_1);
1533 val64 = 0x0203040506000102ULL;
1534 writeq(val64, &bar0->tx_w_round_robin_2);
1535 val64 = 0x0304050600010203ULL;
1536 writeq(val64, &bar0->tx_w_round_robin_3);
1537 val64 = 0x0405060000000000ULL;
1538 writeq(val64, &bar0->tx_w_round_robin_4);
1541 val64 = 0x0001020304050607ULL;
1542 writeq(val64, &bar0->tx_w_round_robin_0);
1543 writeq(val64, &bar0->tx_w_round_robin_1);
1544 writeq(val64, &bar0->tx_w_round_robin_2);
1545 writeq(val64, &bar0->tx_w_round_robin_3);
1546 val64 = 0x0001020300000000ULL;
1547 writeq(val64, &bar0->tx_w_round_robin_4);
1551 /* Enable all configured Tx FIFO partitions */
1552 val64 = readq(&bar0->tx_fifo_partition_0);
1553 val64 |= (TX_FIFO_PARTITION_EN);
1554 writeq(val64, &bar0->tx_fifo_partition_0);
1556 /* Filling the Rx round robin registers as per the
1557 * number of Rings and steering based on QoS.
1559 switch (config->rx_ring_num) {
1561 val64 = 0x8080808080808080ULL;
1562 writeq(val64, &bar0->rts_qos_steering);
1565 val64 = 0x0000010000010000ULL;
1566 writeq(val64, &bar0->rx_w_round_robin_0);
1567 val64 = 0x0100000100000100ULL;
1568 writeq(val64, &bar0->rx_w_round_robin_1);
1569 val64 = 0x0001000001000001ULL;
1570 writeq(val64, &bar0->rx_w_round_robin_2);
1571 val64 = 0x0000010000010000ULL;
1572 writeq(val64, &bar0->rx_w_round_robin_3);
1573 val64 = 0x0100000000000000ULL;
1574 writeq(val64, &bar0->rx_w_round_robin_4);
1576 val64 = 0x8080808040404040ULL;
1577 writeq(val64, &bar0->rts_qos_steering);
1580 val64 = 0x0001000102000001ULL;
1581 writeq(val64, &bar0->rx_w_round_robin_0);
1582 val64 = 0x0001020000010001ULL;
1583 writeq(val64, &bar0->rx_w_round_robin_1);
1584 val64 = 0x0200000100010200ULL;
1585 writeq(val64, &bar0->rx_w_round_robin_2);
1586 val64 = 0x0001000102000001ULL;
1587 writeq(val64, &bar0->rx_w_round_robin_3);
1588 val64 = 0x0001020000000000ULL;
1589 writeq(val64, &bar0->rx_w_round_robin_4);
1591 val64 = 0x8080804040402020ULL;
1592 writeq(val64, &bar0->rts_qos_steering);
1595 val64 = 0x0001020300010200ULL;
1596 writeq(val64, &bar0->rx_w_round_robin_0);
1597 val64 = 0x0100000102030001ULL;
1598 writeq(val64, &bar0->rx_w_round_robin_1);
1599 val64 = 0x0200010000010203ULL;
1600 writeq(val64, &bar0->rx_w_round_robin_2);
1601 val64 = 0x0001020001000001ULL;
1602 writeq(val64, &bar0->rx_w_round_robin_3);
1603 val64 = 0x0203000100000000ULL;
1604 writeq(val64, &bar0->rx_w_round_robin_4);
1606 val64 = 0x8080404020201010ULL;
1607 writeq(val64, &bar0->rts_qos_steering);
1610 val64 = 0x0001000203000102ULL;
1611 writeq(val64, &bar0->rx_w_round_robin_0);
1612 val64 = 0x0001020001030004ULL;
1613 writeq(val64, &bar0->rx_w_round_robin_1);
1614 val64 = 0x0001000203000102ULL;
1615 writeq(val64, &bar0->rx_w_round_robin_2);
1616 val64 = 0x0001020001030004ULL;
1617 writeq(val64, &bar0->rx_w_round_robin_3);
1618 val64 = 0x0001000000000000ULL;
1619 writeq(val64, &bar0->rx_w_round_robin_4);
1621 val64 = 0x8080404020201008ULL;
1622 writeq(val64, &bar0->rts_qos_steering);
1625 val64 = 0x0001020304000102ULL;
1626 writeq(val64, &bar0->rx_w_round_robin_0);
1627 val64 = 0x0304050001020001ULL;
1628 writeq(val64, &bar0->rx_w_round_robin_1);
1629 val64 = 0x0203000100000102ULL;
1630 writeq(val64, &bar0->rx_w_round_robin_2);
1631 val64 = 0x0304000102030405ULL;
1632 writeq(val64, &bar0->rx_w_round_robin_3);
1633 val64 = 0x0001000200000000ULL;
1634 writeq(val64, &bar0->rx_w_round_robin_4);
1636 val64 = 0x8080404020100804ULL;
1637 writeq(val64, &bar0->rts_qos_steering);
1640 val64 = 0x0001020001020300ULL;
1641 writeq(val64, &bar0->rx_w_round_robin_0);
1642 val64 = 0x0102030400010203ULL;
1643 writeq(val64, &bar0->rx_w_round_robin_1);
1644 val64 = 0x0405060001020001ULL;
1645 writeq(val64, &bar0->rx_w_round_robin_2);
1646 val64 = 0x0304050000010200ULL;
1647 writeq(val64, &bar0->rx_w_round_robin_3);
1648 val64 = 0x0102030000000000ULL;
1649 writeq(val64, &bar0->rx_w_round_robin_4);
1651 val64 = 0x8080402010080402ULL;
1652 writeq(val64, &bar0->rts_qos_steering);
1655 val64 = 0x0001020300040105ULL;
1656 writeq(val64, &bar0->rx_w_round_robin_0);
1657 val64 = 0x0200030106000204ULL;
1658 writeq(val64, &bar0->rx_w_round_robin_1);
1659 val64 = 0x0103000502010007ULL;
1660 writeq(val64, &bar0->rx_w_round_robin_2);
1661 val64 = 0x0304010002060500ULL;
1662 writeq(val64, &bar0->rx_w_round_robin_3);
1663 val64 = 0x0103020400000000ULL;
1664 writeq(val64, &bar0->rx_w_round_robin_4);
1666 val64 = 0x8040201008040201ULL;
1667 writeq(val64, &bar0->rts_qos_steering);
1673 for (i = 0; i < 8; i++)
1674 writeq(val64, &bar0->rts_frm_len_n[i]);
1676 /* Set the default rts frame length for the rings configured */
/* mtu + 22 covers the Ethernet header, VLAN tag and FCS overhead. */
1677 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1678 for (i = 0 ; i < config->rx_ring_num ; i++)
1679 writeq(val64, &bar0->rts_frm_len_n[i]);
1681 /* Set the frame length for the configured rings
1682 * desired by the user
1684 for (i = 0; i < config->rx_ring_num; i++) {
1685 /* If rts_frm_len[i] == 0 then it is assumed that user not
1686 * specified frame length steering.
1687 * If the user provides the frame length then program
1688 * the rts_frm_len register for those values or else
1689 * leave it as it is.
1691 if (rts_frm_len[i] != 0) {
1692 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1693 &bar0->rts_frm_len_n[i]);
1697 /* Disable differentiated services steering logic */
1698 for (i = 0; i < 64; i++) {
1699 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1700 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1702 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1707 /* Program statistics memory */
1708 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1710 if (nic->device_type == XFRAME_II_DEVICE) {
1711 val64 = STAT_BC(0x320);
1712 writeq(val64, &bar0->stat_byte_cnt);
1716 * Initializing the sampling rate for the device to calculate the
1717 * bandwidth utilization.
1719 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1720 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1721 writeq(val64, &bar0->mac_link_util);
1724 * Initializing the Transmit and Receive Traffic Interrupt
1728 /* Initialize TTI */
1729 if (SUCCESS != init_tti(nic, nic->last_link_state))
1732 /* RTI Initialization */
1733 if (nic->device_type == XFRAME_II_DEVICE) {
1735 * Programmed to generate approx. 500 Intrs per
1738 int count = (nic->config.bus_speed * 125)/4;
1739 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1741 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1742 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1743 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1744 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1746 writeq(val64, &bar0->rti_data1_mem);
1748 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1749 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
/* MSI-X uses lower frame-count thresholds (more frequent coalesced
 * interrupts) than INTA/MSI. */
1750 if (nic->config.intr_type == MSI_X)
1751 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1752 RTI_DATA2_MEM_RX_UFC_D(0x40));
1754 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1755 RTI_DATA2_MEM_RX_UFC_D(0x80));
1756 writeq(val64, &bar0->rti_data2_mem);
1758 for (i = 0; i < config->rx_ring_num; i++) {
1759 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1760 | RTI_CMD_MEM_OFFSET(i);
1761 writeq(val64, &bar0->rti_command_mem);
1764 * Once the operation completes, the Strobe bit of the
1765 * command register will be reset. We poll for this
1766 * particular condition. We wait for a maximum of 500ms
1767 * for the operation to complete, if it's not complete
1768 * by then we return error.
1772 val64 = readq(&bar0->rti_command_mem);
1773 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1777 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1787 * Initializing proper values as Pause threshold into all
1788 * the 8 Queues on Rx side.
1790 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1791 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1793 /* Disable RMAC PAD STRIPPING */
1794 add = &bar0->mac_cfg;
1795 val64 = readq(&bar0->mac_cfg);
1796 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1797 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1798 writel((u32) (val64), add);
1799 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1800 writel((u32) (val64 >> 32), (add + 4));
1801 val64 = readq(&bar0->mac_cfg);
1803 /* Enable FCS stripping by adapter */
1804 add = &bar0->mac_cfg;
1805 val64 = readq(&bar0->mac_cfg);
1806 val64 |= MAC_CFG_RMAC_STRIP_FCS;
/* Xframe-II accepts a direct 64-bit write; the key-protected split
 * write path below presumably covers Xframe-I — elided else branch. */
1807 if (nic->device_type == XFRAME_II_DEVICE)
1808 writeq(val64, &bar0->mac_cfg);
1810 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1811 writel((u32) (val64), add);
1812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1813 writel((u32) (val64 >> 32), (add + 4));
1817 * Set the time value to be inserted in the pause frame
1818 * generated by xena.
1820 val64 = readq(&bar0->rmac_pause_cfg);
1821 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1822 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1823 writeq(val64, &bar0->rmac_pause_cfg);
1826 * Set the Threshold Limit for Generating the pause frame
1827 * If the amount of data in any Queue exceeds ratio of
1828 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1829 * pause frame is generated
1832 for (i = 0; i < 4; i++) {
1834 (((u64) 0xFF00 | nic->mac_control.
1835 mc_pause_threshold_q0q3)
1838 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1841 for (i = 0; i < 4; i++) {
1843 (((u64) 0xFF00 | nic->mac_control.
1844 mc_pause_threshold_q4q7)
1847 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1850 * TxDMA will stop Read request if the number of read split has
1851 * exceeded the limit pointed by shared_splits
1853 val64 = readq(&bar0->pic_control);
1854 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1855 writeq(val64, &bar0->pic_control);
1857 if (nic->config.bus_speed == 266) {
1858 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1859 writeq(0x0, &bar0->read_retry_delay);
1860 writeq(0x0, &bar0->write_retry_delay);
1864 * Programming the Herc to split every write transaction
1865 * that does not start on an ADB to reduce disconnects.
1867 if (nic->device_type == XFRAME_II_DEVICE) {
1868 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1869 MISC_LINK_STABILITY_PRD(3);
1870 writeq(val64, &bar0->misc_control);
1871 val64 = readq(&bar0->pic_control2);
1872 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
1873 writeq(val64, &bar0->pic_control2);
/* CX4 copper variants need a larger average inter-packet gap. */
1875 if (strstr(nic->product_name, "CX4")) {
1876 val64 = TMAC_AVG_IPG(0x17);
1877 writeq(val64, &bar0->tmac_avg_ipg);
1882 #define LINK_UP_DOWN_INTERRUPT 1
1883 #define MAC_RMAC_ERR_TIMER 2
/*
 * s2io_link_fault_indication - pick the link-fault detection scheme.
 * Returns LINK_UP_DOWN_INTERRUPT only for Xframe-II running with INTA;
 * everything else (MSI/MSI-X, or Xframe-I) polls via the RMAC error
 * timer (MAC_RMAC_ERR_TIMER).
 */
1885 static int s2io_link_fault_indication(struct s2io_nic *nic)
1887 if (nic->config.intr_type != INTA)
1888 return MAC_RMAC_ERR_TIMER;
1889 if (nic->device_type == XFRAME_II_DEVICE)
1890 return LINK_UP_DOWN_INTERRUPT;
1892 return MAC_RMAC_ERR_TIMER;
1896 * do_s2io_write_bits - update alarm bits in alarm register
1897 * @value: alarm bits
1898 * @flag: interrupt status
1899 * @addr: address value
1900 * Description: update alarm bits in alarm register
/*
 * do_s2io_write_bits - read-modify-write of an interrupt mask register.
 * @value: alarm/mask bits to operate on
 * @flag:  ENABLE_INTRS clears the bits (unmasks the interrupts);
 *         any other value sets them (masks the interrupts)
 * @addr:  register address within BAR0
 */
1904 static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1908 temp64 = readq(addr);
1910 if(flag == ENABLE_INTRS)
1911 temp64 &= ~((u64) value);
1913 temp64 |= ((u64) value);
1914 writeq(temp64, addr);
/*
 * en_dis_err_alarms - enable/disable per-block error alarm interrupts.
 * @mask: bitmask of H/W blocks (TX_DMA_INTR, TX_MAC_INTR, ...) to touch
 * @flag: ENABLE_INTRS or DISABLE_INTRS, forwarded to do_s2io_write_bits()
 * Accumulates the top-level bits for each selected block into
 * nic->general_int_mask.  NOTE(review): non-contiguous listing; some
 * lines (e.g. the '{' after 'if (mask & RX_XGXS_INTR)') are elided.
 */
1917 static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
1919 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1920 register u64 gen_int_mask = 0;
1922 if (mask & TX_DMA_INTR) {
1924 gen_int_mask |= TXDMA_INT_M;
1926 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1927 TXDMA_PCC_INT | TXDMA_TTI_INT |
1928 TXDMA_LSO_INT | TXDMA_TPA_INT |
1929 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1931 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1932 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1933 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1934 &bar0->pfc_err_mask);
1936 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1937 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1938 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1940 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1941 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1942 PCC_N_SERR | PCC_6_COF_OV_ERR |
1943 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1944 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1945 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1947 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1948 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1950 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1951 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1952 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1953 flag, &bar0->lso_err_mask);
1955 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1956 flag, &bar0->tpa_err_mask);
1958 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1962 if (mask & TX_MAC_INTR) {
1963 gen_int_mask |= TXMAC_INT_M;
1964 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1965 &bar0->mac_int_mask);
1966 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1967 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1968 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1969 flag, &bar0->mac_tmac_err_mask);
1972 if (mask & TX_XGXS_INTR) {
1973 gen_int_mask |= TXXGXS_INT_M;
1974 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1975 &bar0->xgxs_int_mask);
1976 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1977 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1978 flag, &bar0->xgxs_txgxs_err_mask);
1981 if (mask & RX_DMA_INTR) {
1982 gen_int_mask |= RXDMA_INT_M;
1983 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1984 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1985 flag, &bar0->rxdma_int_mask);
1986 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1987 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1988 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1989 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1990 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1991 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1992 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1993 &bar0->prc_pcix_err_mask);
1994 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1995 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1996 &bar0->rpa_err_mask);
1997 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1998 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1999 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
2000 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
2001 flag, &bar0->rda_err_mask);
2002 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2003 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2004 flag, &bar0->rti_err_mask);
2007 if (mask & RX_MAC_INTR) {
2008 gen_int_mask |= RXMAC_INT_M;
2009 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2010 &bar0->mac_int_mask);
2011 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2012 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2013 RMAC_DOUBLE_ECC_ERR |
2014 RMAC_LINK_STATE_CHANGE_INT,
2015 flag, &bar0->mac_rmac_err_mask);
2018 if (mask & RX_XGXS_INTR)
2020 gen_int_mask |= RXXGXS_INT_M;
2021 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2022 &bar0->xgxs_int_mask);
2023 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2024 &bar0->xgxs_rxgxs_err_mask);
2027 if (mask & MC_INTR) {
2028 gen_int_mask |= MC_INT_M;
2029 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2030 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2031 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2032 &bar0->mc_err_mask);
2034 nic->general_int_mask = gen_int_mask;
2036 /* Remove this line when alarm interrupts are enabled */
2037 nic->general_int_mask = 0;
2040 * en_dis_able_nic_intrs - Enable or Disable the interrupts
2041 * @nic: device private variable,
2042 * @mask: A mask indicating which Intr block must be modified and,
2043 * @flag: A flag indicating whether to enable or disable the Intrs.
2044 * Description: This function will either disable or enable the interrupts
2045 * depending on the flag argument. The mask argument can be used to
2046 * enable/disable any Intr block.
2047 * Return Value: NONE.
/*
 * en_dis_able_nic_intrs - enable or disable top-level interrupt groups.
 * @mask: which Intr blocks to modify (TX_PIC_INTR, TX/RX_TRAFFIC_INTR)
 * @flag: ENABLE_INTRS or DISABLE_INTRS
 * Builds the general_int_mask from the selected groups plus the alarm
 * bits previously stored in nic->general_int_mask, then programs the
 * per-group mask registers and the general interrupt mask.
 */
2050 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2052 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2053 register u64 temp64 = 0, intr_mask = 0;
2055 intr_mask = nic->general_int_mask;
2057 /* Top level interrupt classification */
2058 /* PIC Interrupts */
2059 if (mask & TX_PIC_INTR) {
2060 /* Enable PIC Intrs in the general intr mask register */
2061 intr_mask |= TXPIC_INT_M;
2062 if (flag == ENABLE_INTRS) {
2064 * If Hercules adapter enable GPIO otherwise
2065 * disable all PCIX, Flash, MDIO, IIC and GPIO
2066 * interrupts for now.
/* GPIO link interrupts are only usable when the link-fault scheme
 * is interrupt-driven (Xframe-II with INTA). */
2069 if (s2io_link_fault_indication(nic) ==
2070 LINK_UP_DOWN_INTERRUPT ) {
2071 do_s2io_write_bits(PIC_INT_GPIO, flag,
2072 &bar0->pic_int_mask);
2073 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2074 &bar0->gpio_int_mask);
2076 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2077 } else if (flag == DISABLE_INTRS) {
2079 * Disable PIC Intrs in the general
2080 * intr mask register
2082 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
2086 /* Tx traffic interrupts */
2087 if (mask & TX_TRAFFIC_INTR) {
2088 intr_mask |= TXTRAFFIC_INT_M;
2089 if (flag == ENABLE_INTRS) {
2091 * Enable all the Tx side interrupts
2092 * writing 0 Enables all 64 TX interrupt levels
2094 writeq(0x0, &bar0->tx_traffic_mask);
2095 } else if (flag == DISABLE_INTRS) {
2097 * Disable Tx Traffic Intrs in the general intr mask
2100 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
2104 /* Rx traffic interrupts */
2105 if (mask & RX_TRAFFIC_INTR) {
2106 intr_mask |= RXTRAFFIC_INT_M;
2107 if (flag == ENABLE_INTRS) {
2108 /* writing 0 Enables all 8 RX interrupt levels */
2109 writeq(0x0, &bar0->rx_traffic_mask);
2110 } else if (flag == DISABLE_INTRS) {
2112 * Disable Rx Traffic Intrs in the general intr mask
2115 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
/* In the general mask a cleared bit enables the group; on disable the
 * whole register is forced to all-ones. */
2119 temp64 = readq(&bar0->general_int_mask);
2120 if (flag == ENABLE_INTRS)
2121 temp64 &= ~((u64) intr_mask);
2123 temp64 = DISABLE_ALL_INTRS;
2124 writeq(temp64, &bar0->general_int_mask);
2126 nic->general_int_mask = readq(&bar0->general_int_mask);
2130 * verify_pcc_quiescent- Checks for PCC quiescent state
2131 * Return: 1 If PCC is quiescence
2132 * 0 If PCC is not quiescence
/*
 * verify_pcc_quiescent - check the RMAC PCC idle bits in adapter_status.
 * @flag: FALSE checks for the "coming up" direction, otherwise checks
 *        the opposite polarity of the same bits.
 * Herc (Xframe-II) and Xframe-I rev >= 4 use the full PCC_IDLE field;
 * older Xframe-I uses PCC_FOUR_IDLE (only 4 PCCs enabled, see SXE-008).
 * NOTE(review): '(!herc && rev >= 4) || herc' is logically just
 * 'herc || rev >= 4' — left as-is here since this is a listing excerpt.
 */
2134 static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
2137 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2138 u64 val64 = readq(&bar0->adapter_status);
2140 herc = (sp->device_type == XFRAME_II_DEVICE);
2142 if (flag == FALSE) {
2143 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2144 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
2147 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2151 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
2152 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
2153 ADAPTER_STATUS_RMAC_PCC_IDLE))
2156 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
2157 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
2165 * verify_xena_quiescence - Checks whether the H/W is ready
2166 * Description: Returns whether the H/W is ready to go or not. Depending
2167 * on whether adapter enable bit was written or not the comparison
2168 * differs and the calling function passes the input argument flag to
2170 * Return: 1 If xena is quiescence
2171 * 0 If Xena is not quiescence
/*
 * verify_xena_quiescence - check every readiness bit in adapter_status
 * (TDMA/RDMA/PFC ready, TMAC buffer empty, PIC quiescent, MC DRAM and
 * queues ready, PLL locks, RC_PRC quiescent) and log the first failure.
 */
2174 static int verify_xena_quiescence(struct s2io_nic *sp)
2177 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2178 u64 val64 = readq(&bar0->adapter_status);
2179 mode = s2io_verify_pci_mode(sp);
2181 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2182 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2185 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2186 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2189 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2190 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2193 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2194 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2197 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2198 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2201 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2202 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2205 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2206 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2209 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2210 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2215 * In PCI 33 mode, the P_PLL is not used, and therefore,
2216 * the P_PLL_LOCK bit in the adapter_status register will
2219 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2220 sp->device_type == XFRAME_II_DEVICE && mode !=
2222 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2225 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2226 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2227 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2234 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2235 * @sp: Pointer to device specific structure
2237 * New procedure to clear mac address reading problems on Alpha platforms
/*
 * fix_mac_address - work around MAC-address read problems on Alpha by
 * replaying the END_SIGN-terminated fix_mac[] sequence into
 * gpio_control, reading back after each write to flush it.
 */
2241 static void fix_mac_address(struct s2io_nic * sp)
2243 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2247 while (fix_mac[i] != END_SIGN) {
2248 writeq(fix_mac[i++], &bar0->gpio_control);
2250 val64 = readq(&bar0->gpio_control);
2255 * start_nic - Turns the device on
2256 * @nic : device private variable.
2258 * This function actually turns the device on. Before this function is
2259 * called,all Registers are configured from their reset states
2260 * and shared memory is allocated but the NIC is still quiescent. On
2261 * calling this function, the device interrupts are cleared and the NIC is
2262 * literally switched on by writing into the adapter control register.
2264 * SUCCESS on success and -1 on failure.
/*
 * start_nic - switch the configured, quiescent NIC on: program the PRCs
 * with the Rx block addresses, enable MC-RLDRAM, verify quiescence and
 * finally enable the laser.  NOTE(review): non-contiguous listing;
 * elided lines are not reproduced.
 */
2267 static int start_nic(struct s2io_nic *nic)
2269 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2270 struct net_device *dev = nic->dev;
2271 register u64 val64 = 0;
2273 struct mac_info *mac_control;
2274 struct config_param *config;
2276 mac_control = &nic->mac_control;
2277 config = &nic->config;
2279 /* PRC Initialization and configuration */
2280 for (i = 0; i < config->rx_ring_num; i++) {
2281 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
2282 &bar0->prc_rxd0_n[i]);
2284 val64 = readq(&bar0->prc_ctrl_n[i]);
2285 if (nic->rxd_mode == RXD_MODE_1)
2286 val64 |= PRC_CTRL_RC_ENABLED;
2288 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
2289 if (nic->device_type == XFRAME_II_DEVICE)
2290 val64 |= PRC_CTRL_GROUP_READS;
2291 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2292 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
2293 writeq(val64, &bar0->prc_ctrl_n[i]);
2296 if (nic->rxd_mode == RXD_MODE_3B) {
2297 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2298 val64 = readq(&bar0->rx_pa_cfg);
2299 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2300 writeq(val64, &bar0->rx_pa_cfg);
/* Module parameter vlan_tag_strip == 0 turns off H/W VLAN stripping. */
2303 if (vlan_tag_strip == 0) {
2304 val64 = readq(&bar0->rx_pa_cfg);
2305 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2306 writeq(val64, &bar0->rx_pa_cfg);
2307 vlan_strip_flag = 0;
2311 * Enabling MC-RLDRAM. After enabling the device, we timeout
2312 * for around 100ms, which is approximately the time required
2313 * for the device to be ready for operation.
2315 val64 = readq(&bar0->mc_rldram_mrs);
2316 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2317 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2318 val64 = readq(&bar0->mc_rldram_mrs);
2320 msleep(100); /* Delay by around 100 ms. */
2322 /* Enabling ECC Protection. */
/* NOTE(review): the code *clears* ADAPTER_ECC_EN despite the comment
 * saying "Enabling" — presumably the bit is active-low; confirm against
 * the Xframe register reference before changing either. */
2323 val64 = readq(&bar0->adapter_control);
2324 val64 &= ~ADAPTER_ECC_EN;
2325 writeq(val64, &bar0->adapter_control);
2328 * Verify if the device is ready to be enabled, if so enable
2331 val64 = readq(&bar0->adapter_status);
2332 if (!verify_xena_quiescence(nic)) {
2333 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2334 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2335 (unsigned long long) val64);
2340 * With some switches, link might be already up at this point.
2341 * Because of this weird behavior, when we enable laser,
2342 * we may not get link. We need to handle this. We cannot
2343 * figure out which switch is misbehaving. So we are forced to
2344 * make a global change.
2347 /* Enabling Laser. */
2348 val64 = readq(&bar0->adapter_control);
2349 val64 |= ADAPTER_EOI_TX_ON;
2350 writeq(val64, &bar0->adapter_control);
2352 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2354 * Don't see link state interrupts initially on some switches,
2355 * so directly scheduling the link state task here.
2357 schedule_work(&nic->set_link_task);
2359 /* SXE-002: Initialize link and activity LED */
2360 subid = nic->pdev->subsystem_device;
2361 if (((subid & 0xFF) >= 0x07) &&
2362 (nic->device_type == XFRAME_I_DEVICE)) {
2363 val64 = readq(&bar0->gpio_control);
2364 val64 |= 0x0000800000000000ULL;
2365 writeq(val64, &bar0->gpio_control);
2366 val64 = 0x0411040400000000ULL;
/* Magic LED-programming register at BAR0 offset 0x2700 (per SXE-002). */
2367 writeq(val64, (void __iomem *)bar0 + 0x2700);
2373 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2375 static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2376 TxD *txdlp, int get_off)
2378 	struct s2io_nic *nic = fifo_data->nic;
2379 	struct sk_buff *skb;
	/* Special case: a UFO in-band descriptor carries a u64 marker, not a
	 * packet buffer, so only that small mapping is undone here. */
2384 	if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
2385 		pci_unmap_single(nic->pdev, (dma_addr_t)
2386 			txds->Buffer_Pointer, sizeof(u64),
	/* Host_Control stores the skb pointer stashed at transmit time. */
2391 	skb = (struct sk_buff *) ((unsigned long)
2392 		txds->Host_Control);
	/* No skb: clear the whole TxD list for this slot and bail out. */
2394 		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
	/* Unmap the linear part of the skb (len - data_len bytes). */
2397 	pci_unmap_single(nic->pdev, (dma_addr_t)
2398 		 txds->Buffer_Pointer,
2399 		 skb->len - skb->data_len,
	/* Unmap each paged fragment that was DMA-mapped for this packet. */
2401 	frg_cnt = skb_shinfo(skb)->nr_frags;
2404 		for (j = 0; j < frg_cnt; j++, txds++) {
2405 			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2406 			if (!txds->Buffer_Pointer)
2408 			pci_unmap_page(nic->pdev, (dma_addr_t)
2409 					txds->Buffer_Pointer,
2410 				       frag->size, PCI_DMA_TODEVICE);
	/* Scrub the descriptors so they can be reused; caller frees the skb. */
2413 	memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
2418 * free_tx_buffers - Free all queued Tx buffers
2419 * @nic : device private variable.
2421 * Free all queued Tx buffers.
2422 * Return Value: void
2425 static void free_tx_buffers(struct s2io_nic *nic)
2427 	struct net_device *dev = nic->dev;
2428 	struct sk_buff *skb;
2431 	struct mac_info *mac_control;
2432 	struct config_param *config;
2435 	mac_control = &nic->mac_control;
2436 	config = &nic->config;
	/* Walk every Tx FIFO under its tx_lock and reclaim any skb still
	 * attached to a descriptor (used on shutdown/reset paths). */
2438 	for (i = 0; i < config->tx_fifo_num; i++) {
2439 		unsigned long flags;
2440 		spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
2441 		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2442 			txdp = (struct TxD *) \
2443 			mac_control->fifos[i].list_info[j].list_virt_addr;
			/* s2io_txdl_getskb() unmaps the DMA buffers and hands
			 * back the skb so it can be freed and accounted. */
2444 			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2446 				nic->mac_control.stats_info->sw_stat.mem_freed
2453 			"%s:forcibly freeing %d skbs on FIFO%d\n",
		/* Reset producer/consumer offsets: the FIFO is now empty. */
2455 		mac_control->fifos[i].tx_curr_get_info.offset = 0;
2456 		mac_control->fifos[i].tx_curr_put_info.offset = 0;
2457 		spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
2462 * stop_nic - To stop the nic
2463 * @nic : device private variable.
2465 * This function does exactly the opposite of what the start_nic()
2466 * function does. This function is called to stop the device.
2471 static void stop_nic(struct s2io_nic *nic)
2473 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2474 	register u64 val64 = 0;
2476 	struct mac_info *mac_control;
2477 	struct config_param *config;
2479 	mac_control = &nic->mac_control;
2480 	config = &nic->config;
2482 	/* Disable all interrupts */
2483 	en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
2484 	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2485 	interruptible |= TX_PIC_INTR;
2486 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2488 	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
	/* Dropping ADAPTER_CNTL_EN halts the adapter's data path; this is
	 * the inverse of the enable done in start_nic(). */
2489 	val64 = readq(&bar0->adapter_control);
2490 	val64 &= ~(ADAPTER_CNTL_EN);
2491 	writeq(val64, &bar0->adapter_control);
2495 * fill_rx_buffers - Allocates the Rx side skbs
2496 * @nic: device private variable
2497 * @ring_no: ring number
2499 * The function allocates Rx side skbs and puts the physical
2500 * address of these buffers into the RxD buffer pointers, so that the NIC
2501 * can DMA the received frame into these locations.
2502 * The NIC supports 3 receive modes, viz
2504 * 2. three buffer and
2505 * 3. Five buffer modes.
2506 * Each mode defines how many fragments the received frame will be split
2507 * up into by the NIC. The frame is split into L3 header, L4 Header,
2508 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2509 * is split into 3 fragments. As of now only single buffer mode is
2512 * SUCCESS on success or an appropriate -ve value on failure.
2515 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2517 	struct net_device *dev = nic->dev;
2518 	struct sk_buff *skb;
2520 	int off, off1, size, block_no, block_no1;
2523 	struct mac_info *mac_control;
2524 	struct config_param *config;
2527 	unsigned long flags;
2528 	struct RxD_t *first_rxdp = NULL;
2529 	u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
2532 	struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
2534 	mac_control = &nic->mac_control;
2535 	config = &nic->config;
	/* Number of RxDs to replenish: ring capacity minus buffers the
	 * host still owns. */
2536 	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2537 		atomic_read(&nic->rx_bufs_left[ring_no]);
2539 	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2540 	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2541 	while (alloc_tab < alloc_cnt) {
2542 		block_no = mac_control->rings[ring_no].rx_curr_put_info.
2544 		off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2546 		rxdp = mac_control->rings[ring_no].
2547 			rx_blocks[block_no].rxds[off].virt_addr;
		/* Put pointer caught up with get pointer and the RxD is
		 * still in use: the ring is full, stop refilling. */
2549 		if ((block_no == block_no1) && (off == off1) &&
2550 					(rxdp->Host_Control)) {
2551 			DBG_PRINT(INTR_DBG, "%s: Get and Put",
2553 			DBG_PRINT(INTR_DBG, " info equated\n");
		/* End of a block: advance to the next block (wrapping at
		 * block_count) and reset the within-block offset. */
2556 		if (off && (off == rxd_count[nic->rxd_mode])) {
2557 			mac_control->rings[ring_no].rx_curr_put_info.
2559 			if (mac_control->rings[ring_no].rx_curr_put_info.
2560 			    block_index == mac_control->rings[ring_no].
2562 				mac_control->rings[ring_no].rx_curr_put_info.
2564 			block_no = mac_control->rings[ring_no].
2565 				   rx_curr_put_info.block_index;
2566 			if (off == rxd_count[nic->rxd_mode])
2568 			mac_control->rings[ring_no].rx_curr_put_info.
2570 			rxdp = mac_control->rings[ring_no].
2571 				rx_blocks[block_no].block_virt_addr;
2572 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
		/* put_pos is shared with rx_intr_handler; take put_lock
		 * when that path requires it (presumably non-NAPI). */
2576 			spin_lock_irqsave(&nic->put_lock, flags);
2577 			mac_control->rings[ring_no].put_pos =
2578 			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2579 			spin_unlock_irqrestore(&nic->put_lock, flags);
2581 			mac_control->rings[ring_no].put_pos =
2582 			(block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		/* RxD still owned by the adapter (3B mode marker set):
		 * nothing to do for this slot, back off. */
2584 		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2585 			((nic->rxd_mode == RXD_MODE_3B) &&
2586 				(rxdp->Control_2 & s2BIT(0)))) {
2587 			mac_control->rings[ring_no].rx_curr_put_info.
2591 		/* calculate size of skb based on ring mode */
2592 		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2593 			HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2594 		if (nic->rxd_mode == RXD_MODE_1)
2595 			size += NET_IP_ALIGN;
2597 			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2600 		skb = dev_alloc_skb(size);
		/* Allocation failed: hand ownership of the first refilled
		 * descriptor to the NIC before returning, so what was
		 * refilled so far is still usable. */
2602 			DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2603 			DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
2606 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2608 			nic->mac_control.stats_info->sw_stat. \
2609 				mem_alloc_fail_cnt++;
2612 		nic->mac_control.stats_info->sw_stat.mem_allocated
2614 		if (nic->rxd_mode == RXD_MODE_1) {
2615 			/* 1 buffer mode - normal operation mode */
2616 			rxdp1 = (struct RxD1*)rxdp;
2617 			memset(rxdp, 0, sizeof(struct RxD1));
2618 			skb_reserve(skb, NET_IP_ALIGN);
2619 			rxdp1->Buffer0_ptr = pci_map_single
2620 			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
2621 				PCI_DMA_FROMDEVICE);
2622 			if( (rxdp1->Buffer0_ptr == 0) ||
2623 				(rxdp1->Buffer0_ptr ==
2625 				goto pci_map_failed;
2628 			SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2630 		} else if (nic->rxd_mode == RXD_MODE_3B) {
2633 			 * 2 buffer mode provides 128
2634 			 * byte aligned receive buffers.
2637 			rxdp3 = (struct RxD3*)rxdp;
2638 			/* save buffer pointers to avoid frequent dma mapping */
2639 			Buffer0_ptr = rxdp3->Buffer0_ptr;
2640 			Buffer1_ptr = rxdp3->Buffer1_ptr;
2641 			memset(rxdp, 0, sizeof(struct RxD3));
2642 			/* restore the buffer pointers for dma sync*/
2643 			rxdp3->Buffer0_ptr = Buffer0_ptr;
2644 			rxdp3->Buffer1_ptr = Buffer1_ptr;
2646 			ba = &mac_control->rings[ring_no].ba[block_no][off];
2647 			skb_reserve(skb, BUF0_LEN);
2648 			tmp = (u64)(unsigned long) skb->data;
2651 			skb->data = (void *) (unsigned long)tmp;
2652 			skb_reset_tail_pointer(skb);
			/* Buffer0 (header area, ba_0) is mapped once and then
			 * only synced on subsequent refills. */
2654 			if (!(rxdp3->Buffer0_ptr))
2655 				rxdp3->Buffer0_ptr =
2656 				   pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2657 					   PCI_DMA_FROMDEVICE);
2659 				pci_dma_sync_single_for_device(nic->pdev,
2660 				(dma_addr_t) rxdp3->Buffer0_ptr,
2661 				    BUF0_LEN, PCI_DMA_FROMDEVICE);
2662 			if( (rxdp3->Buffer0_ptr == 0) ||
2663 				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2664 				goto pci_map_failed;
2666 			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2667 			if (nic->rxd_mode == RXD_MODE_3B) {
2668 				/* Two buffer mode */
2671 				 * Buffer2 will have L3/L4 header plus
2674 				rxdp3->Buffer2_ptr = pci_map_single
2675 				(nic->pdev, skb->data, dev->mtu + 4,
2676 						PCI_DMA_FROMDEVICE);
2678 				if( (rxdp3->Buffer2_ptr == 0) ||
2679 					(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2680 					goto pci_map_failed;
2682 				rxdp3->Buffer1_ptr =
2683 						pci_map_single(nic->pdev,
2685 						PCI_DMA_FROMDEVICE);
				/* Buffer1 mapping failed: undo the Buffer2
				 * mapping before the common failure path. */
2686 				if( (rxdp3->Buffer1_ptr == 0) ||
2687 					(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2690 						(dma_addr_t)rxdp3->Buffer2_ptr,
2692 						PCI_DMA_FROMDEVICE);
2693 					goto pci_map_failed;
2695 				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2696 				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2699 				rxdp->Control_2 |= s2BIT(0);
2701 			rxdp->Host_Control = (unsigned long) (skb);
		/* Ownership of the very first RxD is deferred (see below)
		 * so the NIC does not start DMA before the batch is ready;
		 * rxsync_frequency batches the ownership hand-offs. */
2702 			if (alloc_tab & ((1 << rxsync_frequency) - 1))
2703 				rxdp->Control_1 |= RXD_OWN_XENA;
2705 			if (off == (rxd_count[nic->rxd_mode] + 1))
2707 		mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2709 		rxdp->Control_2 |= SET_RXD_MARKER;
2710 		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2713 				first_rxdp->Control_1 |= RXD_OWN_XENA;
2717 		atomic_inc(&nic->rx_bufs_left[ring_no]);
2722 	/* Transfer ownership of first descriptor to adapter just before
2723 	 * exiting. Before that, use memory barrier so that ownership
2724 	 * and other fields are seen by adapter correctly.
2728 		first_rxdp->Control_1 |= RXD_OWN_XENA;
	/* pci_map_failed: account the failure, free the skb, return error. */
2733 	stats->pci_map_fail_cnt++;
2734 	stats->mem_freed += skb->truesize;
2735 	dev_kfree_skb_irq(skb);
	/* free_rxd_blk - release every skb/DMA mapping in one Rx block of the
	 * given ring, honouring the descriptor layout of the current rxd_mode. */
2739 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2741 	struct net_device *dev = sp->dev;
2743 	struct sk_buff *skb;
2745 	struct mac_info *mac_control;
2750 	mac_control = &sp->mac_control;
2751 	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2752 		rxdp = mac_control->rings[ring_no].
2753 			rx_blocks[blk].rxds[j].virt_addr;
		/* Host_Control holds the skb pointer set by fill_rx_buffers(). */
2754 		skb = (struct sk_buff *)
2755 			((unsigned long) rxdp->Host_Control);
2759 		if (sp->rxd_mode == RXD_MODE_1) {
2760 			rxdp1 = (struct RxD1*)rxdp;
			/* Unmap the single buffer (size mirrors the one used
			 * at map time in fill_rx_buffers). */
2761 			pci_unmap_single(sp->pdev, (dma_addr_t)
2764 				HEADER_ETHERNET_II_802_3_SIZE
2765 				+ HEADER_802_2_SIZE +
2767 				PCI_DMA_FROMDEVICE);
2768 			memset(rxdp, 0, sizeof(struct RxD1));
2769 		} else if(sp->rxd_mode == RXD_MODE_3B) {
2770 			rxdp3 = (struct RxD3*)rxdp;
2771 			ba = &mac_control->rings[ring_no].
			/* Three separate mappings in 3B mode; unmap each. */
2773 			pci_unmap_single(sp->pdev, (dma_addr_t)
2776 				PCI_DMA_FROMDEVICE);
2777 			pci_unmap_single(sp->pdev, (dma_addr_t)
2780 				PCI_DMA_FROMDEVICE);
2781 			pci_unmap_single(sp->pdev, (dma_addr_t)
2784 				PCI_DMA_FROMDEVICE);
2785 			memset(rxdp, 0, sizeof(struct RxD3));
2787 		sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
2789 		atomic_dec(&sp->rx_bufs_left[ring_no]);
2794 * free_rx_buffers - Frees all Rx buffers
2795 * @sp: device private variable.
2797 * This function will free all Rx buffers allocated by host.
2802 static void free_rx_buffers(struct s2io_nic *sp)
2804 	struct net_device *dev = sp->dev;
2805 	int i, blk = 0, buf_cnt = 0;
2806 	struct mac_info *mac_control;
2807 	struct config_param *config;
2809 	mac_control = &sp->mac_control;
2810 	config = &sp->config;
	/* Free every block of every ring, then reset the ring's get/put
	 * cursors and its outstanding-buffer count. */
2812 	for (i = 0; i < config->rx_ring_num; i++) {
2813 		for (blk = 0; blk < rx_ring_sz[i]; blk++)
2814 			free_rxd_blk(sp,i,blk);
2816 		mac_control->rings[i].rx_curr_put_info.block_index = 0;
2817 		mac_control->rings[i].rx_curr_get_info.block_index = 0;
2818 		mac_control->rings[i].rx_curr_put_info.offset = 0;
2819 		mac_control->rings[i].rx_curr_get_info.offset = 0;
2820 		atomic_set(&sp->rx_bufs_left[i], 0);
2821 		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2822 			  dev->name, buf_cnt, i);
2827 * s2io_poll - Rx interrupt handler for NAPI support
2828 * @napi : pointer to the napi structure.
2829 * @budget : The number of packets that were budgeted to be processed
2830 * during one pass through the 'Poll" function.
2832 * Comes into picture only if NAPI support has been incorporated. It does
2833 * the same thing that rx_intr_handler does, but not in an interrupt context.
2834 * also It will process only a given number of packets.
2836 * 0 on success and 1 if there are No Rx packets to be processed.
2839 static int s2io_poll(struct napi_struct *napi, int budget)
2841 	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2842 	struct net_device *dev = nic->dev;
2843 	int pkt_cnt = 0, org_pkts_to_process;
2844 	struct mac_info *mac_control;
2845 	struct config_param *config;
2846 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2849 	mac_control = &nic->mac_control;
2850 	config = &nic->config;
2852 	nic->pkts_to_process = budget;
2853 	org_pkts_to_process = nic->pkts_to_process;
	/* Acknowledge pending Rx traffic interrupts; the readl flushes the
	 * posted write to the device. */
2855 	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2856 	readl(&bar0->rx_traffic_int);
	/* Process each ring until the NAPI budget is exhausted. */
2858 	for (i = 0; i < config->rx_ring_num; i++) {
2859 		rx_intr_handler(&mac_control->rings[i]);
2860 		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2861 		if (!nic->pkts_to_process) {
2862 			/* Quota for the current iteration has been met */
	/* Budget not exhausted: complete NAPI, replenish Rx buffers and
	 * re-enable Rx interrupts. */
2867 	netif_rx_complete(dev, napi);
2869 	for (i = 0; i < config->rx_ring_num; i++) {
2870 		if (fill_rx_buffers(nic, i) == -ENOMEM) {
2871 			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2872 			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2876 	/* Re enable the Rx interrupts. */
2877 	writeq(0x0, &bar0->rx_traffic_mask);
2878 	readl(&bar0->rx_traffic_mask);
	/* Budget exhausted: refill buffers but stay in polling mode. */
2882 	for (i = 0; i < config->rx_ring_num; i++) {
2883 		if (fill_rx_buffers(nic, i) == -ENOMEM) {
2884 			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2885 			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
2892 #ifdef CONFIG_NET_POLL_CONTROLLER
2894 * s2io_netpoll - netpoll event handler entry point
2895 * @dev : pointer to the device structure.
2897 * This function will be called by upper layer to check for events on the
2898 * interface in situations where interrupts are disabled. It is used for
2899 * specific in-kernel networking tasks, such as remote consoles and kernel
2900 * debugging over the network (example netdump in RedHat).
2902 static void s2io_netpoll(struct net_device *dev)
2904 	struct s2io_nic *nic = dev->priv;
2905 	struct mac_info *mac_control;
2906 	struct config_param *config;
2907 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
2908 	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	/* Device is offline (e.g. after an AER event): nothing to poll. */
2911 	if (pci_channel_offline(nic->pdev))
	/* Netpoll runs with the device IRQ disabled; work is done inline. */
2914 	disable_irq(dev->irq);
2916 	mac_control = &nic->mac_control;
2917 	config = &nic->config;
	/* Acknowledge all pending Rx/Tx traffic interrupts. */
2919 	writeq(val64, &bar0->rx_traffic_int);
2920 	writeq(val64, &bar0->tx_traffic_int);
2922 	/* we need to free up the transmitted skbufs or else netpoll will
2923 	 * run out of skbs and will fail and eventually netpoll application such
2924 	 * as netdump will fail.
2926 	for (i = 0; i < config->tx_fifo_num; i++)
2927 		tx_intr_handler(&mac_control->fifos[i]);
2929 	/* check for received packet and indicate up to network */
2930 	for (i = 0; i < config->rx_ring_num; i++)
2931 		rx_intr_handler(&mac_control->rings[i]);
	/* Replenish the Rx rings; -ENOMEM is only logged, not fatal here. */
2933 	for (i = 0; i < config->rx_ring_num; i++) {
2934 		if (fill_rx_buffers(nic, i) == -ENOMEM) {
2935 			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2936 			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
2940 	enable_irq(dev->irq);
2946 * rx_intr_handler - Rx interrupt handler
2947 * @nic: device private variable.
2949 * If the interrupt is because of a received frame or if the
2950 * receive ring contains fresh as yet un-processed frames,this function is
2951 * called. It picks out the RxD at which place the last Rx processing had
2952 * stopped and sends the skb to the OSM's Rx handler and then increments
2957 static void rx_intr_handler(struct ring_info *ring_data)
2959 	struct s2io_nic *nic = ring_data->nic;
2960 	struct net_device *dev = (struct net_device *) nic->dev;
2961 	int get_block, put_block, put_offset;
2962 	struct rx_curr_get_info get_info, put_info;
2964 	struct sk_buff *skb;
2970 	spin_lock(&nic->rx_lock);
	/* Snapshot get/put cursors; put_info is copied so later compares
	 * use a consistent view. */
2972 	get_info = ring_data->rx_curr_get_info;
2973 	get_block = get_info.block_index;
2974 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2975 	put_block = put_info.block_index;
2976 	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	/* put_pos is updated by fill_rx_buffers(); take put_lock on the
	 * path where that writer uses the lock (presumably non-NAPI). */
2978 		spin_lock(&nic->put_lock);
2979 		put_offset = ring_data->put_pos;
2980 		spin_unlock(&nic->put_lock);
2982 		put_offset = ring_data->put_pos;
	/* Process descriptors the NIC has handed back to the host. */
2984 	while (RXD_IS_UP2DT(rxdp)) {
2986 		 * If your are next to put index then it's
2987 		 * FIFO full condition
2989 		if ((get_block == put_block) &&
2990 			(get_info.offset + 1) == put_info.offset) {
2991 			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2994 		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		/* A NULL skb here indicates descriptor corruption; abort. */
2996 			DBG_PRINT(ERR_DBG, "%s: The skb is ",
2998 			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2999 			spin_unlock(&nic->rx_lock);
3002 		if (nic->rxd_mode == RXD_MODE_1) {
3003 			rxdp1 = (struct RxD1*)rxdp;
3004 			pci_unmap_single(nic->pdev, (dma_addr_t)
3007 				HEADER_ETHERNET_II_802_3_SIZE +
3010 				PCI_DMA_FROMDEVICE);
3011 		} else if (nic->rxd_mode == RXD_MODE_3B) {
3012 			rxdp3 = (struct RxD3*)rxdp;
			/* Buffer0 stays mapped across refills: only sync it
			 * for CPU access; unmap the packet buffer. */
3013 			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
3015 				BUF0_LEN, PCI_DMA_FROMDEVICE);
3016 			pci_unmap_single(nic->pdev, (dma_addr_t)
3019 				PCI_DMA_FROMDEVICE);
3021 		prefetch(skb->data);
		/* Hand the frame to the OS-specific receive handler. */
3022 		rx_osm_handler(ring_data, rxdp);
3024 		ring_data->rx_curr_get_info.offset = get_info.offset;
3025 		rxdp = ring_data->rx_blocks[get_block].
3026 				rxds[get_info.offset].virt_addr;
		/* Wrap within-block offset and, when needed, the block index. */
3027 		if (get_info.offset == rxd_count[nic->rxd_mode]) {
3028 			get_info.offset = 0;
3029 			ring_data->rx_curr_get_info.offset = get_info.offset;
3031 			if (get_block == ring_data->block_count)
3033 			ring_data->rx_curr_get_info.block_index = get_block;
3034 			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		/* Honour the NAPI budget and the indicate_max_pkts limit. */
3037 		nic->pkts_to_process -= 1;
3038 		if ((napi) && (!nic->pkts_to_process))
3041 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
3045 	/* Clear all LRO sessions before exiting */
3046 	for (i=0; i<MAX_LRO_SESSIONS; i++) {
3047 		struct lro *lro = &nic->lro0_n[i];
3049 			update_L3L4_header(nic, lro);
3050 			queue_rx_frame(lro->parent);
3051 			clear_lro_session(lro);
3056 	spin_unlock(&nic->rx_lock);
3060 * tx_intr_handler - Transmit interrupt handler
3061 * @nic : device private variable
3063 * If an interrupt was raised to indicate DMA complete of the
3064 * Tx packet, this function is called. It identifies the last TxD
3065 * whose buffer was freed and frees all skbs whose data have already
3066 * DMA'ed into the NICs internal memory.
3071 static void tx_intr_handler(struct fifo_info *fifo_data)
3073 	struct s2io_nic *nic = fifo_data->nic;
3074 	struct tx_curr_get_info get_info, put_info;
3075 	struct sk_buff *skb = NULL;
3078 	unsigned long flags = 0;
	/* trylock: if another context already holds tx_lock, skip this run
	 * rather than spin in (possibly) interrupt context. */
3081 	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
3084 	get_info = fifo_data->tx_curr_get_info;
3085 	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
3086 	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	/* Reclaim descriptors the NIC has finished with (ownership bit
	 * cleared) up to the put pointer. */
3088 	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
3089 	       (get_info.offset != put_info.offset) &&
3090 	       (txdlp->Host_Control)) {
3091 		/* Check for TxD errors */
3092 		if (txdlp->Control_1 & TXD_T_CODE) {
3093 			unsigned long long err;
3094 			err = txdlp->Control_1 & TXD_T_CODE;
3096 				nic->mac_control.stats_info->sw_stat.
3100 			/* update t_code statistics */
3101 			err_mask = err >> 48;
3104 				nic->mac_control.stats_info->sw_stat.
3109 				nic->mac_control.stats_info->sw_stat.
3110 					tx_desc_abort_cnt++;
3114 				nic->mac_control.stats_info->sw_stat.
3115 					tx_parity_err_cnt++;
3119 				nic->mac_control.stats_info->sw_stat.
3124 				nic->mac_control.stats_info->sw_stat.
3125 					tx_list_proc_err_cnt++;
		/* Unmap the buffers and recover the transmitted skb. */
3130 		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
3132 			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3133 			DBG_PRINT(ERR_DBG, "%s: Null skb ",
3135 			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3140 		/* Updating the statistics block */
3141 		nic->stats.tx_bytes += skb->len;
3142 		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
3143 		dev_kfree_skb_irq(skb);
		/* Advance the get pointer, wrapping at the FIFO length. */
3146 		if (get_info.offset == get_info.fifo_len + 1)
3147 			get_info.offset = 0;
3148 		txdlp = (struct TxD *) fifo_data->list_info
3149 		    [get_info.offset].list_virt_addr;
3150 		fifo_data->tx_curr_get_info.offset =
	/* Descriptors were freed: the stack's Tx queue may be restarted. */
3154 	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3156 	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3160 * s2io_mdio_write - Function to write in to MDIO registers
3161 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3162 * @addr : address value
3163 * @value : data value
3164 * @dev : pointer to net_device structure
3166 * This function is used to write values to the MDIO registers
3169 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3172 	struct s2io_nic *sp = dev->priv;
3173 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3175 	//address transaction
	/* Clause-45 style access: first latch the register address in the
	 * selected MMD, then start the transaction (START_TRANS 0xE). */
3176 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3177 			| MDIO_MMD_DEV_ADDR(mmd_type)
3178 			| MDIO_MMS_PRT_ADDR(0x0);
3179 	writeq(val64, &bar0->mdio_control);
3180 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3181 	writeq(val64, &bar0->mdio_control);
	/* Data transaction: write the 16-bit value to the latched address. */
3186 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3187 			| MDIO_MMD_DEV_ADDR(mmd_type)
3188 			| MDIO_MMS_PRT_ADDR(0x0)
3189 			| MDIO_MDIO_DATA(value)
3190 			| MDIO_OP(MDIO_OP_WRITE_TRANS);
3191 	writeq(val64, &bar0->mdio_control);
3192 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3193 	writeq(val64, &bar0->mdio_control);
	/* Follow-up read transaction on the same address (presumably to
	 * verify/complete the write cycle — confirm against hardware spec). */
3197 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3198 			| MDIO_MMD_DEV_ADDR(mmd_type)
3199 			| MDIO_MMS_PRT_ADDR(0x0)
3200 			| MDIO_OP(MDIO_OP_READ_TRANS);
3201 	writeq(val64, &bar0->mdio_control);
3202 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3203 	writeq(val64, &bar0->mdio_control);
3209 * s2io_mdio_read - Function to read from MDIO registers
3210 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3211 * @addr : address value
3212 * @dev : pointer to net_device structure
3214 * This function is used to read values from the MDIO registers
3217 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3221 	struct s2io_nic *sp = dev->priv;
3222 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3224 	/* address transaction */
	/* Latch the register address within the selected MMD, then kick
	 * the transaction with START_TRANS(0xE). */
3225 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3226 			| MDIO_MMD_DEV_ADDR(mmd_type)
3227 			| MDIO_MMS_PRT_ADDR(0x0);
3228 	writeq(val64, &bar0->mdio_control);
3229 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3230 	writeq(val64, &bar0->mdio_control);
3233 	/* Data transaction */
3235 	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3236 			| MDIO_MMD_DEV_ADDR(mmd_type)
3237 			| MDIO_MMS_PRT_ADDR(0x0)
3238 			| MDIO_OP(MDIO_OP_READ_TRANS);
3239 	writeq(val64, &bar0->mdio_control);
3240 	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3241 	writeq(val64, &bar0->mdio_control);
3244 	/* Read the value from regs */
	/* The 16-bit register value sits in bits 31:16 of mdio_control. */
3245 	rval64 = readq(&bar0->mdio_control);
3246 	rval64 = rval64 & 0xFFFF0000;
3247 	rval64 = rval64 >> 16;
3251 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3252 * @counter : counter value to be updated
3253 * @flag : flag to indicate the status
3254 * @type : counter type
3256 * This function is to check the status of the xpak counters value
	/* s2io_chk_xpak_counter - bump an XPAK alarm counter and maintain a
	 * 2-bit-per-counter state field in *regs_stat; when a counter's state
	 * reaches its threshold a type-specific warning is logged. */
3260 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
	/* Build a 2-bit mask at position 'index' within *regs_stat. */
3265 	for(i = 0; i <index; i++)
3270 		*counter = *counter + 1;
3271 		val64 = *regs_stat & mask;
3272 		val64 = val64 >> (index * 0x2);
		/* type selects which alarm message applies (temperature,
		 * bias current or output power). */
3279 				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3280 					"service. Excessive temperatures may "
3281 					"result in premature transceiver "
3285 				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3286 					"service Excessive bias currents may "
3287 					"indicate imminent laser diode "
3291 				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3292 					"service Excessive laser output "
3293 					"power may saturate far-end "
3297 				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
		/* Write the updated 2-bit state back into *regs_stat. */
3302 		val64 = val64 << (index * 0x2);
3303 		*regs_stat = (*regs_stat & (~mask)) | (val64);
		/* Alarm not set: clear this counter's state bits. */
3306 		*regs_stat = *regs_stat & (~mask);
3311 * s2io_updt_xpak_counter - Function to update the xpak counters
3312 * @dev : pointer to net_device struct
3314 * This function is to update the status of the xpak counters value
3317 static void s2io_updt_xpak_counter(struct net_device *dev)
3325 	struct s2io_nic *sp = dev->priv;
3326 	struct stat_block *stat_info = sp->mac_control.stats_info;
3328 	/* Check the communication with the MDIO slave */
	/* All-ones/all-zeros reads indicate a dead or absent MDIO slave. */
3331 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3332 	if((val64 == 0xFFFF) || (val64 == 0x0000))
3334 		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3335 			  "Returned %llx\n", (unsigned long long)val64);
3339 	/* Check for the expecte value of 2040 at PMA address 0x0000 */
3342 		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3343 		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3344 			  (unsigned long long)val64);
3348 	/* Loading the DOM register to MDIO register */
3350 	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3351 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3353 	/* Reading the Alarm flags */
3356 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	/* Each CHECKBIT below examines one alarm/warning flag bit; the
	 * "high" alarms feed the stateful checker, the "low" ones are
	 * simple counters. */
3358 	flag = CHECKBIT(val64, 0x7);
3360 	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3361 				&stat_info->xpak_stat.xpak_regs_stat,
3364 	if(CHECKBIT(val64, 0x6))
3365 		stat_info->xpak_stat.alarm_transceiver_temp_low++;
3367 	flag = CHECKBIT(val64, 0x3);
3369 	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3370 				&stat_info->xpak_stat.xpak_regs_stat,
3373 	if(CHECKBIT(val64, 0x2))
3374 		stat_info->xpak_stat.alarm_laser_bias_current_low++;
3376 	flag = CHECKBIT(val64, 0x1);
3378 	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3379 				&stat_info->xpak_stat.xpak_regs_stat,
3382 	if(CHECKBIT(val64, 0x0))
3383 		stat_info->xpak_stat.alarm_laser_output_power_low++;
3385 	/* Reading the Warning flags */
3388 	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3390 	if(CHECKBIT(val64, 0x7))
3391 		stat_info->xpak_stat.warn_transceiver_temp_high++;
3393 	if(CHECKBIT(val64, 0x6))
3394 		stat_info->xpak_stat.warn_transceiver_temp_low++;
3396 	if(CHECKBIT(val64, 0x3))
3397 		stat_info->xpak_stat.warn_laser_bias_current_high++;
3399 	if(CHECKBIT(val64, 0x2))
3400 		stat_info->xpak_stat.warn_laser_bias_current_low++;
3402 	if(CHECKBIT(val64, 0x1))
3403 		stat_info->xpak_stat.warn_laser_output_power_high++;
3405 	if(CHECKBIT(val64, 0x0))
3406 		stat_info->xpak_stat.warn_laser_output_power_low++;
3410 * wait_for_cmd_complete - waits for a command to complete.
3411 * @sp : private member of the device structure, which is a pointer to the
3412 * s2io_nic structure.
3413 * Description: Function that waits for a command to Write into RMAC
3414 * ADDR DATA registers to be completed and returns either success or
3415 * error depending on whether the command was complete or not.
3417 * SUCCESS on success and FAILURE on failure.
3420 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3423 	int ret = FAILURE, cnt = 0, delay = 1;
	/* Only the two defined bit states are accepted. */
3426 	if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
	/* Poll the register until busy_bit reaches the requested state
	 * (cleared for S2IO_BIT_RESET, set otherwise). */
3430 		val64 = readq(addr);
3431 		if (bit_state == S2IO_BIT_RESET) {
3432 			if (!(val64 & busy_bit)) {
3437 			if (!(val64 & busy_bit)) {
3454 * check_pci_device_id - Checks if the device id is supported
3456 * Description: Function to check if the pci device id is supported by driver.
3457 * Return value: Actual device id if supported else PCI_ANY_ID
3459 static u16 check_pci_device_id(u16 id)
	/* Map known Herc device IDs to XFRAME_II_DEVICE and classic S2IO
	 * IDs to XFRAME_I_DEVICE; unknown IDs fall through to PCI_ANY_ID
	 * (per the kernel-doc above). */
3462 		case PCI_DEVICE_ID_HERC_WIN:
3463 		case PCI_DEVICE_ID_HERC_UNI:
3464 			return XFRAME_II_DEVICE;
3465 		case PCI_DEVICE_ID_S2IO_UNI:
3466 		case PCI_DEVICE_ID_S2IO_WIN:
3467 			return XFRAME_I_DEVICE;
3474 * s2io_reset - Resets the card.
3475 * @sp : private member of the device structure.
3476 * Description: Function to Reset the card. This function then also
3477 * restores the previously saved PCI configuration space registers as
3478 * the card reset also resets the configuration space.
3483 static void s2io_reset(struct s2io_nic * sp)
3485 	struct XENA_dev_config __iomem *bar0 = sp->bar0;
3490 	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
3491 	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;
3493 	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3494 			__FUNCTION__, sp->dev->name);
3496 	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3497 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
	/* Issue the software reset; config space is wiped by it. */
3499 	val64 = SW_RESET_ALL;
3500 	writeq(val64, &bar0->sw_reset);
3501 	if (strstr(sp->product_name, "CX4")) {
	/* Re-init config space until the device ID reads back sane, up to
	 * S2IO_MAX_PCI_CONFIG_SPACE_REINIT attempts. */
3505 	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3507 		/* Restore the PCI state saved during initialization. */
3508 		pci_restore_state(sp->pdev);
3509 		pci_read_config_word(sp->pdev, 0x2, &val16);
3510 		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3515 	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3516 		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	/* Reinstate the PCI-X command register saved above. */
3519 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3523 	/* Set swapper to enable I/O register access */
3524 	s2io_set_swapper(sp);
3526 	/* restore mac_addr entries */
3527 	do_s2io_restore_unicast_mc(sp);
3529 	/* Restore the MSIX table entries from local variables */
3530 	restore_xmsi_data(sp);
3532 	/* Clear certain PCI/PCI-X fields after reset */
3533 	if (sp->device_type == XFRAME_II_DEVICE) {
3534 		/* Clear "detected parity error" bit */
3535 		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3537 		/* Clearing PCIX Ecc status register */
3538 		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3540 		/* Clearing PCI_STATUS error reflected here */
3541 		writeq(s2BIT(62), &bar0->txpic_int_reg);
3544 	/* Reset device statistics maintained by OS */
3545 	memset(&sp->stats, 0, sizeof (struct net_device_stats));
	/* Preserve cumulative counters across the stat-block wipe below. */
3547 	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
3548 	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
3549 	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
3550 	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
3551 	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
3552 	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
3553 	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
3554 	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
3555 	/* save link up/down time/cnt, reset/memory/watchdog cnt */
3556 	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
3557 	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
3558 	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
3559 	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
3560 	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
3561 	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
3562 	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
3563 	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
3564 	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
3565 	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;
3567 	/* SXE-002: Configure link and activity LED to turn it off */
3568 	subid = sp->pdev->subsystem_device;
3569 	if (((subid & 0xFF) >= 0x07) &&
3570 	    (sp->device_type == XFRAME_I_DEVICE)) {
3571 		val64 = readq(&bar0->gpio_control);
3572 		val64 |= 0x0000800000000000ULL;
3573 		writeq(val64, &bar0->gpio_control);
3574 		val64 = 0x0411040400000000ULL;
3575 		writeq(val64, (void __iomem *)bar0 + 0x2700);
3579 	 * Clear spurious ECC interrupts that would have occured on
3580 	 * XFRAME II cards after reset.
3582 	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Write-back of the read value clears the latched bits. */
3583 		val64 = readq(&bar0->pcc_err_reg);
3584 		writeq(val64, &bar0->pcc_err_reg);
3587 	sp->device_enabled_once = FALSE;
3591 * s2io_set_swapper - to set the swapper control on the card
3592 * @sp : private member of the device structure,
3593 * pointer to the s2io_nic structure.
3594 * Description: Function to set the swapper control on the card
3595 * correctly depending on the 'endianness' of the system.
3597 * SUCCESS on success and FAILURE on failure.
3600 static int s2io_set_swapper(struct s2io_nic * sp)
3602 struct net_device *dev = sp->dev;
3603 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3604 u64 val64, valt, valr;
3607 * Set proper endian settings and verify the same by reading
3608 * the PIF Feed-back register.
3611 val64 = readq(&bar0->pif_rd_swapper_fb);
3612 if (val64 != 0x0123456789ABCDEFULL) {
3614 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3615 0x8100008181000081ULL, /* FE=1, SE=0 */
3616 0x4200004242000042ULL, /* FE=0, SE=1 */
3617 0}; /* FE=0, SE=0 */
3620 writeq(value[i], &bar0->swapper_ctrl);
3621 val64 = readq(&bar0->pif_rd_swapper_fb);
3622 if (val64 == 0x0123456789ABCDEFULL)
3627 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3629 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3630 (unsigned long long) val64);
3635 valr = readq(&bar0->swapper_ctrl);
3638 valt = 0x0123456789ABCDEFULL;
3639 writeq(valt, &bar0->xmsi_address);
3640 val64 = readq(&bar0->xmsi_address);
3644 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3645 0x0081810000818100ULL, /* FE=1, SE=0 */
3646 0x0042420000424200ULL, /* FE=0, SE=1 */
3647 0}; /* FE=0, SE=0 */
3650 writeq((value[i] | valr), &bar0->swapper_ctrl);
3651 writeq(valt, &bar0->xmsi_address);
3652 val64 = readq(&bar0->xmsi_address);
3658 unsigned long long x = val64;
3659 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3660 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3664 val64 = readq(&bar0->swapper_ctrl);
3665 val64 &= 0xFFFF000000000000ULL;
3669 * The device by default set to a big endian format, so a
3670 * big endian driver need not set anything.
3672 val64 |= (SWAPPER_CTRL_TXP_FE |
3673 SWAPPER_CTRL_TXP_SE |
3674 SWAPPER_CTRL_TXD_R_FE |
3675 SWAPPER_CTRL_TXD_W_FE |
3676 SWAPPER_CTRL_TXF_R_FE |
3677 SWAPPER_CTRL_RXD_R_FE |
3678 SWAPPER_CTRL_RXD_W_FE |
3679 SWAPPER_CTRL_RXF_W_FE |
3680 SWAPPER_CTRL_XMSI_FE |
3681 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3682 if (sp->config.intr_type == INTA)
3683 val64 |= SWAPPER_CTRL_XMSI_SE;
3684 writeq(val64, &bar0->swapper_ctrl);
3687 * Initially we enable all bits to make it accessible by the
3688 * driver, then we selectively enable only those bits that
3691 val64 |= (SWAPPER_CTRL_TXP_FE |
3692 SWAPPER_CTRL_TXP_SE |
3693 SWAPPER_CTRL_TXD_R_FE |
3694 SWAPPER_CTRL_TXD_R_SE |
3695 SWAPPER_CTRL_TXD_W_FE |
3696 SWAPPER_CTRL_TXD_W_SE |
3697 SWAPPER_CTRL_TXF_R_FE |
3698 SWAPPER_CTRL_RXD_R_FE |
3699 SWAPPER_CTRL_RXD_R_SE |
3700 SWAPPER_CTRL_RXD_W_FE |
3701 SWAPPER_CTRL_RXD_W_SE |
3702 SWAPPER_CTRL_RXF_W_FE |
3703 SWAPPER_CTRL_XMSI_FE |
3704 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3705 if (sp->config.intr_type == INTA)
3706 val64 |= SWAPPER_CTRL_XMSI_SE;
3707 writeq(val64, &bar0->swapper_ctrl);
3709 val64 = readq(&bar0->swapper_ctrl);
3712 * Verifying if endian settings are accurate by reading a
3713 * feedback register.
3715 val64 = readq(&bar0->pif_rd_swapper_fb);
3716 if (val64 != 0x0123456789ABCDEFULL) {
3717 /* Endian settings are incorrect, calls for another dekko. */
3718 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3720 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3721 (unsigned long long) val64);
/* Poll the XMSI access register until the busy bit (s2BIT(15)) deasserts.
 * Returns 0 on success (ret is initialized to 0); logs an error for MSI-X
 * vector @i when the access does not complete. Loop/return lines are
 * elided in this excerpt. */
3728 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3730 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3732 int ret = 0, cnt = 0;
3735 val64 = readq(&bar0->xmsi_access);
3736 if (!(val64 & s2BIT(15)))
3742 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
/* Write the previously saved per-vector MSI-X address/data pairs
 * (nic->msix_info[]) back into the NIC, one vector at a time, via the
 * xmsi_address/xmsi_data/xmsi_access register window. */
3749 static void restore_xmsi_data(struct s2io_nic *nic)
3751 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3755 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3756 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3757 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
/* s2BIT(7) selects a write access, s2BIT(15) kicks off the transaction,
 * vBIT(i, 26, 6) encodes the vector index — presumably per hardware spec;
 * verify against the register manual. */
3758 val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
3759 writeq(val64, &bar0->xmsi_access);
3760 if (wait_for_msix_trans(nic, i)) {
3761 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
/* Read each MSI-X vector's address/data pair out of the NIC through the
 * xmsi_access window and cache it in nic->msix_info[], so it can later be
 * restored by restore_xmsi_data() (e.g. across a reset). */
3767 static void store_xmsi_data(struct s2io_nic *nic)
3769 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3770 u64 val64, addr, data;
3773 /* Store and display */
3774 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
/* Read access: busy/start bit only (no s2BIT(7) write flag). */
3775 val64 = (s2BIT(15) | vBIT(i, 26, 6));
3776 writeq(val64, &bar0->xmsi_access);
3777 if (wait_for_msix_trans(nic, i)) {
3778 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3781 addr = readq(&bar0->xmsi_address);
3782 data = readq(&bar0->xmsi_data);
3784 nic->msix_info[i].addr = addr;
3785 nic->msix_info[i].data = data;
/* Allocate the MSI-X bookkeeping arrays, program the Tx/Rx interrupt
 * steering matrices (tx_mat0_n / rx_mat) so FIFO i and ring j each get
 * their own vector (vector 0 is reserved; msix_indx starts at 1), then
 * enable MSI-X via pci_enable_msix(). Memory accounting is mirrored into
 * sw_stat. Some error-path and return lines are elided in this excerpt. */
3790 static int s2io_enable_msi_x(struct s2io_nic *nic)
3792 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3794 u16 msi_control; /* Temp variable */
3795 int ret, i, j, msix_indx = 1;
3797 nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
3799 if (!nic->entries) {
3800 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
3802 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
3805 nic->mac_control.stats_info->sw_stat.mem_allocated
3806 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3809 kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
3811 if (!nic->s2io_entries) {
3812 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
3814 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
/* Second allocation failed: release the first and account for it. */
3815 kfree(nic->entries);
3816 nic->mac_control.stats_info->sw_stat.mem_freed
3817 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3820 nic->mac_control.stats_info->sw_stat.mem_allocated
3821 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3823 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3824 nic->entries[i].entry = i;
3825 nic->s2io_entries[i].entry = i;
3826 nic->s2io_entries[i].arg = NULL;
3827 nic->s2io_entries[i].in_use = 0;
/* Map each Tx FIFO to its own MSI-X vector in the Tx steering matrix. */
3830 tx_mat = readq(&bar0->tx_mat0_n[0]);
3831 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3832 tx_mat |= TX_MAT_SET(i, msix_indx);
3833 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3834 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3835 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3837 writeq(tx_mat, &bar0->tx_mat0_n[0]);
/* Likewise map each Rx ring to the next free vector. */
3839 rx_mat = readq(&bar0->rx_mat);
3840 for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
3841 rx_mat |= RX_MAT_SET(j, msix_indx);
3842 nic->s2io_entries[msix_indx].arg
3843 = &nic->mac_control.rings[j];
3844 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3845 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3847 writeq(rx_mat, &bar0->rx_mat);
3849 nic->avail_msix_vectors = 0;
3850 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3851 /* We fail init if error or we get less vectors than min required */
3852 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
/* Fewer vectors than requested but enough: retry with what we got. */
3853 nic->avail_msix_vectors = ret;
3854 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3857 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3858 kfree(nic->entries);
3859 nic->mac_control.stats_info->sw_stat.mem_freed
3860 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3861 kfree(nic->s2io_entries);
3862 nic->mac_control.stats_info->sw_stat.mem_freed
3863 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3864 nic->entries = NULL;
3865 nic->s2io_entries = NULL;
3866 nic->avail_msix_vectors = 0;
3869 if (!nic->avail_msix_vectors)
3870 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3873 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3874 * in the herc NIC. (Temp change, needs to be removed later)
3876 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3877 msi_control |= 0x1; /* Enable MSI */
3878 pci_write_config_word(nic->pdev, 0x42, msi_control);
3883 /* Handle software interrupt used during MSI(X) test */
/* Records that the test interrupt arrived and wakes the waiter in
 * s2io_test_msi(). Return statement is elided in this excerpt. */
3884 static irqreturn_t s2io_test_intr(int irq, void *dev_id)
3886 struct s2io_nic *sp = dev_id;
3888 sp->msi_detected = 1;
3889 wake_up(&sp->msi_wait);
3894 /* Test interrupt path by forcing a software IRQ */
/* Registers s2io_test_intr on MSI-X vector 1, programs the scheduled
 * interrupt timer to fire an interrupt routed to that vector, then waits
 * up to HZ/10 for s2io_test_intr to set sp->msi_detected. Restores the
 * scheduled_int_ctrl register before returning. Some declarations and
 * the return paths are elided in this excerpt. */
3895 static int s2io_test_msi(struct s2io_nic *sp)
3897 struct pci_dev *pdev = sp->pdev;
3898 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3902 err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
3905 DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
3906 sp->dev->name, pci_name(pdev), pdev->irq);
3910 init_waitqueue_head (&sp->msi_wait);
3911 sp->msi_detected = 0;
/* Save the register so it can be restored after the test. */
3913 saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
3914 val64 |= SCHED_INT_CTRL_ONE_SHOT;
3915 val64 |= SCHED_INT_CTRL_TIMER_EN;
3916 val64 |= SCHED_INT_CTRL_INT2MSI(1);
3917 writeq(val64, &bar0->scheduled_int_ctrl);
3919 wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);
3921 if (!sp->msi_detected) {
3922 /* MSI(X) test failed, go back to INTx mode */
3923 DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
3924 "using MSI(X) during test\n", sp->dev->name,
3930 free_irq(sp->entries[1].vector, sp);
3932 writeq(saved64, &bar0->scheduled_int_ctrl);
/* Tear down MSI-X: free every successfully registered vector's IRQ,
 * release the bookkeeping arrays, clear the MSI enable bit at config
 * offset 0x42 (see the herc-bug note in s2io_enable_msi_x), and disable
 * MSI-X on the PCI device. Some lines are elided in this excerpt. */
3937 static void remove_msix_isr(struct s2io_nic *sp)
3942 for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
3943 if (sp->s2io_entries[i].in_use ==
3944 MSIX_REGISTERED_SUCCESS) {
3945 int vector = sp->entries[i].vector;
3946 void *arg = sp->s2io_entries[i].arg;
3947 free_irq(vector, arg);
3952 kfree(sp->s2io_entries);
3954 sp->s2io_entries = NULL;
3956 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3957 msi_control &= 0xFFFE; /* Disable MSI */
3958 pci_write_config_word(sp->pdev, 0x42, msi_control);
3960 pci_disable_msix(sp->pdev);
/* Release the legacy (INTA) interrupt line registered for this device. */
3963 static void remove_inta_isr(struct s2io_nic *sp)
3965 struct net_device *dev = sp->dev;
3967 free_irq(sp->pdev->irq, dev);
3970 /* ********************************************************* *
3971 * Functions defined below concern the OS part of the driver *
3972 * ********************************************************* */
3975 * s2io_open - open entry point of the driver
3976 * @dev : pointer to the device structure.
3978 * This function is the open entry point of the driver. It mainly calls a
3979 * function to allocate Rx buffers and inserts them into the buffer
3980 * descriptors and then enables the Rx part of the NIC.
3982 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/* netdev open entry point: force carrier off, try to bring up MSI-X
 * (falling back to INTA if the MSI-X self-test fails), disable NAPI for
 * non-INTA modes, initialize the hardware via s2io_card_up(), program the
 * unicast MAC address, and start the Tx queues. The error path frees the
 * MSI-X arrays and accounts the freed memory. Several lines (returns,
 * closing braces) are elided in this excerpt. */
3986 static int s2io_open(struct net_device *dev)
3988 struct s2io_nic *sp = dev->priv;
3992 * Make sure you have link off by default every time
3993 * Nic is initialized
3995 netif_carrier_off(dev);
3996 sp->last_link_state = 0;
3998 if (sp->config.intr_type == MSI_X) {
3999 int ret = s2io_enable_msi_x(sp);
4002 ret = s2io_test_msi(sp);
4003 /* rollback MSI-X, will re-enable during add_isr() */
4004 remove_msix_isr(sp);
4009 "%s: MSI-X requested but failed to enable\n",
4011 sp->config.intr_type = INTA;
4015 /* NAPI doesn't work well with MSI(X) */
4016 if (sp->config.intr_type != INTA) {
4018 sp->config.napi = 0;
4021 /* Initialize H/W and enable interrupts */
4022 err = s2io_card_up(sp);
4024 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4026 goto hw_init_failed;
4029 if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
4030 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
4033 goto hw_init_failed;
4035 s2io_start_all_tx_queue(sp);
/* hw_init_failed error path: undo the MSI-X allocations. */
4039 if (sp->config.intr_type == MSI_X) {
4042 sp->mac_control.stats_info->sw_stat.mem_freed
4043 += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
4045 if (sp->s2io_entries) {
4046 kfree(sp->s2io_entries);
4047 sp->mac_control.stats_info->sw_stat.mem_freed
4048 += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
4055 * s2io_close - close entry point of the driver
4056 * @dev : device pointer.
4058 * This is the stop entry point of the driver. It needs to undo exactly
4059 * whatever was done by the open entry point, thus it's usually referred to
4060 * as the close function. Among other things this function mainly stops the
4061 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4063 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/* netdev stop entry point: bail out if the card is already down, stop all
 * Tx queues, delete every populated MAC filter entry, then (in lines
 * elided from this excerpt) reset the card and free buffers. */
4067 static int s2io_close(struct net_device *dev)
4069 struct s2io_nic *sp = dev->priv;
4070 struct config_param *config = &sp->config;
4074 /* Return if the device is already closed *
4075 * Can happen when s2io_card_up failed in change_mtu *
4077 if (!is_s2io_card_up(sp))
4080 s2io_stop_all_tx_queue(sp);
4081 /* delete all populated mac entries */
/* Slot 0 is skipped — presumably the device's own address; confirm
 * against do_s2io_prog_unicast in the full source. */
4082 for (offset = 1; offset < config->max_mc_addr; offset++) {
4083 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4084 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4085 do_s2io_delete_unicast_mc(sp, tmp64);
4088 /* Reset card, kill tasklet and free Tx and Rx buffers. */
4095 * s2io_xmit - Tx entry point of the driver
4096 * @skb : the socket buffer containing the Tx data.
4097 * @dev : device pointer.
4099 * This function is the Tx entry point of the driver. S2IO NIC supports
4100 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4101 * NOTE: when device can't queue the pkt, just the trans_start variable will
4104 * 0 on success & 1 on failure.
/* hard_start_xmit: pick a Tx FIFO (default TCP/UDP port-hash steering or
 * skb->priority steering), claim a TxD under the FIFO lock, fill it for
 * LSO/checksum/VLAN/UFO as needed, DMA-map the linear part and each
 * fragment, then ring the FIFO doorbell. Many lines (declarations,
 * closing braces, some branches) are elided in this excerpt. */
4107 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4109 struct s2io_nic *sp = dev->priv;
4110 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4113 struct TxFIFO_element __iomem *tx_fifo;
4114 unsigned long flags = 0;
4116 struct fifo_info *fifo = NULL;
4117 struct mac_info *mac_control;
4118 struct config_param *config;
4119 int do_spin_lock = 1;
4121 int enable_per_list_interrupt = 0;
4122 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4124 mac_control = &sp->mac_control;
4125 config = &sp->config;
4127 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
/* Drop zero-length skbs outright. */
4129 if (unlikely(skb->len <= 0)) {
4130 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4131 dev_kfree_skb_any(skb);
4135 if (!is_s2io_card_up(sp)) {
4136 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4143 if (sp->vlgrp && vlan_tx_tag_present(skb))
4144 vlan_tag = vlan_tx_tag_get(skb);
/* FIFO steering: hash non-fragmented IPv4 TCP/UDP flows on source port. */
4145 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4146 if (skb->protocol == htons(ETH_P_IP)) {
4151 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4152 th = (struct tcphdr *)(((unsigned char *)ip) +
4155 if (ip->protocol == IPPROTO_TCP) {
4156 queue_len = sp->total_tcp_fifos;
4157 queue = (ntohs(th->source) +
4159 sp->fifo_selector[queue_len - 1];
4160 if (queue >= queue_len)
4161 queue = queue_len - 1;
4162 } else if (ip->protocol == IPPROTO_UDP) {
4163 queue_len = sp->total_udp_fifos;
4164 queue = (ntohs(th->source) +
4166 sp->fifo_selector[queue_len - 1];
4167 if (queue >= queue_len)
4168 queue = queue_len - 1;
4169 queue += sp->udp_fifo_idx;
4170 if (skb->len > 1024)
4171 enable_per_list_interrupt = 1;
4176 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4177 /* get fifo number based on skb->priority value */
4178 queue = config->fifo_mapping
4179 [skb->priority & (MAX_TX_FIFOS - 1)];
4180 fifo = &mac_control->fifos[queue];
4183 spin_lock_irqsave(&fifo->tx_lock, flags);
/* Lockless-Tx path (do_spin_lock == 0, presumably): try-lock instead. */
4185 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4186 return NETDEV_TX_LOCKED;
4189 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
4190 if (sp->config.multiq) {
4191 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4192 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4193 return NETDEV_TX_BUSY;
4197 if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4198 if (netif_queue_stopped(dev)) {
4199 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4200 return NETDEV_TX_BUSY;
4204 put_off = (u16) fifo->tx_curr_put_info.offset;
4205 get_off = (u16) fifo->tx_curr_get_info.offset;
4206 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
4208 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
4209 /* Avoid "put" pointer going beyond "get" pointer */
4210 if (txdp->Host_Control ||
4211 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4212 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4213 s2io_stop_tx_queue(sp, fifo->fifo_no);
4215 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4219 offload_type = s2io_offload_type(skb);
4220 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
4221 txdp->Control_1 |= TXD_TCP_LSO_EN;
4222 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
4224 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4226 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4229 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4230 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4231 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4232 if (enable_per_list_interrupt)
4233 if (put_off & (queue_len >> 5))
4234 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4236 txdp->Control_2 |= TXD_VLAN_ENABLE;
4237 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4240 frg_len = skb->len - skb->data_len;
/* UFO: descriptor 0 carries an 8-byte in-band header with the IPv6
 * fragment id; the payload then starts in the next descriptor. */
4241 if (offload_type == SKB_GSO_UDP) {
4244 ufo_size = s2io_udp_mss(skb);
4246 txdp->Control_1 |= TXD_UFO_EN;
4247 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4248 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4250 fifo->ufo_in_band_v[put_off] =
4251 (u64)skb_shinfo(skb)->ip6_frag_id;
4253 fifo->ufo_in_band_v[put_off] =
4254 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
4256 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4257 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
4258 fifo->ufo_in_band_v,
4259 sizeof(u64), PCI_DMA_TODEVICE);
4260 if((txdp->Buffer_Pointer == 0) ||
4261 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4262 goto pci_map_failed;
/* Map the linear part of the skb. */
4266 txdp->Buffer_Pointer = pci_map_single
4267 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
4268 if((txdp->Buffer_Pointer == 0) ||
4269 (txdp->Buffer_Pointer == DMA_ERROR_CODE))
4270 goto pci_map_failed;
4272 txdp->Host_Control = (unsigned long) skb;
4273 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
4274 if (offload_type == SKB_GSO_UDP)
4275 txdp->Control_1 |= TXD_UFO_EN;
4277 frg_cnt = skb_shinfo(skb)->nr_frags;
4278 /* For fragmented SKB. */
4279 for (i = 0; i < frg_cnt; i++) {
4280 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4281 /* A '0' length fragment will be ignored */
4285 txdp->Buffer_Pointer = (u64) pci_map_page
4286 (sp->pdev, frag->page, frag->page_offset,
4287 frag->size, PCI_DMA_TODEVICE);
4288 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
4289 if (offload_type == SKB_GSO_UDP)
4290 txdp->Control_1 |= TXD_UFO_EN;
4292 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4294 if (offload_type == SKB_GSO_UDP)
4295 frg_cnt++; /* as Txd0 was used for inband header */
/* Ring the doorbell: point the FIFO at this TxD list and kick it. */
4297 tx_fifo = mac_control->tx_FIFO_start[queue];
4298 val64 = fifo->list_info[put_off].list_phy_addr;
4299 writeq(val64, &tx_fifo->TxDL_Pointer);
4301 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4304 val64 |= TX_FIFO_SPECIAL_FUNC;
4306 writeq(val64, &tx_fifo->List_Control);
4311 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
4313 fifo->tx_curr_put_info.offset = put_off;
4315 /* Avoid "put" pointer going beyond "get" pointer */
4316 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4317 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
4319 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4321 s2io_stop_tx_queue(sp, fifo->fifo_no);
4323 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4324 dev->trans_start = jiffies;
4325 spin_unlock_irqrestore(&fifo->tx_lock, flags);
/* pci_map_failed error path: account, stop the queue, and unlock. */
4329 stats->pci_map_fail_cnt++;
4330 s2io_stop_tx_queue(sp, fifo->fifo_no);
4331 stats->mem_freed += skb->truesize;
4333 spin_unlock_irqrestore(&fifo->tx_lock, flags);
/* Periodic alarm timer callback: run the error/alarm scan and re-arm the
 * timer to fire again in half a second. (Return type line is elided in
 * this excerpt.) */
4338 s2io_alarm_handle(unsigned long data)
4340 struct s2io_nic *sp = (struct s2io_nic *)data;
4341 struct net_device *dev = sp->dev;
4343 s2io_handle_errors(dev);
4344 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/* Replenish Rx buffers for ring @rng_n based on how depleted it is:
 * at PANIC level refill inline (guarded against the refill tasklet via
 * tasklet_status bit 0), at LOW level just schedule the tasklet, and
 * otherwise do a best-effort refill. ENOMEM is logged, not fatal.
 * Some lines (returns, braces) are elided in this excerpt. */
4347 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
4349 int rxb_size, level;
4352 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
4353 level = rx_buffer_level(sp, rxb_size, rng_n);
4355 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4357 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4358 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4359 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4360 DBG_PRINT(INFO_DBG, "Out of memory in %s",
4362 clear_bit(0, (&sp->tasklet_status));
4365 clear_bit(0, (&sp->tasklet_status));
4366 } else if (level == LOW)
4367 tasklet_schedule(&sp->task);
4369 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4370 DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
4371 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
/* Per-ring MSI-X handler: process received packets for this ring and
 * top up its Rx buffers. Ignores interrupts while the card is down.
 * Return statements are elided in this excerpt. */
4376 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4378 struct ring_info *ring = (struct ring_info *)dev_id;
4379 struct s2io_nic *sp = ring->nic;
4381 if (!is_s2io_card_up(sp))
4384 rx_intr_handler(ring);
4385 s2io_chk_rx_buffers(sp, ring->ring_no);
/* Per-FIFO MSI-X handler: reap completed Tx descriptors for this FIFO.
 * Ignores interrupts while the card is down. Return statements are
 * elided in this excerpt. */
4390 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4392 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4393 struct s2io_nic *sp = fifo->nic;
4395 if (!is_s2io_card_up(sp))
4398 tx_intr_handler(fifo);
/* Handle TXPIC/GPIO interrupts: decode link up/down events from
 * gpio_int_reg, drive the adapter enable/LED bits accordingly, notify the
 * stack via s2io_link(), and flip the GPIO interrupt masks so only the
 * opposite link transition stays unmasked. Some lines are elided in
 * this excerpt. */
4401 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4403 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4406 val64 = readq(&bar0->pic_int_status);
4407 if (val64 & PIC_INT_GPIO) {
4408 val64 = readq(&bar0->gpio_int_reg);
4409 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4410 (val64 & GPIO_INT_REG_LINK_UP)) {
4412 * This is unstable state so clear both up/down
4413 * interrupt and adapter to re-evaluate the link state.
4415 val64 |= GPIO_INT_REG_LINK_DOWN;
4416 val64 |= GPIO_INT_REG_LINK_UP;
4417 writeq(val64, &bar0->gpio_int_reg);
4418 val64 = readq(&bar0->gpio_int_mask);
4419 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4420 GPIO_INT_MASK_LINK_DOWN);
4421 writeq(val64, &bar0->gpio_int_mask);
4423 else if (val64 & GPIO_INT_REG_LINK_UP) {
4424 val64 = readq(&bar0->adapter_status);
4425 /* Enable Adapter */
4426 val64 = readq(&bar0->adapter_control);
4427 val64 |= ADAPTER_CNTL_EN;
4428 writeq(val64, &bar0->adapter_control);
4429 val64 |= ADAPTER_LED_ON;
4430 writeq(val64, &bar0->adapter_control);
4431 if (!sp->device_enabled_once)
4432 sp->device_enabled_once = 1;
4434 s2io_link(sp, LINK_UP);
4436 * unmask link down interrupt and mask link-up
4439 val64 = readq(&bar0->gpio_int_mask);
4440 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4441 val64 |= GPIO_INT_MASK_LINK_UP;
4442 writeq(val64, &bar0->gpio_int_mask);
4444 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4445 val64 = readq(&bar0->adapter_status);
4446 s2io_link(sp, LINK_DOWN);
4447 /* Link is down so unmask link up interrupt */
4448 val64 = readq(&bar0->gpio_int_mask);
4449 val64 &= ~GPIO_INT_MASK_LINK_UP;
4450 val64 |= GPIO_INT_MASK_LINK_DOWN;
4451 writeq(val64, &bar0->gpio_int_mask);
/* Turn off the adapter LED while the link is down. */
4454 val64 = readq(&bar0->adapter_control);
4455 val64 = val64 &(~ADAPTER_LED_ON);
4456 writeq(val64, &bar0->adapter_control);
4459 val64 = readq(&bar0->gpio_int_mask);
4463 * do_s2io_chk_alarm_bit - Check for alarm and increment the counter
4464 * @value: alarm bits
4465 * @addr: address value
4466 * @cnt: counter variable
4467 * Description: Check for alarm and increment the counter
4469 * 1 - if alarm bit set
4470 * 0 - if alarm bit is not set
/* Read the alarm register at @addr; if any bit in @value is set, write
 * the value back to acknowledge/clear it (and, in lines elided here,
 * bump *cnt and return 1; otherwise return 0 — see the kernel-doc
 * comment above). */
4472 static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
4473 unsigned long long *cnt)
4476 val64 = readq(addr);
4477 if ( val64 & value ) {
4478 writeq(val64, addr);
4487 * s2io_handle_errors - Xframe error indication handler
4488 * @nic: device private variable
4489 * Description: Handle alarms such as loss of link, single or
4490 * double ECC errors, critical and serious errors.
/* Walk every alarm source on the adapter (serr, parity, Tx DMA units,
 * TMAC, Tx/Rx XGXS, Rx DMA units, RMAC, memory controller), acknowledge
 * each via do_s2io_chk_alarm_bit() and bump the matching sw_stat counter.
 * Fatal alarms fall through (via lines elided in this excerpt) to the
 * reset path at the bottom, which stops Tx and schedules rst_timer_task.
 * Also drives the hourly XPAK counter refresh and link-state work. */
4494 static void s2io_handle_errors(void * dev_id)
4496 struct net_device *dev = (struct net_device *) dev_id;
4497 struct s2io_nic *sp = dev->priv;
4498 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4499 u64 temp64 = 0,val64=0;
4502 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4503 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4505 if (!is_s2io_card_up(sp))
4508 if (pci_channel_offline(sp->pdev))
4511 memset(&sw_stat->ring_full_cnt, 0,
4512 sizeof(sw_stat->ring_full_cnt));
4514 /* Handling the XPAK counters update */
4515 if(stats->xpak_timer_count < 72000) {
4516 /* waiting for an hour */
4517 stats->xpak_timer_count++;
4519 s2io_updt_xpak_counter(dev);
4520 /* reset the count to zero */
4521 stats->xpak_timer_count = 0;
4524 /* Handling link status change error Intr */
4525 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4526 val64 = readq(&bar0->mac_rmac_err_reg);
4527 writeq(val64, &bar0->mac_rmac_err_reg);
4528 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4529 schedule_work(&sp->set_link_task);
4532 /* In case of a serious error, the device will be Reset. */
4533 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4534 &sw_stat->serious_err_cnt))
4537 /* Check for data parity error */
4538 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4539 &sw_stat->parity_err_cnt))
4542 /* Check for ring full counter */
4543 if (sp->device_type == XFRAME_II_DEVICE) {
/* Each 64-bit bump counter packs four 16-bit per-ring counts. */
4544 val64 = readq(&bar0->ring_bump_counter1);
4545 for (i=0; i<4; i++) {
4546 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4547 temp64 >>= 64 - ((i+1)*16);
4548 sw_stat->ring_full_cnt[i] += temp64;
4551 val64 = readq(&bar0->ring_bump_counter2);
4552 for (i=0; i<4; i++) {
4553 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4554 temp64 >>= 64 - ((i+1)*16);
4555 sw_stat->ring_full_cnt[i+4] += temp64;
/* Per-unit Tx DMA alarms: for each unit, the first check covers fatal
 * bits (falls through to reset when set), the second acknowledges the
 * recoverable (single-bit ECC etc.) bits. */
4559 val64 = readq(&bar0->txdma_int_status);
4560 /*check for pfc_err*/
4561 if (val64 & TXDMA_PFC_INT) {
4562 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4563 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4564 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4565 &sw_stat->pfc_err_cnt))
4567 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4568 &sw_stat->pfc_err_cnt);
4571 /*check for tda_err*/
4572 if (val64 & TXDMA_TDA_INT) {
4573 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4574 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4575 &sw_stat->tda_err_cnt))
4577 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4578 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4580 /*check for pcc_err*/
4581 if (val64 & TXDMA_PCC_INT) {
4582 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4583 | PCC_N_SERR | PCC_6_COF_OV_ERR
4584 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4585 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4586 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4587 &sw_stat->pcc_err_cnt))
4589 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4590 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4593 /*check for tti_err*/
4594 if (val64 & TXDMA_TTI_INT) {
4595 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4596 &sw_stat->tti_err_cnt))
4598 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4599 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4602 /*check for lso_err*/
4603 if (val64 & TXDMA_LSO_INT) {
4604 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4605 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4606 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4608 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4609 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4612 /*check for tpa_err*/
4613 if (val64 & TXDMA_TPA_INT) {
4614 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4615 &sw_stat->tpa_err_cnt))
4617 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4618 &sw_stat->tpa_err_cnt);
4621 /*check for sm_err*/
4622 if (val64 & TXDMA_SM_INT) {
4623 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4624 &sw_stat->sm_err_cnt))
/* TMAC (Tx MAC) alarms. */
4628 val64 = readq(&bar0->mac_int_status);
4629 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4630 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4631 &bar0->mac_tmac_err_reg,
4632 &sw_stat->mac_tmac_err_cnt))
4634 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4635 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4636 &bar0->mac_tmac_err_reg,
4637 &sw_stat->mac_tmac_err_cnt);
/* Tx XGXS alarms. */
4640 val64 = readq(&bar0->xgxs_int_status);
4641 if (val64 & XGXS_INT_STATUS_TXGXS) {
4642 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4643 &bar0->xgxs_txgxs_err_reg,
4644 &sw_stat->xgxs_txgxs_err_cnt))
4646 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4647 &bar0->xgxs_txgxs_err_reg,
4648 &sw_stat->xgxs_txgxs_err_cnt);
/* Rx DMA unit alarms (RC, PRC/PCI-X, RPA, RDA, RTI). */
4651 val64 = readq(&bar0->rxdma_int_status);
4652 if (val64 & RXDMA_INT_RC_INT_M) {
4653 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4654 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4655 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4657 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4658 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4659 &sw_stat->rc_err_cnt);
4660 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4661 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4662 &sw_stat->prc_pcix_err_cnt))
4664 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4665 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4666 &sw_stat->prc_pcix_err_cnt);
4669 if (val64 & RXDMA_INT_RPA_INT_M) {
4670 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4671 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4673 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4674 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4677 if (val64 & RXDMA_INT_RDA_INT_M) {
4678 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4679 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4680 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4681 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4683 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4684 | RDA_MISC_ERR | RDA_PCIX_ERR,
4685 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4688 if (val64 & RXDMA_INT_RTI_INT_M) {
4689 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4690 &sw_stat->rti_err_cnt))
4692 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4693 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
/* RMAC (Rx MAC) alarms. */
4696 val64 = readq(&bar0->mac_int_status);
4697 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4698 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4699 &bar0->mac_rmac_err_reg,
4700 &sw_stat->mac_rmac_err_cnt))
4702 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4703 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4704 &sw_stat->mac_rmac_err_cnt);
/* Rx XGXS alarms. */
4707 val64 = readq(&bar0->xgxs_int_status);
4708 if (val64 & XGXS_INT_STATUS_RXGXS) {
4709 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4710 &bar0->xgxs_rxgxs_err_reg,
4711 &sw_stat->xgxs_rxgxs_err_cnt))
/* Memory controller alarms and ECC accounting. */
4715 val64 = readq(&bar0->mc_int_status);
4716 if(val64 & MC_INT_STATUS_MC_INT) {
4717 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4718 &sw_stat->mc_err_cnt))
4721 /* Handling Ecc errors */
4722 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4723 writeq(val64, &bar0->mc_err_reg);
4724 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4725 sw_stat->double_ecc_errs++;
4726 if (sp->device_type != XFRAME_II_DEVICE) {
4728 * Reset XframeI only if critical error
4731 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4732 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4736 sw_stat->single_ecc_errs++;
/* Fatal-error reset path (reached via gotos elided in this excerpt). */
4742 s2io_stop_all_tx_queue(sp);
4743 schedule_work(&sp->rst_timer_task);
4744 sw_stat->soft_reset_cnt++;
4749 * s2io_isr - ISR handler of the device .
4750 * @irq: the irq of the device.
4751 * @dev_id: a void pointer to the dev structure of the NIC.
4752 * Description: This function is the ISR handler of the device. It
4753 * identifies the reason for the interrupt and calls the relevant
4754 * service routines. As a contingency measure, this ISR allocates the
4755 * recv buffers, if their numbers are below the panic value which is
4756 * presently set to 25% of the original number of rcv buffers allocated.
4758 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4759 * IRQ_NONE: will be returned if interrupt is not from our device
/* Legacy (INTA) interrupt handler: read general_int_status to find the
 * cause, mask further interrupts while handling, dispatch Rx traffic
 * (NAPI schedule or direct ring processing), Tx completions, and TXPIC
 * events, optionally refill Rx buffers when NAPI is off, then restore
 * the interrupt mask. Some lines (declarations, returns, braces) are
 * elided in this excerpt. */
4761 static irqreturn_t s2io_isr(int irq, void *dev_id)
4763 struct net_device *dev = (struct net_device *) dev_id;
4764 struct s2io_nic *sp = dev->priv;
4765 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4768 struct mac_info *mac_control;
4769 struct config_param *config;
4771 /* Pretend we handled any irq's from a disconnected card */
4772 if (pci_channel_offline(sp->pdev))
4775 if (!is_s2io_card_up(sp))
4778 mac_control = &sp->mac_control;
4779 config = &sp->config;
4782 * Identify the cause for interrupt and call the appropriate
4783 * interrupt handler. Causes for the interrupt could be;
4788 reason = readq(&bar0->general_int_status);
/* All-ones read: device likely gone from the bus. */
4790 if (unlikely(reason == S2IO_MINUS_ONE) ) {
4791 /* Nothing much can be done. Get out */
4795 if (reason & (GEN_INTR_RXTRAFFIC |
4796 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4798 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
/* NAPI path: schedule polling and mask Rx until the poll completes. */
4801 if (reason & GEN_INTR_RXTRAFFIC) {
4802 if (likely(netif_rx_schedule_prep(dev,
4804 __netif_rx_schedule(dev, &sp->napi);
4805 writeq(S2IO_MINUS_ONE,
4806 &bar0->rx_traffic_mask);
4808 writeq(S2IO_MINUS_ONE,
4809 &bar0->rx_traffic_int);
4813 * rx_traffic_int reg is an R1 register, writing all 1's
4814 * will ensure that the actual interrupt causing bit
4815 * get's cleared and hence a read can be avoided.
4817 if (reason & GEN_INTR_RXTRAFFIC)
4818 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4820 for (i = 0; i < config->rx_ring_num; i++)
4821 rx_intr_handler(&mac_control->rings[i]);
4825 * tx_traffic_int reg is an R1 register, writing all 1's
4826 * will ensure that the actual interrupt causing bit get's
4827 * cleared and hence a read can be avoided.
4829 if (reason & GEN_INTR_TXTRAFFIC)
4830 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4832 for (i = 0; i < config->tx_fifo_num; i++)
4833 tx_intr_handler(&mac_control->fifos[i]);
4835 if (reason & GEN_INTR_TXPIC)
4836 s2io_txpic_intr_handle(sp);
4839 * Reallocate the buffers from the interrupt handler itself.
4841 if (!config->napi) {
4842 for (i = 0; i < config->rx_ring_num; i++)
4843 s2io_chk_rx_buffers(sp, i);
/* Restore mask; the readl flushes the posted write. */
4845 writeq(sp->general_int_mask, &bar0->general_int_mask);
4846 readl(&bar0->general_int_status);
4852 /* The interrupt was not raised by us */
/*
 * s2io_updt_stats - trigger a one-shot hardware statistics DMA update.
 * @sp: device private structure.
 * Arms the stat engine in one-shot mode and polls stat_cfg until the
 * strobe bit (bit 0) clears, indicating the stats block is current.
 */
4862 static void s2io_updt_stats(struct s2io_nic *sp)
4864 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Only touch the hardware while the card is up */
4868 if (is_s2io_card_up(sp)) {
4869 /* Apprx 30us on a 133 MHz bus */
4870 val64 = SET_UPDT_CLICKS(10) |
4871 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4872 writeq(val64, &bar0->stat_cfg);
/* Strobe bit clears when the one-shot update completes */
4875 val64 = readq(&bar0->stat_cfg);
4876 if (!(val64 & s2BIT(0)))
4880 break; /* Updt failed */
/**
 * s2io_get_stats - Updates the device statistics structure.
 * @dev : pointer to the device structure.
 * Description:
 * This function updates the device statistics structure in the s2io_nic
 * structure and returns a pointer to the same.
 * Return value:
 * pointer to the updated net_device_stats structure.
 */
4895 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4897 struct s2io_nic *sp = dev->priv;
4898 struct mac_info *mac_control;
4899 struct config_param *config;
4902 mac_control = &sp->mac_control;
4903 config = &sp->config;
4905 /* Configure Stats for immediate updt */
4906 s2io_updt_stats(sp);
/*
 * Copy selected hardware counters (DMA'd into stats_info, stored
 * little-endian by the NIC) into the kernel net_device_stats.
 */
4908 sp->stats.tx_packets =
4909 le32_to_cpu(mac_control->stats_info->tmac_frms);
4910 sp->stats.tx_errors =
4911 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4912 sp->stats.rx_errors =
4913 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4914 sp->stats.multicast =
4915 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4916 sp->stats.rx_length_errors =
4917 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4919 return (&sp->stats);
/**
 * s2io_set_multicast - entry point for multicast address enable/disable.
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flag, we
 * determine, if multicast address must be enabled or if promiscuous mode
 * is to be disabled etc.
 */
4935 static void s2io_set_multicast(struct net_device *dev)
4938 struct dev_mc_list *mclist;
4939 struct s2io_nic *sp = dev->priv;
4940 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4941 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4943 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
4945 struct config_param *config = &sp->config;
/* ALLMULTI requested and not yet enabled: catch-all CAM entry */
4947 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4948 /* Enable all Multicast addresses */
4949 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4950 &bar0->rmac_addr_data0_mem);
4951 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4952 &bar0->rmac_addr_data1_mem);
/* Program the last CAM slot as the all-multicast entry */
4953 val64 = RMAC_ADDR_CMD_MEM_WE |
4954 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4955 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
4956 writeq(val64, &bar0->rmac_addr_cmd_mem);
4957 /* Wait till command completes */
4958 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4959 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4963 sp->all_multi_pos = config->max_mc_addr - 1;
4964 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4965 /* Disable all Multicast addresses */
4966 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4967 &bar0->rmac_addr_data0_mem);
4968 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4969 &bar0->rmac_addr_data1_mem);
4970 val64 = RMAC_ADDR_CMD_MEM_WE |
4971 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4972 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4973 writeq(val64, &bar0->rmac_addr_cmd_mem);
4974 /* Wait till command completes */
4975 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4976 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4980 sp->all_multi_pos = 0;
/* Enter promiscuous mode if requested and not already active */
4983 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4984 /* Put the NIC into promiscuous mode */
4985 add = &bar0->mac_cfg;
4986 val64 = readq(&bar0->mac_cfg);
4987 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: write the key before each 32-bit half */
4989 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4990 writel((u32) val64, add);
4991 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4992 writel((u32) (val64 >> 32), (add + 4));
/* In promiscuous mode keep VLAN tags intact unless forced on */
4994 if (vlan_tag_strip != 1) {
4995 val64 = readq(&bar0->rx_pa_cfg);
4996 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4997 writeq(val64, &bar0->rx_pa_cfg);
4998 vlan_strip_flag = 0;
5001 val64 = readq(&bar0->mac_cfg);
5002 sp->promisc_flg = 1;
5003 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
5005 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5006 /* Remove the NIC from promiscuous mode */
5007 add = &bar0->mac_cfg;
5008 val64 = readq(&bar0->mac_cfg);
5009 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5011 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5012 writel((u32) val64, add);
5013 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5014 writel((u32) (val64 >> 32), (add + 4));
/* Re-enable VLAN stripping unless explicitly disabled */
5016 if (vlan_tag_strip != 0) {
5017 val64 = readq(&bar0->rx_pa_cfg);
5018 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5019 writeq(val64, &bar0->rx_pa_cfg);
5020 vlan_strip_flag = 1;
5023 val64 = readq(&bar0->mac_cfg);
5024 sp->promisc_flg = 0;
5025 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
5029 /* Update individual M_CAST address list */
5030 if ((!sp->m_cast_flg) && dev->mc_count) {
/* Reject if the request exceeds the CAM's multicast capacity */
5032 (config->max_mc_addr - config->max_mac_addr)) {
5033 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5035 DBG_PRINT(ERR_DBG, "can be added, please enable ");
5036 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5040 prev_cnt = sp->mc_addr_count;
5041 sp->mc_addr_count = dev->mc_count;
5043 /* Clear out the previous list of Mc in the H/W. */
5044 for (i = 0; i < prev_cnt; i++) {
5045 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5046 &bar0->rmac_addr_data0_mem);
5047 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5048 &bar0->rmac_addr_data1_mem);
5049 val64 = RMAC_ADDR_CMD_MEM_WE |
5050 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5051 RMAC_ADDR_CMD_MEM_OFFSET
5052 (config->mc_start_offset + i);
5053 writeq(val64, &bar0->rmac_addr_cmd_mem);
5055 /* Wait for command completes */
5056 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5057 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5059 DBG_PRINT(ERR_DBG, "%s: Adding ",
5061 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5066 /* Create the new Rx filter list and update the same in H/W. */
5067 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5068 i++, mclist = mclist->next) {
5069 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Fold the 6 address bytes into a u64 for the CAM data register */
5072 for (j = 0; j < ETH_ALEN; j++) {
5073 mac_addr |= mclist->dmi_addr[j];
5077 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5078 &bar0->rmac_addr_data0_mem);
5079 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
5080 &bar0->rmac_addr_data1_mem);
5081 val64 = RMAC_ADDR_CMD_MEM_WE |
5082 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5083 RMAC_ADDR_CMD_MEM_OFFSET
5084 (i + config->mc_start_offset);
5085 writeq(val64, &bar0->rmac_addr_cmd_mem);
5087 /* Wait for command completes */
5088 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5089 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5091 DBG_PRINT(ERR_DBG, "%s: Adding ",
5093 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
/* Read every unicast & multicast address from the hardware CAM and
 * mirror them into the driver's def_mac_addr[] shadow structure so the
 * CAM contents can be restored later (e.g. after a reset).
 */
5103 void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5107 struct config_param *config = &sp->config;
5109 /* store unicast & multicast mac addresses */
5110 for (offset = 0; offset < config->max_mc_addr; offset++) {
5111 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5112 /* if read fails disable the entry */
5113 if (mac_addr == FAILURE)
5114 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5115 do_s2io_copy_mac_addr(sp, offset, mac_addr);
/* Restore unicast & multicast MAC entries to the hardware CAM from the
 * def_mac_addr[] shadow that do_s2io_store_unicast_mc() populated.
 */
5120 static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5123 struct config_param *config = &sp->config;
5124 /* restore unicast mac address */
5125 for (offset = 0; offset < config->max_mac_addr; offset++)
5126 do_s2io_prog_unicast(sp->dev,
5127 sp->def_mac_addr[offset].mac_addr);
5129 /* restore multicast mac address */
5130 for (offset = config->mc_start_offset;
5131 offset < config->max_mc_addr; offset++)
5132 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
/* Add a multicast MAC address to the CAM.
 * @sp: device private structure.
 * @addr: 6-byte MAC address.
 * Rejects all-zero / disabled-sentinel addresses, skips duplicates, and
 * programs the first free slot in the multicast region of the CAM.
 */
5136 static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5140 struct config_param *config = &sp->config;
/* Pack the 6 address bytes into a u64 */
5142 for (i = 0; i < ETH_ALEN; i++) {
5144 mac_addr |= addr[i];
5146 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5149 /* check if the multicast mac already preset in CAM */
5150 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5152 tmp64 = do_s2io_read_unicast_mc(sp, i);
5153 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5156 if (tmp64 == mac_addr)
/* No empty slot found: CAM is full */
5159 if (i == config->max_mc_addr) {
5161 "CAM full no space left for multicast MAC\n");
5164 /* Update the internal structure with this new mac address */
5165 do_s2io_copy_mac_addr(sp, i, mac_addr);
5167 return (do_s2io_add_mac(sp, mac_addr, i));
/* Program one MAC address into CAM slot @off and wait for the command
 * to complete. Returns SUCCESS/FAILURE per wait_for_cmd_complete().
 */
5171 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
5174 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5176 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5177 &bar0->rmac_addr_data0_mem);
/* Issue a write-enable strobe for the selected CAM offset */
5180 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5181 RMAC_ADDR_CMD_MEM_OFFSET(off);
5182 writeq(val64, &bar0->rmac_addr_cmd_mem);
5184 /* Wait till command completes */
5185 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5186 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5188 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
/* Delete a specified unicast/multicast MAC entry from the CAM by
 * scanning for @addr and overwriting the matching slot with the
 * disable sentinel (0xffffffffffffULL). The shadow list is then
 * re-read from hardware to stay in sync.
 */
5194 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5197 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5198 struct config_param *config = &sp->config;
5201 offset < config->max_mc_addr; offset++) {
5202 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5203 if (tmp64 == addr) {
5204 /* disable the entry by writing 0xffffffffffffULL */
5205 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5207 /* store the new mac list from CAM */
5208 do_s2io_store_unicast_mc(sp);
/* Fell through the scan: the address was not programmed */
5212 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5213 (unsigned long long)addr);
/* Read the MAC entry stored at CAM slot @offset.
 * Returns the 48-bit address (data register shifted right 16), or the
 * preset failure pattern if the read command does not complete.
 */
5218 static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5220 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5221 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Issue a read strobe for the selected CAM offset */
5225 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5226 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5227 writeq(val64, &bar0->rmac_addr_cmd_mem);
5229 /* Wait till command completes */
5230 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5231 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5233 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
/* Address occupies the upper 48 bits of the data register */
5236 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5237 return (tmp64 >> 16);
/*
 * s2io_set_mac_addr - ndo set-MAC-address driver entry point.
 * Validates the new address, copies it into dev->dev_addr, and
 * programs it into the CAM via do_s2io_prog_unicast().
 */
5244 static int s2io_set_mac_addr(struct net_device *dev, void *p)
5246 struct sockaddr *addr = p;
5248 if (!is_valid_ether_addr(addr->sa_data))
5251 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5253 /* store the MAC address in CAM */
5254 return (do_s2io_prog_unicast(dev, dev->dev_addr));
/**
 * do_s2io_prog_unicast - Programs the Xframe mac address
 * @dev : pointer to the device structure.
 * @addr: a uchar pointer to the new mac address which is to be set.
 * Description : This procedure will program the Xframe to receive
 * frames with new Mac Address
 * Return value: SUCCESS on success and an appropriate (-)ve integer
 * as defined in errno.h file on failure.
 */
5266 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
5268 struct s2io_nic *sp = dev->priv;
5269 register u64 mac_addr = 0, perm_addr = 0;
5272 struct config_param *config = &sp->config;
/*
 * Set the new MAC address as the new unicast filter and reflect this
 * change on the device address registered with the OS. It will be
 * at offset 0.
 */
5279 for (i = 0; i < ETH_ALEN; i++) {
5281 mac_addr |= addr[i];
5283 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
5286 /* check if the dev_addr is different than perm_addr */
5287 if (mac_addr == perm_addr)
5290 /* check if the mac already preset in CAM */
5291 for (i = 1; i < config->max_mac_addr; i++) {
5292 tmp64 = do_s2io_read_unicast_mc(sp, i);
5293 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5296 if (tmp64 == mac_addr) {
5298 "MAC addr:0x%llx already present in CAM\n",
5299 (unsigned long long)mac_addr);
/* Scan exhausted every unicast slot without finding a free one */
5303 if (i == config->max_mac_addr) {
5304 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5307 /* Update the internal structure with this new mac address */
5308 do_s2io_copy_mac_addr(sp, i, mac_addr);
5309 return (do_s2io_add_mac(sp, mac_addr, i));
/**
 * s2io_ethtool_sset - Sets different link parameters.
 * @dev : device structure (holds the s2io_nic private data).
 * @info: pointer to the structure with parameters given by ethtool to set
 * link information.
 * Description:
 * The function sets different link parameters provided by the user onto
 * the NIC. The NIC is fixed at 10G full duplex with autoneg off, so any
 * other combination is rejected.
 */
5324 static int s2io_ethtool_sset(struct net_device *dev,
5325 struct ethtool_cmd *info)
5327 struct s2io_nic *sp = dev->priv;
/* Only 10000/full with autoneg disabled is valid for this hardware */
5328 if ((info->autoneg == AUTONEG_ENABLE) ||
5329 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
/* Bounce the interface so the (no-op) settings take effect */
5332 s2io_close(sp->dev);
/**
 * s2io_ethtool_gset - Return link specific information.
 * @dev : device structure (holds the s2io_nic private data).
 * @info : pointer to the structure with parameters given by ethtool
 * to return link information.
 * Description:
 * Returns link specific information like speed, duplex etc.. to ethtool.
 * Return value :
 * return 0 on success.
 */
5351 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5353 struct s2io_nic *sp = dev->priv;
5354 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): advertising is filled with SUPPORTED_* constants;
 * ethtool expects ADVERTISED_* here (same bit values in practice,
 * but the wrong namespace) -- confirm against ethtool.h and fix. */
5355 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5356 info->port = PORT_FIBRE;
5358 /* info->transceiver */
5359 info->transceiver = XCVR_EXTERNAL;
/* Report 10G/full only while the carrier is actually up */
5361 if (netif_carrier_ok(sp->dev)) {
5362 info->speed = 10000;
5363 info->duplex = DUPLEX_FULL;
5369 info->autoneg = AUTONEG_DISABLE;
/**
 * s2io_ethtool_gdrvinfo - Returns driver specific information.
 * @dev : device structure (holds the s2io_nic private data).
 * @info : pointer to the structure with parameters given by ethtool to
 * return driver information.
 * Description:
 * Returns driver specific information like name, version etc.. to ethtool.
 */
5385 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5386 struct ethtool_drvinfo *info)
5388 struct s2io_nic *sp = dev->priv;
/* NIC has no loadable firmware, hence the empty fw_version string */
5390 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5391 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5392 strncpy(info->fw_version, "", sizeof(info->fw_version));
5393 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
5394 info->regdump_len = XENA_REG_SPACE;
5395 info->eedump_len = XENA_EEPROM_SPACE;
5399 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
5400 * @sp: private member of the device structure, which is a pointer to the
5401 * s2io_nic structure.
5402 * @regs : pointer to the structure with parameters given by ethtool for
5403 * dumping the registers.
5404 * @reg_space: The input argumnet into which all the registers are dumped.
5406 * Dumps the entire register space of xFrame NIC into the user given
5412 static void s2io_ethtool_gregs(struct net_device *dev,
5413 struct ethtool_regs *regs, void *space)
5417 u8 *reg_space = (u8 *) space;
5418 struct s2io_nic *sp = dev->priv;
5420 regs->len = XENA_REG_SPACE;
5421 regs->version = sp->pdev->subsystem_device;
5423 for (i = 0; i < regs->len; i += 8) {
5424 reg = readq(sp->bar0 + i);
5425 memcpy((reg_space + i), ®, 8);
/**
 * s2io_phy_id - timer function that alternates adapter LED.
 * @data : address of the private member of the device structure, which
 * is a pointer to the s2io_nic structure, provided as an u32.
 * Description: This is actually the timer function that alternates the
 * adapter LED bit of the adapter control bit to set/reset every time on
 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
 * once every second.
 */
5438 static void s2io_phy_id(unsigned long data)
5440 struct s2io_nic *sp = (struct s2io_nic *) data;
5441 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5445 subid = sp->pdev->subsystem_device;
/* Xframe II (and late Xframe I subsystems) toggle a GPIO line;
 * older boards toggle the LED bit in adapter_control instead. */
5446 if ((sp->device_type == XFRAME_II_DEVICE) ||
5447 ((subid & 0xFF) >= 0x07)) {
5448 val64 = readq(&bar0->gpio_control);
5449 val64 ^= GPIO_CTRL_GPIO_0;
5450 writeq(val64, &bar0->gpio_control);
5452 val64 = readq(&bar0->adapter_control);
5453 val64 ^= ADAPTER_LED_ON;
5454 writeq(val64, &bar0->adapter_control);
/* Re-arm for the next half-second toggle */
5457 mod_timer(&sp->id_timer, jiffies + HZ / 2);
/**
 * s2io_ethtool_idnic - To physically identify the nic on the system.
 * @dev : device structure (holds the s2io_nic private data).
 * @data : blink duration in seconds given by ethtool (0 = default).
 * Description: Used to physically identify the NIC on the system.
 * The Link LED will blink for a time specified by the user for
 * identification.
 * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if it's link is up.
 * Return value:
 * int , returns 0 on success
 */
5475 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5477 u64 val64 = 0, last_gpio_ctrl_val;
5478 struct s2io_nic *sp = dev->priv;
5479 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5482 subid = sp->pdev->subsystem_device;
/* Preserve current GPIO state to restore after blinking */
5483 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/* Old Xframe I boards can only blink while the adapter is enabled */
5484 if ((sp->device_type == XFRAME_I_DEVICE) &&
5485 ((subid & 0xFF) < 0x07)) {
5486 val64 = readq(&bar0->adapter_control);
5487 if (!(val64 & ADAPTER_CNTL_EN)) {
5489 "Adapter Link down, cannot blink LED\n");
/* Lazily initialize the blink timer on first use */
5493 if (sp->id_timer.function == NULL) {
5494 init_timer(&sp->id_timer);
5495 sp->id_timer.function = s2io_phy_id;
5496 sp->id_timer.data = (unsigned long) sp;
5498 mod_timer(&sp->id_timer, jiffies);
/* Sleep for the user-requested duration, or the default flicker time */
5500 msleep_interruptible(data * HZ);
5502 msleep_interruptible(MAX_FLICKER_TIME);
5503 del_timer_sync(&sp->id_timer);
/* Boards with faulty link indicators need the GPIO state restored */
5505 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
5506 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5507 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/*
 * s2io_ethtool_gringparam - report Rx/Tx descriptor ring sizes.
 * @dev: device structure (holds the s2io_nic private data).
 * @ering: ethtool output structure to fill with current and maximum
 * descriptor counts. Maximums depend on the Rx descriptor mode.
 */
5513 static void s2io_ethtool_gringparam(struct net_device *dev,
5514 struct ethtool_ringparam *ering)
5516 struct s2io_nic *sp = dev->priv;
5517 int i,tx_desc_count=0,rx_desc_count=0;
/* Max Rx descriptors per ring depends on the buffer mode in use */
5519 if (sp->rxd_mode == RXD_MODE_1)
5520 ering->rx_max_pending = MAX_RX_DESC_1;
5521 else if (sp->rxd_mode == RXD_MODE_3B)
5522 ering->rx_max_pending = MAX_RX_DESC_2;
5524 ering->tx_max_pending = MAX_TX_DESC;
/* Current Tx count: sum of all configured FIFO lengths */
5525 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5526 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5528 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5529 ering->tx_pending = tx_desc_count;
/* Current Rx count: sum of descriptors across all rings */
5531 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5532 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5534 ering->rx_pending = rx_desc_count;
/* No mini ring on this hardware; jumbo mirrors the normal ring */
5536 ering->rx_mini_max_pending = 0;
5537 ering->rx_mini_pending = 0;
5538 if(sp->rxd_mode == RXD_MODE_1)
5539 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5540 else if (sp->rxd_mode == RXD_MODE_3B)
5541 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5542 ering->rx_jumbo_pending = rx_desc_count;
/**
 * s2io_ethtool_getpause_data - Pause frame generation and reception.
 * @dev : device structure (holds the s2io_nic private data).
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * Returns the Pause frame generation and reception capability of the NIC
 * by decoding the rmac_pause_cfg register. Autoneg of pause is never
 * supported on this hardware.
 */
5555 static void s2io_ethtool_getpause_data(struct net_device *dev,
5556 struct ethtool_pauseparam *ep)
5559 struct s2io_nic *sp = dev->priv;
5560 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5562 val64 = readq(&bar0->rmac_pause_cfg);
5563 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5564 ep->tx_pause = TRUE;
5565 if (val64 & RMAC_PAUSE_RX_ENABLE)
5566 ep->rx_pause = TRUE;
5567 ep->autoneg = FALSE;
/**
 * s2io_ethtool_setpause_data - set/reset pause frame generation.
 * @dev : device structure (holds the s2io_nic private data).
 * @ep : pointer to the structure with pause parameters given by ethtool.
 * Description:
 * It can be used to set or reset Pause frame generation or reception
 * support of the NIC, via the RMAC_PAUSE_GEN/RX enable bits in
 * rmac_pause_cfg.
 * Return value:
 * int, returns 0 on Success
 */
5582 static int s2io_ethtool_setpause_data(struct net_device *dev,
5583 struct ethtool_pauseparam *ep)
5586 struct s2io_nic *sp = dev->priv;
5587 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Read-modify-write: flip only the two pause enable bits */
5589 val64 = readq(&bar0->rmac_pause_cfg);
5591 val64 |= RMAC_PAUSE_GEN_ENABLE;
5593 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5595 val64 |= RMAC_PAUSE_RX_ENABLE;
5597 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5598 writeq(val64, &bar0->rmac_pause_cfg);
/**
 * read_eeprom - reads 4 bytes of data from user given offset.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be read.
 * @data : output parameter where the data read at the given
 * offset is stored.
 * Description:
 * Will read 4 bytes of data from the user given offset and return the
 * read data. Xframe I uses the I2C interface; Xframe II uses SPI.
 * NOTE: Will allow to read only part of the EEPROM visible through the
 * I2C bus.
 * Return value:
 * -1 on failure and 0 on success.
 */
5618 #define S2IO_DEV_ID 5
5619 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
5624 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Xframe I: EEPROM accessed over the I2C controller */
5626 if (sp->device_type == XFRAME_I_DEVICE) {
5627 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5628 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5629 I2C_CONTROL_CNTL_START;
5630 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll (bounded) for the I2C transaction to finish */
5632 while (exit_cnt < 5) {
5633 val64 = readq(&bar0->i2c_control);
5634 if (I2C_CONTROL_CNTL_END(val64)) {
5635 *data = I2C_CONTROL_GET_DATA(val64);
/* Xframe II: EEPROM accessed over the SPI controller */
5644 if (sp->device_type == XFRAME_II_DEVICE) {
5645 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5646 SPI_CONTROL_BYTECNT(0x3) |
5647 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5648 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5649 val64 |= SPI_CONTROL_REQ;
5650 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
/* Poll (bounded) for NACK or DONE */
5651 while (exit_cnt < 5) {
5652 val64 = readq(&bar0->spi_control);
5653 if (val64 & SPI_CONTROL_NACK) {
5656 } else if (val64 & SPI_CONTROL_DONE) {
5657 *data = readq(&bar0->spi_data);
/**
 * write_eeprom - actually writes the relevant part of the data value.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @off : offset at which the data must be written
 * @data : The data that is to be written
 * @cnt : Number of bytes of the data that are actually to be written into
 * the Eeprom. (max of 3)
 * Description:
 * Actually writes the relevant part of the data value into the Eeprom
 * through the I2C bus (Xframe I) or SPI bus (Xframe II).
 * Return value:
 * 0 on success, -1 on failure.
 */
5684 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
5686 int exit_cnt = 0, ret = -1;
5688 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Xframe I: write through the I2C controller */
5690 if (sp->device_type == XFRAME_I_DEVICE) {
5691 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5692 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5693 I2C_CONTROL_CNTL_START;
5694 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll (bounded); success only if the device did not NACK */
5696 while (exit_cnt < 5) {
5697 val64 = readq(&bar0->i2c_control);
5698 if (I2C_CONTROL_CNTL_END(val64)) {
5699 if (!(val64 & I2C_CONTROL_NACK))
/* Xframe II: write through the SPI controller */
5708 if (sp->device_type == XFRAME_II_DEVICE) {
/* A byte count of 8 is encoded as 0 in the SPI byte-count field */
5709 int write_cnt = (cnt == 8) ? 0 : cnt;
5710 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5712 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
5713 SPI_CONTROL_BYTECNT(write_cnt) |
5714 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5715 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5716 val64 |= SPI_CONTROL_REQ;
5717 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
/* Poll (bounded) for NACK or DONE */
5718 while (exit_cnt < 5) {
5719 val64 = readq(&bar0->spi_control);
5720 if (val64 & SPI_CONTROL_NACK) {
5723 } else if (val64 & SPI_CONTROL_DONE) {
/*
 * s2io_vpd_read - read product name and serial number from PCI VPD.
 * @nic: device private structure.
 * Reads 256 bytes of Vital Product Data through the PCI VPD capability
 * (at config offset 0x80), then extracts the "SN" serial-number field
 * and the product-name string. Falls back to compiled-in defaults on
 * any failure.
 */
5733 static void s2io_vpd_read(struct s2io_nic *nic)
5737 int i=0, cnt, fail = 0;
5738 int vpd_addr = 0x80;
/* Default names per device type; overridden by VPD if readable */
5740 if (nic->device_type == XFRAME_II_DEVICE) {
5741 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5745 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5748 strcpy(nic->serial_num, "NOT AVAILABLE");
5750 vpd_data = kmalloc(256, GFP_KERNEL);
5752 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
5755 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
/* Read VPD 4 bytes at a time: write the address, poll the done flag */
5757 for (i = 0; i < 256; i +=4 ) {
5758 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5759 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5760 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5761 for (cnt = 0; cnt <5; cnt++) {
5763 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5768 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5772 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5773 (u32 *)&vpd_data[i]);
5777 /* read serial number of adapter */
5778 for (cnt = 0; cnt < 256; cnt++) {
5779 if ((vpd_data[cnt] == 'S') &&
5780 (vpd_data[cnt+1] == 'N') &&
5781 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5782 memset(nic->serial_num, 0, VPD_STRING_LEN);
5783 memcpy(nic->serial_num, &vpd_data[cnt + 3],
/* Product name string length lives in vpd_data[1], text at [3] */
5790 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
5791 memset(nic->product_name, 0, vpd_data[1]);
5792 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5795 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
/**
 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
 * @dev : device structure (holds the s2io_nic private data).
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information.
 * @data_buf : output buffer for the values read from the Eeprom.
 * Description: Reads the values stored in the Eeprom at given offset
 * for a given length. Stores these values in the input argument data
 * buffer 'data_buf' and returns these to the caller (ethtool.)
 */
5811 static int s2io_ethtool_geeprom(struct net_device *dev,
5812 struct ethtool_eeprom *eeprom, u8 * data_buf)
5816 struct s2io_nic *sp = dev->priv;
/* Magic identifies the device the dump came from */
5818 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request to the visible EEPROM window */
5820 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5821 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5823 for (i = 0; i < eeprom->len; i += 4) {
5824 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5825 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5829 memcpy((data_buf + i), &valid, 4);
/**
 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
 * @dev : device structure (holds the s2io_nic private data).
 * @eeprom : pointer to the user level structure provided by ethtool,
 * containing all relevant information.
 * @data_buf : user defined value to be written into Eeprom.
 * Description:
 * Tries to write the user provided value in the Eeprom, at the offset
 * given by the user, one byte at a time after validating the magic.
 * Return value:
 * 0 on success, -EFAULT on failure.
 */
5848 static int s2io_ethtool_seeprom(struct net_device *dev,
5849 struct ethtool_eeprom *eeprom,
5852 int len = eeprom->len, cnt = 0;
5853 u64 valid = 0, data;
5854 struct s2io_nic *sp = dev->priv;
/* Refuse writes whose magic does not match this device */
5856 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5858 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5859 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Write byte-by-byte; each byte is positioned in the top octet */
5865 data = (u32) data_buf[cnt] & 0x000000FF;
5867 valid = (u32) (data << 24);
5871 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5873 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5875 "write into the specified offset\n");
/**
 * s2io_register_test - reads and writes into all clock domains.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data : variable that returns the result of each of the tests conducted
 * by the driver.
 * Description:
 * Read and write into all clock domains. The NIC has 3 clock domains;
 * see that registers in all the three regions are accessible by
 * comparing fixed-pattern registers against their known reset values,
 * then doing a write/read-back check on xmsi_data.
 */
5898 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5900 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5901 u64 val64 = 0, exp_val;
/* Level 1: byte-order test pattern register */
5904 val64 = readq(&bar0->pif_rd_swapper_fb);
5905 if (val64 != 0x123456789abcdefULL) {
5907 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
/* Level 2: known reset value of rmac_pause_cfg */
5910 val64 = readq(&bar0->rmac_pause_cfg);
5911 if (val64 != 0xc000ffff00000000ULL) {
5913 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
/* Level 3: rx_queue_cfg reset value differs per device generation */
5916 val64 = readq(&bar0->rx_queue_cfg);
5917 if (sp->device_type == XFRAME_II_DEVICE)
5918 exp_val = 0x0404040404040404ULL;
5920 exp_val = 0x0808080808080808ULL;
5921 if (val64 != exp_val) {
5923 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
/* Level 4: known reset value of xgxs_efifo_cfg */
5926 val64 = readq(&bar0->xgxs_efifo_cfg);
5927 if (val64 != 0x000000001923141EULL) {
5929 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: pattern and inverse-pattern read-back on xmsi_data */
5932 val64 = 0x5A5A5A5A5A5A5A5AULL;
5933 writeq(val64, &bar0->xmsi_data);
5934 val64 = readq(&bar0->xmsi_data);
5935 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5937 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5940 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5941 writeq(val64, &bar0->xmsi_data);
5942 val64 = readq(&bar0->xmsi_data);
5943 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5945 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
/**
 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @data: variable that returns the result of each of the tests conducted
 * by the driver.
 * Description:
 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
 * register. Writable areas are exercised positively at 0x4F0/0x7F0
 * (then restored); known write-protected areas are tested negatively
 * (a successful write there is a failure), on Xframe I only.
 */
5965 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5968 u64 ret_data, org_4F0, org_7F0;
5969 u8 saved_4F0 = 0, saved_7F0 = 0;
5970 struct net_device *dev = sp->dev;
5972 /* Test Write Error at offset 0 */
5973 /* Note that SPI interface allows write access to all areas
5974 * of EEPROM. Hence doing all negative testing only for Xframe I.
*/
5976 if (sp->device_type == XFRAME_I_DEVICE)
5977 if (!write_eeprom(sp, 0, 0, 3))
5980 /* Save current values at offsets 0x4F0 and 0x7F0 */
5981 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5983 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5986 /* Test Write at offset 4f0 */
5987 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5989 if (read_eeprom(sp, 0x4F0, &ret_data))
/* Read-back must match the pattern just written */
5992 if (ret_data != 0x012345) {
5993 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5994 "Data written %llx Data read %llx\n",
5995 dev->name, (unsigned long long)0x12345,
5996 (unsigned long long)ret_data);
6000 /* Reset the EEPROM data go FFFF */
6001 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
6003 /* Test Write Request Error at offset 0x7c */
6004 if (sp->device_type == XFRAME_I_DEVICE)
6005 if (!write_eeprom(sp, 0x07C, 0, 3))
6008 /* Test Write Request at offset 0x7f0 */
6009 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
6011 if (read_eeprom(sp, 0x7F0, &ret_data))
6014 if (ret_data != 0x012345) {
6015 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6016 "Data written %llx Data read %llx\n",
6017 dev->name, (unsigned long long)0x12345,
6018 (unsigned long long)ret_data);
6022 /* Reset the EEPROM data go FFFF */
6023 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
/* Negative tests: these offsets must be write-protected on Xframe I */
6025 if (sp->device_type == XFRAME_I_DEVICE) {
6026 /* Test Write Error at offset 0x80 */
6027 if (!write_eeprom(sp, 0x080, 0, 3))
6030 /* Test Write Error at offset 0xfc */
6031 if (!write_eeprom(sp, 0x0FC, 0, 3))
6034 /* Test Write Error at offset 0x100 */
6035 if (!write_eeprom(sp, 0x100, 0, 3))
6038 /* Test Write Error at offset 4ec */
6039 if (!write_eeprom(sp, 0x4EC, 0, 3))
6043 /* Restore values at offsets 0x4F0 and 0x7F0 */
6045 write_eeprom(sp, 0x4F0, org_4F0, 3);
6047 write_eeprom(sp, 0x7F0, org_7F0, 3);
6054 * s2io_bist_test - invokes the MemBist test of the card .
6055 * @sp : private member of the device structure, which is a pointer to the
6056 * s2io_nic structure.
6057 * @data: variable that returns the result of each of the test conducted by
6060 * This invokes the MemBist test of the card. We give around
6061 * 2 secs time for the Test to complete. If it's still not complete
6062 * within this period, we consider that the test failed.
6064 * 0 on success and -1 on failure.
6067 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
6070 int cnt = 0, ret = -1;
/* Kick off BIST by setting the start bit in PCI config space. */
6072 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6073 bist |= PCI_BIST_START;
/* NOTE(review): PCI_BIST is a single-byte register but a word write is
 * issued here (byte read above) — confirm this is intentional. */
6074 pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll until the hardware clears the start bit, then report the
 * completion code to the caller. */
6077 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6078 if (!(bist & PCI_BIST_START)) {
6079 *data = (bist & PCI_BIST_CODE_MASK);
6091 * s2io_link_test - verifies the link state of the nic
6092 * @sp : private member of the device structure, which is a pointer to the
6093 * s2io_nic structure.
6094 * @data: variable that returns the result of each of the test conducted by
6097 * The function verifies the link state of the NIC and updates the input
6098 * argument 'data' appropriately.
6103 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
6105 struct XENA_dev_config __iomem *bar0 = sp->bar0;
/* Read the adapter status register and decode the link-state bits. */
6108 val64 = readq(&bar0->adapter_status);
6109 if(!(LINK_IS_UP(val64)))
6118 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6119 * @sp : private member of the device structure, which is a pointer to the
6120 * s2io_nic structure.
6121 * @data: variable that returns the result of each of the test
6122 * conducted by the driver.
6124 * This is one of the offline test that tests the read and write
6125 * access to the RldRam chip on the NIC.
6130 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
6132 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6134 int cnt, iteration = 0, test_fail = 0;
/* Disable ECC while exercising the RLDRAM so injected test patterns do
 * not trigger error reporting. */
6136 val64 = readq(&bar0->adapter_control);
6137 val64 &= ~ADAPTER_ECC_EN;
6138 writeq(val64, &bar0->adapter_control);
6140 val64 = readq(&bar0->mc_rldram_test_ctrl);
6141 val64 |= MC_RLDRAM_TEST_MODE;
6142 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6144 val64 = readq(&bar0->mc_rldram_mrs);
6145 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6146 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6148 val64 |= MC_RLDRAM_MRS_ENABLE;
6149 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two passes: the second pass inverts the upper 48 bits of each data
 * pattern to toggle every tested bit both ways. */
6151 while (iteration < 2) {
6152 val64 = 0x55555555aaaa0000ULL;
6153 if (iteration == 1) {
6154 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6156 writeq(val64, &bar0->mc_rldram_test_d0);
6158 val64 = 0xaaaa5a5555550000ULL;
6159 if (iteration == 1) {
6160 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6162 writeq(val64, &bar0->mc_rldram_test_d1);
6164 val64 = 0x55aaaaaaaa5a0000ULL;
6165 if (iteration == 1) {
6166 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6168 writeq(val64, &bar0->mc_rldram_test_d2);
6170 val64 = (u64) (0x0000003ffffe0100ULL);
6171 writeq(val64, &bar0->mc_rldram_test_add);
/* Start the write phase and poll (bounded) for completion. */
6173 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
6175 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6177 for (cnt = 0; cnt < 5; cnt++) {
6178 val64 = readq(&bar0->mc_rldram_test_ctrl);
6179 if (val64 & MC_RLDRAM_TEST_DONE)
/* Start the read/verify phase and poll for completion. */
6187 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6188 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
6190 for (cnt = 0; cnt < 5; cnt++) {
6191 val64 = readq(&bar0->mc_rldram_test_ctrl);
6192 if (val64 & MC_RLDRAM_TEST_DONE)
6200 val64 = readq(&bar0->mc_rldram_test_ctrl);
6201 if (!(val64 & MC_RLDRAM_TEST_PASS))
6209 /* Bring the adapter out of test mode */
6210 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6216 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
6217 * @sp : private member of the device structure, which is a pointer to the
6218 * s2io_nic structure.
6219 * @ethtest : pointer to a ethtool command specific structure that will be
6220 * returned to the user.
6221 * @data : variable that returns the result of each of the test
6222 * conducted by the driver.
6224 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6225 * the health of the card.
6230 static void s2io_ethtool_test(struct net_device *dev,
6231 struct ethtool_test *ethtest,
6234 struct s2io_nic *sp = dev->priv;
6235 int orig_state = netif_running(sp->dev);
6237 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6238 /* Offline Tests. */
/* Offline tests need exclusive hardware access, so the interface is
 * closed first (and, per orig_state, presumably restored afterwards —
 * the restore path is outside this extract). */
6240 s2io_close(sp->dev);
6242 if (s2io_register_test(sp, &data[0]))
6243 ethtest->flags |= ETH_TEST_FL_FAILED;
6247 if (s2io_rldram_test(sp, &data[3]))
6248 ethtest->flags |= ETH_TEST_FL_FAILED;
6252 if (s2io_eeprom_test(sp, &data[1]))
6253 ethtest->flags |= ETH_TEST_FL_FAILED;
6255 if (s2io_bist_test(sp, &data[4]))
6256 ethtest->flags |= ETH_TEST_FL_FAILED;
6266 "%s: is not up, cannot run test\n",
/* Online test: only the link check can run while traffic may flow. */
6275 if (s2io_link_test(sp, &data[2]))
6276 ethtest->flags |= ETH_TEST_FL_FAILED;
/*
 * s2io_get_ethtool_stats - fill the ethtool statistics array.
 * Snapshots the hardware statistics block (after s2io_updt_stats) and the
 * driver's software counters into tmp_stats[] in the exact order of the
 * corresponding ethtool string tables. 32-bit hardware counters with a
 * separate overflow word are combined into a single 64-bit value via
 * (oflow << 32) | low.
 */
6285 static void s2io_get_ethtool_stats(struct net_device *dev,
6286 struct ethtool_stats *estats,
6290 struct s2io_nic *sp = dev->priv;
6291 struct stat_block *stat_info = sp->mac_control.stats_info;
6293 s2io_updt_stats(sp);
/* Transmit MAC (tmac) statistics. */
6295 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6296 le32_to_cpu(stat_info->tmac_frms);
6298 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6299 le32_to_cpu(stat_info->tmac_data_octets);
6300 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
6302 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6303 le32_to_cpu(stat_info->tmac_mcst_frms);
6305 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6306 le32_to_cpu(stat_info->tmac_bcst_frms);
6307 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
6309 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6310 le32_to_cpu(stat_info->tmac_ttl_octets);
6312 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6313 le32_to_cpu(stat_info->tmac_ucst_frms);
6315 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6316 le32_to_cpu(stat_info->tmac_nucst_frms);
6318 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6319 le32_to_cpu(stat_info->tmac_any_err_frms);
6320 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
6321 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
6323 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6324 le32_to_cpu(stat_info->tmac_vld_ip);
6326 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6327 le32_to_cpu(stat_info->tmac_drop_ip);
6329 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6330 le32_to_cpu(stat_info->tmac_icmp);
6332 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6333 le32_to_cpu(stat_info->tmac_rst_tcp);
6334 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
6335 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6336 le32_to_cpu(stat_info->tmac_udp);
/* Receive MAC (rmac) statistics. */
6338 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6339 le32_to_cpu(stat_info->rmac_vld_frms);
6341 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6342 le32_to_cpu(stat_info->rmac_data_octets);
6343 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6344 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
6346 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6347 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6349 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6350 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
6351 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
6352 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
6353 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6354 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
6355 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6357 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6358 le32_to_cpu(stat_info->rmac_ttl_octets);
6360 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6361 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6363 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6364 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
6366 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6367 le32_to_cpu(stat_info->rmac_discarded_frms);
6369 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6370 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6371 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6372 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
6374 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6375 le32_to_cpu(stat_info->rmac_usized_frms);
6377 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6378 le32_to_cpu(stat_info->rmac_osized_frms);
6380 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6381 le32_to_cpu(stat_info->rmac_frag_frms);
6383 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6384 le32_to_cpu(stat_info->rmac_jabber_frms);
6385 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6386 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6387 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6388 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6389 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6390 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6392 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
6393 le32_to_cpu(stat_info->rmac_ip);
6394 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6395 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
6397 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
6398 le32_to_cpu(stat_info->rmac_drop_ip);
6400 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
6401 le32_to_cpu(stat_info->rmac_icmp);
6402 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
6404 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
6405 le32_to_cpu(stat_info->rmac_udp);
6407 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6408 le32_to_cpu(stat_info->rmac_err_drp_udp);
6409 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
/* Per-receive-queue frame and full counters. */
6410 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6411 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6412 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6413 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6414 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6415 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6416 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6417 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6418 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6419 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6420 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6421 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6422 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6423 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6424 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6425 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
6427 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6428 le32_to_cpu(stat_info->rmac_pause_cnt);
6429 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6430 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
6432 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6433 le32_to_cpu(stat_info->rmac_accepted_ip);
6434 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* PCI/DMA transaction counters. */
6435 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6436 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6437 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6438 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6439 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6440 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6441 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6442 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6443 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6444 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6445 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6446 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6447 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6448 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6449 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6450 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6451 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6452 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
6454 /* Enhanced statistics exist only for Hercules */
6455 if(sp->device_type == XFRAME_II_DEVICE) {
6457 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6459 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6461 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6462 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6463 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6464 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6465 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6466 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6467 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6468 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6469 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6470 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6471 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6472 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6473 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6474 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
/* Driver-maintained software statistics (host byte order, no
 * le*_to_cpu conversion needed). */
6478 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6479 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
6480 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6481 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6482 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6483 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
6484 for (k = 0; k < MAX_RX_RINGS; k++)
6485 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
/* XPAK transceiver alarm/warning counters. */
6486 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6487 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6488 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6489 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6490 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6491 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6492 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6493 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6494 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6495 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6496 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6497 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
6498 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6499 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6500 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6501 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
6502 if (stat_info->sw_stat.num_aggregations) {
6503 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6506 * Since 64-bit divide does not work on all platforms,
6507 * do repeated subtraction.
6509 while (tmp >= stat_info->sw_stat.num_aggregations) {
6510 tmp -= stat_info->sw_stat.num_aggregations;
6513 tmp_stats[i++] = count;
6517 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
6518 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
6519 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
6520 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6521 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6522 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6523 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6524 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6525 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
/* Per-cause transmit error counters. */
6527 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6528 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6529 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6530 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6531 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
/* Per-cause receive error counters. */
6533 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6534 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6535 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6536 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6537 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6538 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6539 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6540 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6541 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
/* Per-block hardware error counters. */
6542 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6543 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6544 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6545 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6546 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6547 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6548 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6549 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6550 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6551 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6552 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6553 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6554 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6555 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6556 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6557 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6558 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
6561 static int s2io_ethtool_get_regs_len(struct net_device *dev)
6563 return (XENA_REG_SPACE);
6567 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
6569 struct s2io_nic *sp = dev->priv;
6571 return (sp->rx_csum);
/*
 * s2io_ethtool_set_rx_csum - enable/disable RX checksum offload.
 * Presumably stores @data into sp->rx_csum; the body is truncated in
 * this extract, so confirm against the full source.
 */
6574 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
6576 struct s2io_nic *sp = dev->priv;
6586 static int s2io_get_eeprom_len(struct net_device *dev)
6588 return (XENA_EEPROM_SPACE);
/*
 * s2io_get_sset_count - number of entries in an ethtool string set.
 * Returns the self-test count for the test set and a per-device-type
 * statistics count (Xframe II exposes extra "enhanced" stats).
 */
6591 static int s2io_get_sset_count(struct net_device *dev, int sset)
6593 struct s2io_nic *sp = dev->priv;
6597 return S2IO_TEST_LEN;
6599 switch(sp->device_type) {
6600 case XFRAME_I_DEVICE:
6601 return XFRAME_I_STAT_LEN;
6602 case XFRAME_II_DEVICE:
6603 return XFRAME_II_STAT_LEN;
/*
 * s2io_ethtool_get_strings - copy the string table for a given set.
 * Emits the self-test names, or the stats names (base Xena table, plus
 * the enhanced table on Xframe II, plus the driver software table) in
 * the same order s2io_get_ethtool_stats fills values.
 */
6612 static void s2io_ethtool_get_strings(struct net_device *dev,
6613 u32 stringset, u8 * data)
6616 struct s2io_nic *sp = dev->priv;
6618 switch (stringset) {
6620 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6623 stat_size = sizeof(ethtool_xena_stats_keys);
6624 memcpy(data, &ethtool_xena_stats_keys,stat_size);
/* Enhanced stats keys are appended only for Xframe II, matching
 * s2io_get_sset_count's per-device-type lengths. */
6625 if(sp->device_type == XFRAME_II_DEVICE) {
6626 memcpy(data + stat_size,
6627 &ethtool_enhanced_stats_keys,
6628 sizeof(ethtool_enhanced_stats_keys));
6629 stat_size += sizeof(ethtool_enhanced_stats_keys);
6632 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6633 sizeof(ethtool_driver_stats_keys));
/*
 * s2io_ethtool_op_set_tx_csum - toggle IP TX checksum offload.
 * Sets or clears NETIF_F_IP_CSUM in dev->features based on @data.
 */
6637 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
6640 dev->features |= NETIF_F_IP_CSUM;
6642 dev->features &= ~NETIF_F_IP_CSUM;
6647 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6649 return (dev->features & NETIF_F_TSO) != 0;
/*
 * s2io_ethtool_op_set_tso - toggle TCP segmentation offload.
 * Sets or clears both NETIF_F_TSO and NETIF_F_TSO6 together based on
 * @data.
 */
6651 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6654 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6656 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
/*
 * ethtool operations table registered for every s2io net device; wires
 * the callbacks defined above into the kernel's ethtool framework.
 */
6661 static const struct ethtool_ops netdev_ethtool_ops = {
6662 .get_settings = s2io_ethtool_gset,
6663 .set_settings = s2io_ethtool_sset,
6664 .get_drvinfo = s2io_ethtool_gdrvinfo,
6665 .get_regs_len = s2io_ethtool_get_regs_len,
6666 .get_regs = s2io_ethtool_gregs,
6667 .get_link = ethtool_op_get_link,
6668 .get_eeprom_len = s2io_get_eeprom_len,
6669 .get_eeprom = s2io_ethtool_geeprom,
6670 .set_eeprom = s2io_ethtool_seeprom,
6671 .get_ringparam = s2io_ethtool_gringparam,
6672 .get_pauseparam = s2io_ethtool_getpause_data,
6673 .set_pauseparam = s2io_ethtool_setpause_data,
6674 .get_rx_csum = s2io_ethtool_get_rx_csum,
6675 .set_rx_csum = s2io_ethtool_set_rx_csum,
6676 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
6677 .set_sg = ethtool_op_set_sg,
6678 .get_tso = s2io_ethtool_op_get_tso,
6679 .set_tso = s2io_ethtool_op_set_tso,
6680 .set_ufo = ethtool_op_set_ufo,
6681 .self_test = s2io_ethtool_test,
6682 .get_strings = s2io_ethtool_get_strings,
6683 .phys_id = s2io_ethtool_idnic,
6684 .get_ethtool_stats = s2io_get_ethtool_stats,
6685 .get_sset_count = s2io_get_sset_count,
6689 * s2io_ioctl - Entry point for the Ioctl
6690 * @dev : Device pointer.
6691 * @ifr : An IOCTL specific structure, that can contain a pointer to
6692 * a proprietary structure used to pass information to the driver.
6693 * @cmd : This is used to distinguish between the different commands that
6694 * can be passed to the IOCTL functions.
6696 * Currently there are no special functionality supported in IOCTL, hence
6697 * this function always returns -EOPNOTSUPP.
6700 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6706 * s2io_change_mtu - entry point to change MTU size for the device.
6707 * @dev : device pointer.
6708 * @new_mtu : the new MTU size for the device.
6709 * Description: A driver entry point to change MTU size for the device.
6710 * Before changing the MTU the device must be stopped.
6712 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6716 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6718 struct s2io_nic *sp = dev->priv;
/* Reject MTUs outside the hardware-supported range. */
6721 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6722 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* Running device: quiesce TX, bring the card back up with the new MTU,
 * then re-enable the TX queues. */
6728 if (netif_running(dev)) {
6729 s2io_stop_all_tx_queue(sp);
6731 ret = s2io_card_up(sp);
6733 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6737 s2io_wake_all_tx_queue(sp);
6738 } else { /* Device is down */
6739 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6740 u64 val64 = new_mtu;
/* Device down: just program the new max payload length register. */
6742 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6749 * s2io_tasklet - Bottom half of the ISR.
6750 * @dev_addr : address of the device structure cast to unsigned long.
6752 * This is the tasklet or the bottom half of the ISR. This is
6753 * an extension of the ISR which is scheduled by the scheduler to be run
6754 * when the load on the CPU is low. All low priority tasks of the ISR can
6755 * be pushed into the tasklet. For now the tasklet is used only to
6756 * replenish the Rx buffers in the Rx buffer descriptors.
6761 static void s2io_tasklet(unsigned long dev_addr)
6763 struct net_device *dev = (struct net_device *) dev_addr;
6764 struct s2io_nic *sp = dev->priv;
6766 struct mac_info *mac_control;
6767 struct config_param *config;
6769 mac_control = &sp->mac_control;
6770 config = &sp->config;
/* TASKLET_IN_USE test-and-sets a busy bit so only one instance runs;
 * cleared via clear_bit below when refilling is done. */
6772 if (!TASKLET_IN_USE) {
6773 for (i = 0; i < config->rx_ring_num; i++) {
6774 ret = fill_rx_buffers(sp, i);
6775 if (ret == -ENOMEM) {
6776 DBG_PRINT(INFO_DBG, "%s: Out of ",
6778 DBG_PRINT(INFO_DBG, "memory in tasklet\n");
6780 } else if (ret == -EFILL) {
6782 "%s: Rx Ring %d is full\n",
6787 clear_bit(0, (&sp->tasklet_status));
6792 * s2io_set_link - Set the Link status
6793 * @data: long pointer to device private structure
6794 * Description: Sets the link status for the adapter
6797 static void s2io_set_link(struct work_struct *work)
6799 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
6800 struct net_device *dev = nic->dev;
6801 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6807 if (!netif_running(dev))
/* Serialize against reset: if the link-task bit is already set the
 * card is being reset and there is nothing useful to do. */
6810 if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
6811 /* The card is being reset, no point doing anything */
6815 subid = nic->pdev->subsystem_device;
6816 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
6818 * Allow a small delay for the NICs self initiated
6819 * cleanup to complete.
6824 val64 = readq(&bar0->adapter_status);
6825 if (LINK_IS_UP(val64)) {
/* Link up: enable the adapter (if quiescent) and drive the link LED /
 * GPIO workaround for cards with faulty link indicators. */
6826 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
6827 if (verify_xena_quiescence(nic)) {
6828 val64 = readq(&bar0->adapter_control);
6829 val64 |= ADAPTER_CNTL_EN;
6830 writeq(val64, &bar0->adapter_control);
6831 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
6832 nic->device_type, subid)) {
6833 val64 = readq(&bar0->gpio_control);
6834 val64 |= GPIO_CTRL_GPIO_0;
6835 writeq(val64, &bar0->gpio_control);
6836 val64 = readq(&bar0->gpio_control);
6838 val64 |= ADAPTER_LED_ON;
6839 writeq(val64, &bar0->adapter_control);
6841 nic->device_enabled_once = TRUE;
6843 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6844 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6845 s2io_stop_all_tx_queue(nic);
6848 val64 = readq(&bar0->adapter_control);
6849 val64 |= ADAPTER_LED_ON;
6850 writeq(val64, &bar0->adapter_control);
6851 s2io_link(nic, LINK_UP);
/* Link down: undo the GPIO workaround, turn the LED off and report
 * LINK_DOWN to the stack. */
6853 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
6855 val64 = readq(&bar0->gpio_control);
6856 val64 &= ~GPIO_CTRL_GPIO_0;
6857 writeq(val64, &bar0->gpio_control);
6858 val64 = readq(&bar0->gpio_control);
6861 val64 = readq(&bar0->adapter_control);
6862 val64 = val64 &(~ADAPTER_LED_ON);
6863 writeq(val64, &bar0->adapter_control);
6864 s2io_link(nic, LINK_DOWN);
6866 clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));
/*
 * set_rxd_buffer_pointer - (re)attach skb and DMA buffers to one RxD.
 * Used while resetting descriptor ownership: if the RxD still owns an
 * skb, the previously mapped addresses cached in *temp0..*temp2 are
 * reused; otherwise a fresh skb is allocated and DMA-mapped. On
 * mapping failure all mappings made so far are undone and the skb is
 * freed (memalloc_failed path).
 */
6872 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
6874 struct sk_buff **skb, u64 *temp0, u64 *temp1,
6875 u64 *temp2, int size)
6877 struct net_device *dev = sp->dev;
6878 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
/* Single-buffer mode: one mapping covers the whole frame. */
6880 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
6881 struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
6884 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
6886 * As Rx frame are not going to be processed,
6887 * using same mapped address for the Rxd
6890 rxdp1->Buffer0_ptr = *temp0;
6892 *skb = dev_alloc_skb(size);
6894 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6895 DBG_PRINT(INFO_DBG, "memory to allocate ");
6896 DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
6897 sp->mac_control.stats_info->sw_stat. \
6898 mem_alloc_fail_cnt++;
6901 sp->mac_control.stats_info->sw_stat.mem_allocated
6902 += (*skb)->truesize;
6903 /* storing the mapped addr in a temp variable
6904 * such it will be used for next rxd whose
6905 * Host Control is NULL
6907 rxdp1->Buffer0_ptr = *temp0 =
6908 pci_map_single( sp->pdev, (*skb)->data,
6909 size - NET_IP_ALIGN,
6910 PCI_DMA_FROMDEVICE);
6911 if( (rxdp1->Buffer0_ptr == 0) ||
6912 (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
6913 goto memalloc_failed;
6915 rxdp->Host_Control = (unsigned long) (*skb);
6917 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6918 struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
6919 /* Two buffer Mode */
6921 rxdp3->Buffer2_ptr = *temp2;
6922 rxdp3->Buffer0_ptr = *temp0;
6923 rxdp3->Buffer1_ptr = *temp1;
6925 *skb = dev_alloc_skb(size);
6927 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
6928 DBG_PRINT(INFO_DBG, "memory to allocate ");
6929 DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
6930 sp->mac_control.stats_info->sw_stat. \
6931 mem_alloc_fail_cnt++;
6934 sp->mac_control.stats_info->sw_stat.mem_allocated
6935 += (*skb)->truesize;
/* Buffer 2 holds the payload (skb data). */
6936 rxdp3->Buffer2_ptr = *temp2 =
6937 pci_map_single(sp->pdev, (*skb)->data,
6939 PCI_DMA_FROMDEVICE);
6940 if( (rxdp3->Buffer2_ptr == 0) ||
6941 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
6942 goto memalloc_failed;
/* Buffer 0 holds the header area from the buffAdd pool. */
6944 rxdp3->Buffer0_ptr = *temp0 =
6945 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6946 PCI_DMA_FROMDEVICE);
6947 if( (rxdp3->Buffer0_ptr == 0) ||
6948 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
6949 pci_unmap_single (sp->pdev,
6950 (dma_addr_t)rxdp3->Buffer2_ptr,
6951 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6952 goto memalloc_failed;
6954 rxdp->Host_Control = (unsigned long) (*skb);
6956 /* Buffer-1 will be dummy buffer not used */
6957 rxdp3->Buffer1_ptr = *temp1 =
6958 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6959 PCI_DMA_FROMDEVICE);
6960 if( (rxdp3->Buffer1_ptr == 0) ||
6961 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
6962 pci_unmap_single (sp->pdev,
6963 (dma_addr_t)rxdp3->Buffer0_ptr,
6964 BUF0_LEN, PCI_DMA_FROMDEVICE);
6965 pci_unmap_single (sp->pdev,
6966 (dma_addr_t)rxdp3->Buffer2_ptr,
6967 dev->mtu + 4, PCI_DMA_FROMDEVICE);
6968 goto memalloc_failed;
/* Common failure path: account the failure and drop the skb. */
6974 stats->pci_map_fail_cnt++;
6975 stats->mem_freed += (*skb)->truesize;
6976 dev_kfree_skb(*skb);
/*
 * set_rxd_buffer_size - program per-buffer sizes into an RxD.
 * Mode 1 uses a single buffer of @size (minus the IP-alignment pad);
 * mode 3B splits the frame into header (BUF0_LEN), a 1-byte dummy
 * buffer 1, and a payload buffer of mtu + 4.
 */
6980 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6983 struct net_device *dev = sp->dev;
6984 if (sp->rxd_mode == RXD_MODE_1) {
6985 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6986 } else if (sp->rxd_mode == RXD_MODE_3B) {
6987 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6988 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6989 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
/*
 * rxd_owner_bit_reset - hand every receive descriptor back to hardware.
 * Walks all rings/blocks/descriptors, re-attaches buffers via
 * set_rxd_buffer_pointer, programs buffer sizes, and sets RXD_OWN_XENA
 * so the NIC owns each descriptor again.
 */
6993 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6995 int i, j, k, blk_cnt = 0, size;
6996 struct mac_info * mac_control = &sp->mac_control;
6997 struct config_param *config = &sp->config;
6998 struct net_device *dev = sp->dev;
6999 struct RxD_t *rxdp = NULL;
7000 struct sk_buff *skb = NULL;
7001 struct buffAdd *ba = NULL;
7002 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
7004 /* Calculate the size based on ring mode */
7005 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
7006 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
7007 if (sp->rxd_mode == RXD_MODE_1)
7008 size += NET_IP_ALIGN;
7009 else if (sp->rxd_mode == RXD_MODE_3B)
7010 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
7012 for (i = 0; i < config->rx_ring_num; i++) {
/* +1 accounts for the chaining descriptor at the end of each block. */
7013 blk_cnt = config->rx_cfg[i].num_rxd /
7014 (rxd_count[sp->rxd_mode] +1);
7016 for (j = 0; j < blk_cnt; j++) {
7017 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
7018 rxdp = mac_control->rings[i].
7019 rx_blocks[j].rxds[k].virt_addr;
7020 if(sp->rxd_mode == RXD_MODE_3B)
7021 ba = &mac_control->rings[i].ba[j][k];
7022 if (set_rxd_buffer_pointer(sp, rxdp, ba,
7023 &skb,(u64 *)&temp0_64,
7030 set_rxd_buffer_size(sp, rxdp, size);
7032 /* flip the Ownership bit to Hardware */
7033 rxdp->Control_1 |= RXD_OWN_XENA;
/*
 * s2io_add_isr - register the interrupt handler(s) for the NIC.
 * Tries MSI-X first when configured (one vector per TX FIFO / RX ring),
 * falling back to a single shared INTA handler if MSI-X enabling or any
 * per-vector request_irq fails.
 */
7041 static int s2io_add_isr(struct s2io_nic * sp)
7044 struct net_device *dev = sp->dev;
7047 if (sp->config.intr_type == MSI_X)
7048 ret = s2io_enable_msi_x(sp);
7050 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
7051 sp->config.intr_type = INTA;
7054 /* Store the values of the MSIX table in the struct s2io_nic structure */
7055 store_xmsi_data(sp);
7057 /* After proper initialization of H/W, register ISR */
7058 if (sp->config.intr_type == MSI_X) {
7059 int i, msix_tx_cnt=0,msix_rx_cnt=0;
/* Entry 0 is the alarm vector; data vectors start at index 1. */
7061 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
7062 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
7063 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
7065 err = request_irq(sp->entries[i].vector,
7066 s2io_msix_fifo_handle, 0, sp->desc[i],
7067 sp->s2io_entries[i].arg);
7068 /* If either data or addr is zero print it */
7069 if(!(sp->msix_info[i].addr &&
7070 sp->msix_info[i].data)) {
7071 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
7072 "Data:0x%lx\n",sp->desc[i],
7073 (unsigned long long)
7074 sp->msix_info[i].addr,
7076 ntohl(sp->msix_info[i].data));
7081 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
7083 err = request_irq(sp->entries[i].vector,
7084 s2io_msix_ring_handle, 0, sp->desc[i],
7085 sp->s2io_entries[i].arg);
7086 /* If either data or addr is zero print it */
7087 if(!(sp->msix_info[i].addr &&
7088 sp->msix_info[i].data)) {
7089 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
7090 "Data:0x%lx\n",sp->desc[i],
7091 (unsigned long long)
7092 sp->msix_info[i].addr,
7094 ntohl(sp->msix_info[i].data));
/* A failed vector registration tears down all MSI-X state and
 * falls back to legacy INTA. */
7100 remove_msix_isr(sp);
7101 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
7102 "failed\n", dev->name, i);
7103 DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
7105 sp->config.intr_type = INTA;
7108 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
7111 printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
7113 printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
7117 if (sp->config.intr_type == INTA) {
7118 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
7121 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
/*
 * s2io_rem_isr - unregister the interrupt handler(s).
 * @sp: device private structure.
 *
 * Frees MSI-X vectors when MSI-X was in use, otherwise the INTA line.
 */
7128 static void s2io_rem_isr(struct s2io_nic * sp)
7130 if (sp->config.intr_type == MSI_X)
7131 remove_msix_isr(sp);
7133 remove_inta_isr(sp);
/*
 * do_s2io_card_down - bring the adapter down.
 * @sp: device private structure.
 * @do_io: when set, device registers are touched during teardown.
 *
 * Stops the alarm timer, waits out a concurrently running link task,
 * clears the CARD_UP state, disables NAPI and the tasklet, replenishes
 * Rx descriptor ownership for quiescence, verifies the adapter is
 * quiescent, then frees all Tx and Rx buffers (Rx under rx_lock).
 */
7136 static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
7139 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7140 unsigned long flags;
7141 register u64 val64 = 0;
7142 struct config_param *config;
7143 config = &sp->config;
/* Nothing to do if the card is already down */
7145 if (!is_s2io_card_up(sp))
7148 del_timer_sync(&sp->alarm_timer);
7149 /* If s2io_set_link task is executing, wait till it completes. */
7150 while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
7153 clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
7157 napi_disable(&sp->napi);
7159 /* disable Tx and Rx traffic on the NIC */
7166 tasklet_kill(&sp->task);
7168 /* Check if the device is Quiescent and then Reset the NIC */
7170 /* As per the HW requirement we need to replenish the
7171 * receive buffer to avoid the ring bump. Since there is
7172 * no intention of processing the Rx frame at this point we are
7173 * just setting the ownership bit of each RxD in every Rx
7174 * ring to HW and set the appropriate buffer size
7175 * based on the ring mode
7177 rxd_owner_bit_reset(sp);
7179 val64 = readq(&bar0->adapter_status);
7180 if (verify_xena_quiescence(sp)) {
7181 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
7189 "s2io_close:Device not Quiescent ");
7190 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
7191 (unsigned long long) val64);
7198 /* Free all Tx buffers */
7199 free_tx_buffers(sp);
7201 /* Free all Rx buffers */
7202 spin_lock_irqsave(&sp->rx_lock, flags);
7203 free_rx_buffers(sp);
7204 spin_unlock_irqrestore(&sp->rx_lock, flags);
/* Allow a waiting link task to proceed again */
7206 clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
/*
 * s2io_card_down - wrapper for do_s2io_card_down() with register I/O
 * enabled (do_io = 1).
 */
7209 static void s2io_card_down(struct s2io_nic * sp)
7211 do_s2io_card_down(sp, 1);
/*
 * s2io_card_up - bring the adapter up.
 * @sp: device private structure.
 *
 * Initializes the hardware registers, fills the Rx rings with buffers,
 * enables NAPI, restores the multicast/promiscuous receive mode, sizes
 * the LRO aggregation limit from the MTU, starts the NIC, registers the
 * ISR, arms the alarm timer and tasklet, and finally enables traffic
 * interrupts and marks the card up.  On any failure the Rx buffers
 * allocated so far are released.
 */
7214 static int s2io_card_up(struct s2io_nic * sp)
7217 struct mac_info *mac_control;
7218 struct config_param *config;
7219 struct net_device *dev = (struct net_device *) sp->dev;
7222 /* Initialize the H/W I/O registers */
7225 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
7233 * Initializing the Rx buffers. For now we are considering only 1
7234 * Rx ring and initializing buffers into 30 Rx blocks
7236 mac_control = &sp->mac_control;
7237 config = &sp->config;
7239 for (i = 0; i < config->rx_ring_num; i++) {
7240 if ((ret = fill_rx_buffers(sp, i))) {
7241 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
7244 free_rx_buffers(sp);
7247 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
7248 atomic_read(&sp->rx_bufs_left[i]));
7251 /* Initialise napi */
7253 napi_enable(&sp->napi);
7255 /* Maintain the state prior to the open */
7256 if (sp->promisc_flg)
7257 sp->promisc_flg = 0;
7258 if (sp->m_cast_flg) {
7260 sp->all_multi_pos= 0;
7263 /* Setting its receive mode */
7264 s2io_set_multicast(dev);
7267 /* Initialize max aggregatable pkts per session based on MTU */
7268 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
7269 /* Check if we can use(if specified) user provided value */
7270 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
7271 sp->lro_max_aggr_per_sess = lro_max_pkts;
7274 /* Enable Rx Traffic and interrupts on the NIC */
7275 if (start_nic(sp)) {
7276 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
7278 free_rx_buffers(sp);
7282 /* Add interrupt service routine */
7283 if (s2io_add_isr(sp) != 0) {
7284 if (sp->config.intr_type == MSI_X)
7287 free_rx_buffers(sp);
/* Alarm handler fires every half second */
7291 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
7293 /* Enable tasklet for the device */
7294 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
7296 /* Enable select interrupts */
7297 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
7298 if (sp->config.intr_type != INTA)
7299 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
7301 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
7302 interruptible |= TX_PIC_INTR;
7303 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
7306 set_bit(__S2IO_STATE_CARD_UP, &sp->state);
7311 * s2io_restart_nic - Resets the NIC.
7312 * @data : long pointer to the device private structure
7314 * This function is scheduled to be run by the s2io_tx_watchdog
7315 * function after 0.5 secs to reset the NIC. The idea is to reduce
7316 * the run time of the watch dog routine which is run holding a
7320 static void s2io_restart_nic(struct work_struct *work)
7322 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
7323 struct net_device *dev = sp->dev;
/* Device may have been closed while the work item was queued */
7327 if (!netif_running(dev))
7331 if (s2io_card_up(sp)) {
7332 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
/* Restart all Tx queues now that the card is back up */
7335 s2io_wake_all_tx_queue(sp);
7336 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7343 * s2io_tx_watchdog - Watchdog for transmit side.
7344 * @dev : Pointer to net device structure
7346 * This function is triggered if the Tx Queue is stopped
7347 * for a pre-defined amount of time when the Interface is still up.
7348 * If the Interface is jammed in such a situation, the hardware is
7349 * reset (by s2io_close) and restarted again (by s2io_open) to
7350 * overcome any problem that might have been caused in the hardware.
7355 static void s2io_tx_watchdog(struct net_device *dev)
7357 struct s2io_nic *sp = dev->priv;
/* Only reset when link is up: schedules s2io_restart_nic() work item */
7359 if (netif_carrier_ok(dev)) {
7360 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
7361 schedule_work(&sp->rst_timer_task);
7362 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
7367 * rx_osm_handler - To perform some OS related operations on SKB.
7368 * @sp: private member of the device structure,pointer to s2io_nic structure.
7369 * @skb : the socket buffer pointer.
7370 * @len : length of the packet
7371 * @cksum : FCS checksum of the frame.
7372 * @ring_no : the ring from which this RxD was extracted.
7374 * This function is called by the Rx interrupt service routine to perform
7375 * some OS related operations on the SKB before passing it to the upper
7376 * layers. It mainly checks if the checksum is OK, if so adds it to the
7377 * SKBs cksum variable, increments the Rx packet count and passes the SKB
7378 * to the upper layer. If the checksum is wrong, it increments the Rx
7379 * packet error count, frees the SKB and returns error.
7381 * SUCCESS on success and -1 on failure.
7383 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7385 struct s2io_nic *sp = ring_data->nic;
7386 struct net_device *dev = (struct net_device *) sp->dev;
7387 struct sk_buff *skb = (struct sk_buff *)
7388 ((unsigned long) rxdp->Host_Control);
7389 int ring_no = ring_data->ring_no;
7390 u16 l3_csum, l4_csum;
7391 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
7398 /* Check for parity error */
7400 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
/* Transfer code lives in the top bits of Control_1 */
7402 err_mask = err >> 48;
7405 sp->mac_control.stats_info->sw_stat.
7406 rx_parity_err_cnt++;
7410 sp->mac_control.stats_info->sw_stat.
7415 sp->mac_control.stats_info->sw_stat.
7416 rx_parity_abort_cnt++;
7420 sp->mac_control.stats_info->sw_stat.
7425 sp->mac_control.stats_info->sw_stat.
7430 sp->mac_control.stats_info->sw_stat.
7435 sp->mac_control.stats_info->sw_stat.
7436 rx_buf_size_err_cnt++;
7440 sp->mac_control.stats_info->sw_stat.
7441 rx_rxd_corrupt_cnt++;
7445 sp->mac_control.stats_info->sw_stat.
7450 * Drop the packet if bad transfer code. Exception being
7451 * 0x5, which could be due to unsupported IPv6 extension header.
7452 * In this case, we let stack handle the packet.
7453 * Note that in this case, since checksum will be incorrect,
7454 * stack will validate the same.
7456 if (err_mask != 0x5) {
7457 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
7458 dev->name, err_mask);
7459 sp->stats.rx_crc_errors++;
7460 sp->mac_control.stats_info->sw_stat.mem_freed
7463 atomic_dec(&sp->rx_bufs_left[ring_no]);
7464 rxdp->Host_Control = 0;
7469 /* Updating statistics */
7470 sp->stats.rx_packets++;
7471 rxdp->Host_Control = 0;
7472 if (sp->rxd_mode == RXD_MODE_1) {
7473 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
7475 sp->stats.rx_bytes += len;
7478 } else if (sp->rxd_mode == RXD_MODE_3B) {
/* 3-buffer mode: header (buffer 0) is copied in front of the payload
 * (buffer 2) that the HW DMAed into the skb */
7479 int get_block = ring_data->rx_curr_get_info.block_index;
7480 int get_off = ring_data->rx_curr_get_info.offset;
7481 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
7482 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
7483 unsigned char *buff = skb_push(skb, buf0_len);
7485 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
7486 sp->stats.rx_bytes += buf0_len + buf2_len;
7487 memcpy(buff, ba->ba_0, buf0_len);
7488 skb_put(skb, buf2_len);
/* Hardware checksum offload: trust HW result for non-fragmented TCP/UDP */
7491 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
7492 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
7494 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
7495 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
7496 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
7498 * NIC verifies if the Checksum of the received
7499 * frame is Ok or not and accordingly returns
7500 * a flag in the RxD.
7502 skb->ip_summed = CHECKSUM_UNNECESSARY;
/* LRO path: try to merge this segment into an existing session */
7508 ret = s2io_club_tcp_session(skb->data, &tcp,
7512 case 3: /* Begin anew */
7515 case 1: /* Aggregate */
7517 lro_append_pkt(sp, lro,
7521 case 4: /* Flush session */
7523 lro_append_pkt(sp, lro,
7525 queue_rx_frame(lro->parent);
7526 clear_lro_session(lro);
7527 sp->mac_control.stats_info->
7528 sw_stat.flush_max_pkts++;
7531 case 2: /* Flush both */
7532 lro->parent->data_len =
7534 sp->mac_control.stats_info->
7535 sw_stat.sending_both++;
7536 queue_rx_frame(lro->parent);
7537 clear_lro_session(lro);
7539 case 0: /* sessions exceeded */
7540 case -1: /* non-TCP or not
7544 * First pkt in session not
7545 * L3/L4 aggregatable
7550 "%s: Samadhana!!\n",
7557 * Packet with erroneous checksum, let the
7558 * upper layers deal with it.
7560 skb->ip_summed = CHECKSUM_NONE;
7563 skb->ip_summed = CHECKSUM_NONE;
7565 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7567 skb->protocol = eth_type_trans(skb, dev);
7568 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7570 /* Queueing the vlan frame to the upper layer */
7572 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7573 RXD_GET_VLAN_TAG(rxdp->Control_2));
7575 vlan_hwaccel_rx(skb, sp->vlgrp,
7576 RXD_GET_VLAN_TAG(rxdp->Control_2));
7579 netif_receive_skb(skb);
7585 queue_rx_frame(skb);
7587 dev->last_rx = jiffies;
7589 atomic_dec(&sp->rx_bufs_left[ring_no]);
7594 * s2io_link - stops/starts the Tx queue.
7595 * @sp : private member of the device structure, which is a pointer to the
7596 * s2io_nic structure.
7597 * @link : indicates whether link is UP/DOWN.
7599 * This function stops/starts the Tx queue depending on whether the link
7600 * status of the NIC is down or up. This is called by the Alarm
7601 * interrupt handler whenever a link change interrupt comes up.
7606 static void s2io_link(struct s2io_nic * sp, int link)
7608 struct net_device *dev = (struct net_device *) sp->dev;
/* Act only on actual state transitions */
7610 if (link != sp->last_link_state) {
7612 if (link == LINK_DOWN) {
7613 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7614 s2io_stop_all_tx_queue(sp);
7615 netif_carrier_off(dev);
/* Record how long the previous link-up period lasted */
7616 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7617 sp->mac_control.stats_info->sw_stat.link_up_time =
7618 jiffies - sp->start_time;
7619 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
7621 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
/* Record how long the previous link-down period lasted */
7622 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
7623 sp->mac_control.stats_info->sw_stat.link_down_time =
7624 jiffies - sp->start_time;
7625 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7626 netif_carrier_on(dev);
7627 s2io_wake_all_tx_queue(sp);
7630 sp->last_link_state = link;
7631 sp->start_time = jiffies;
7635 * s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
7636 * @sp : private member of the device structure, which is a pointer to the
7637 * s2io_nic structure.
7639 * This function initializes a few of the PCI and PCI-X configuration registers
7640 * with recommended values.
7645 static void s2io_init_pci(struct s2io_nic * sp)
7647 u16 pci_cmd = 0, pcix_cmd = 0;
7649 /* Enable Data Parity Error Recovery in PCI-X command register. */
7650 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7652 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Read back to flush/verify the write */
7654 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
7657 /* Set the PErr Response bit in PCI command register. */
7658 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7659 pci_write_config_word(sp->pdev, PCI_COMMAND,
7660 (pci_cmd | PCI_COMMAND_PARITY))
7661 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/*
 * s2io_verify_parm - sanity-check module load parameters.
 * @pdev: PCI device (used to reject MSI-X on non-Herc adapters).
 * @dev_intr_type: in/out interrupt type; coerced to INTA when invalid
 *                 or unsupported by the hardware.
 * @dev_multiq: out flag for multiqueue support.
 *
 * Clamps tx_fifo_num, rx_ring_num, tx_steering_type, rx_ring_mode and
 * intr_type to supported values, printing a diagnostic for each
 * correction made.
 */
7664 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7667 if ((tx_fifo_num > MAX_TX_FIFOS) ||
7668 (tx_fifo_num < 1)) {
7669 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7670 "(%d) not supported\n", tx_fifo_num);
7672 if (tx_fifo_num < 1)
7675 tx_fifo_num = MAX_TX_FIFOS;
7677 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7678 DBG_PRINT(ERR_DBG, "tx fifos\n");
/* Multiqueue requires kernel support compiled in */
7681 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
7683 DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
7688 *dev_multiq = multiq;
/* Tx steering is meaningless with a single FIFO */
7690 if (tx_steering_type && (1 == tx_fifo_num)) {
7691 if (tx_steering_type != TX_DEFAULT_STEERING)
7693 "s2io: Tx steering is not supported with "
7694 "one fifo. Disabling Tx steering.\n");
7695 tx_steering_type = NO_STEERING;
7698 if ((tx_steering_type < NO_STEERING) ||
7699 (tx_steering_type > TX_DEFAULT_STEERING)) {
7700 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7702 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7703 tx_steering_type = NO_STEERING;
7706 if ( rx_ring_num > 8) {
7707 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7709 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7712 if (*dev_intr_type != INTA)
7715 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
7716 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7717 "Defaulting to INTA\n");
7718 *dev_intr_type = INTA;
/* Only Xframe II (Herc) supports MSI-X */
7721 if ((*dev_intr_type == MSI_X) &&
7722 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7723 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
7724 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
7725 "Defaulting to INTA\n");
7726 *dev_intr_type = INTA;
7729 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
7730 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
7731 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7738 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7739 * or Traffic class respectively.
7740 * @nic: device private variable
7741 * Description: The function configures the receive steering to
7742 * desired receive ring.
7743 * Return Value: SUCCESS on success and
7744 * '-1' on failure (endian settings incorrect).
7746 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7748 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7749 register u64 val64 = 0;
/* DS codepoint is a 6-bit value; reject anything larger */
7751 if (ds_codepoint > 63)
7754 val64 = RTS_DS_MEM_DATA(ring);
7755 writeq(val64, &bar0->rts_ds_mem_data);
/* Issue the write-entry command for this codepoint */
7757 val64 = RTS_DS_MEM_CTRL_WE |
7758 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7759 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7761 writeq(val64, &bar0->rts_ds_mem_ctrl);
/* Poll until the strobe bit clears (command accepted by HW) */
7763 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7764 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7769 * s2io_init_nic - Initialization of the adapter.
7770 * @pdev : structure containing the PCI related information of the device.
7771 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
7773 * The function initializes an adapter identified by the pci_dev structure.
7774 * All OS related initialization including memory and device structure and
7775 * initialization of the device private variable is done. Also the swapper
7776 * control register is initialized to enable read and write into the I/O
7777 * registers of the device.
7779 * returns 0 on success and negative on failure.
7782 static int __devinit
7783 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7785 struct s2io_nic *sp;
7786 struct net_device *dev;
7788 int dma_flag = FALSE;
7789 u32 mac_up, mac_down;
7790 u64 val64 = 0, tmp64 = 0;
7791 struct XENA_dev_config __iomem *bar0 = NULL;
7793 struct mac_info *mac_control;
7794 struct config_param *config;
7796 u8 dev_intr_type = intr_type;
7798 DECLARE_MAC_BUF(mac);
/* Validate module parameters before touching the hardware */
7800 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7804 if ((ret = pci_enable_device(pdev))) {
7806 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer 64-bit DMA; fall back to 32-bit, else bail out */
7810 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
7811 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
7813 if (pci_set_consistent_dma_mask
7814 (pdev, DMA_64BIT_MASK)) {
7816 "Unable to obtain 64bit DMA for \
7817 consistent allocations\n");
7818 pci_disable_device(pdev);
7821 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
7822 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
7824 pci_disable_device(pdev);
7827 if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
7828 DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
7829 pci_disable_device(pdev);
7832 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
7834 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7837 dev = alloc_etherdev(sizeof(struct s2io_nic));
7839 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
7840 pci_disable_device(pdev);
7841 pci_release_regions(pdev);
7845 pci_set_master(pdev);
7846 pci_set_drvdata(pdev, dev);
7847 SET_NETDEV_DEV(dev, &pdev->dev);
7849 /* Private member variable initialized to s2io NIC structure */
7851 memset(sp, 0, sizeof(struct s2io_nic));
7854 sp->high_dma_flag = dma_flag;
7855 sp->device_enabled_once = FALSE;
7856 if (rx_ring_mode == 1)
7857 sp->rxd_mode = RXD_MODE_1;
7858 if (rx_ring_mode == 2)
7859 sp->rxd_mode = RXD_MODE_3B;
7861 sp->config.intr_type = dev_intr_type;
/* Identify Xframe I vs Xframe II (Herc) by PCI device ID */
7863 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
7864 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
7865 sp->device_type = XFRAME_II_DEVICE;
7867 sp->device_type = XFRAME_I_DEVICE;
7869 sp->lro = lro_enable;
7871 /* Initialize some PCI/PCI-X fields of the NIC. */
7875 * Setting the device configuration parameters.
7876 * Most of these parameters can be specified by the user during
7877 * module insertion as they are module loadable parameters. If
7878 * these parameters are not specified during load time, they
7879 * are initialized with default values.
7881 mac_control = &sp->mac_control;
7882 config = &sp->config;
7884 config->napi = napi;
7885 config->tx_steering_type = tx_steering_type;
7887 /* Tx side parameters. */
7888 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7889 config->tx_fifo_num = MAX_TX_FIFOS;
7891 config->tx_fifo_num = tx_fifo_num;
7893 /* Initialize the fifos used for tx steering */
7894 if (config->tx_fifo_num < 5) {
7895 if (config->tx_fifo_num == 1)
7896 sp->total_tcp_fifos = 1;
7898 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7899 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7900 sp->total_udp_fifos = 1;
7901 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7903 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7904 FIFO_OTHER_MAX_NUM);
7905 sp->udp_fifo_idx = sp->total_tcp_fifos;
7906 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7907 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7910 config->multiq = dev_multiq;
7911 for (i = 0; i < config->tx_fifo_num; i++) {
7912 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7913 config->tx_cfg[i].fifo_priority = i;
7916 /* mapping the QoS priority to the configured fifos */
7917 for (i = 0; i < MAX_TX_FIFOS; i++)
7918 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7920 /* map the hashing selector table to the configured fifos */
7921 for (i = 0; i < config->tx_fifo_num; i++)
7922 sp->fifo_selector[i] = fifo_selector[i];
7925 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7926 for (i = 0; i < config->tx_fifo_num; i++) {
7927 config->tx_cfg[i].f_no_snoop =
7928 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
/* Short FIFOs force per-list Tx interrupts */
7929 if (config->tx_cfg[i].fifo_len < 65) {
7930 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
7934 /* + 2 because one Txd for skb->data and one Txd for UFO */
7935 config->max_txds = MAX_SKB_FRAGS + 2;
7937 /* Rx side parameters. */
7938 config->rx_ring_num = rx_ring_num;
7939 for (i = 0; i < MAX_RX_RINGS; i++) {
7940 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
7941 (rxd_count[sp->rxd_mode] + 1);
7942 config->rx_cfg[i].ring_priority = i;
7945 for (i = 0; i < rx_ring_num; i++) {
7946 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
7947 config->rx_cfg[i].f_no_snoop =
7948 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7951 /* Setting Mac Control parameters */
7952 mac_control->rmac_pause_time = rmac_pause_time;
7953 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7954 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7957 /* Initialize Ring buffer parameters. */
7958 for (i = 0; i < config->rx_ring_num; i++)
7959 atomic_set(&sp->rx_bufs_left[i], 0);
7961 /* initialize the shared memory used by the NIC and the host */
7962 if (init_shared_mem(sp)) {
7963 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7966 goto mem_alloc_failed;
/* Map BAR0 (registers) and BAR1 (Tx FIFO doorbells) */
7969 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7970 pci_resource_len(pdev, 0));
7972 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7975 goto bar0_remap_failed;
7978 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7979 pci_resource_len(pdev, 2));
7981 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7984 goto bar1_remap_failed;
7987 dev->irq = pdev->irq;
7988 dev->base_addr = (unsigned long) sp->bar0;
7990 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7991 for (j = 0; j < MAX_TX_FIFOS; j++) {
7992 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7993 (sp->bar1 + (j * 0x00020000));
7996 /* Driver entry points */
7997 dev->open = &s2io_open;
7998 dev->stop = &s2io_close;
7999 dev->hard_start_xmit = &s2io_xmit;
8000 dev->get_stats = &s2io_get_stats;
8001 dev->set_multicast_list = &s2io_set_multicast;
8002 dev->do_ioctl = &s2io_ioctl;
8003 dev->set_mac_address = &s2io_set_mac_addr;
8004 dev->change_mtu = &s2io_change_mtu;
8005 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
8006 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8007 dev->vlan_rx_register = s2io_vlan_rx_register;
8010 * will use eth_mac_addr() for dev->set_mac_address
8011 * mac address will be set every time dev->open() is called
8013 netif_napi_add(dev, &sp->napi, s2io_poll, 32);
8015 #ifdef CONFIG_NET_POLL_CONTROLLER
8016 dev->poll_controller = s2io_netpoll;
8019 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8020 if (sp->high_dma_flag == TRUE)
8021 dev->features |= NETIF_F_HIGHDMA;
8022 dev->features |= NETIF_F_TSO;
8023 dev->features |= NETIF_F_TSO6;
/* UFO is only offered on Xframe II when enabled by module parameter */
8024 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
8025 dev->features |= NETIF_F_UFO;
8026 dev->features |= NETIF_F_HW_CSUM;
8028 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
8030 dev->features |= NETIF_F_MULTI_QUEUE;
8032 dev->tx_timeout = &s2io_tx_watchdog;
8033 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
8034 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
8035 INIT_WORK(&sp->set_link_task, s2io_set_link);
8037 pci_save_state(sp->pdev);
8039 /* Setting swapper control on the NIC, for proper reset operation */
8040 if (s2io_set_swapper(sp)) {
8041 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
8044 goto set_swap_failed;
8047 /* Verify if the Herc works on the slot its placed into */
8048 if (sp->device_type & XFRAME_II_DEVICE) {
8049 mode = s2io_verify_pci_mode(sp);
8051 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
8052 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8054 goto set_swap_failed;
8058 /* Not needed for Herc */
8059 if (sp->device_type & XFRAME_I_DEVICE) {
8061 * Fix for all "FFs" MAC address problems observed on
8064 fix_mac_address(sp);
8069 * MAC address initialization.
8070 * For now only one mac address will be read and used.
8073 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
8074 RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
8075 writeq(val64, &bar0->rmac_addr_cmd_mem);
8076 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
8077 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
8078 tmp64 = readq(&bar0->rmac_addr_data0_mem);
8079 mac_down = (u32) tmp64;
8080 mac_up = (u32) (tmp64 >> 32);
/* Unpack the 48-bit MAC address from the two 32-bit register halves */
8082 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
8083 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
8084 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
8085 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
8086 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
8087 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
8089 /* Set the factory defined MAC address initially */
8090 dev->addr_len = ETH_ALEN;
8091 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
8092 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
8094 /* initialize number of multicast & unicast MAC entries variables */
8095 if (sp->device_type == XFRAME_I_DEVICE) {
8096 config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
8097 config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
8098 config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
8099 } else if (sp->device_type == XFRAME_II_DEVICE) {
8100 config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
8101 config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
8102 config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
8105 /* store mac addresses from CAM to s2io_nic structure */
8106 do_s2io_store_unicast_mc(sp);
8108 /* Store the values of the MSIX table in the s2io_nic structure */
8109 store_xmsi_data(sp);
8110 /* reset Nic and bring it to known state */
8114 * Initialize the tasklet status and link state flags
8115 * and the card state parameter
8117 sp->tasklet_status = 0;
8120 /* Initialize spinlocks */
8121 for (i = 0; i < sp->config.tx_fifo_num; i++)
8122 spin_lock_init(&mac_control->fifos[i].tx_lock);
8125 spin_lock_init(&sp->put_lock);
8126 spin_lock_init(&sp->rx_lock);
8129 * SXE-002: Configure link and activity LED to init state
8132 subid = sp->pdev->subsystem_device;
8133 if ((subid & 0xFF) >= 0x07) {
8134 val64 = readq(&bar0->gpio_control);
8135 val64 |= 0x0000800000000000ULL;
8136 writeq(val64, &bar0->gpio_control);
8137 val64 = 0x0411040400000000ULL;
8138 writeq(val64, (void __iomem *) bar0 + 0x2700);
8139 val64 = readq(&bar0->gpio_control);
8142 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
8144 if (register_netdev(dev)) {
8145 DBG_PRINT(ERR_DBG, "Device registration failed\n");
8147 goto register_failed;
8150 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
8151 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
8152 sp->product_name, pdev->revision);
8153 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
8154 s2io_driver_version);
8155 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
8156 dev->name, print_mac(mac, dev->dev_addr));
8157 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
8158 if (sp->device_type & XFRAME_II_DEVICE) {
8159 mode = s2io_print_pci_mode(sp);
8161 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
8163 unregister_netdev(dev);
8164 goto set_swap_failed;
8167 switch(sp->rxd_mode) {
8169 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
8173 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
8179 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8181 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8182 sp->config.tx_fifo_num);
8184 switch(sp->config.intr_type) {
8186 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
8189 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
8192 if (sp->config.multiq) {
8193 for (i = 0; i < sp->config.tx_fifo_num; i++)
8194 mac_control->fifos[i].multiq = config->multiq;
8195 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8198 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8201 switch (sp->config.tx_steering_type) {
8203 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8204 " transmit\n", dev->name);
8206 case TX_PRIORITY_STEERING:
8207 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8208 " transmit\n", dev->name);
8210 case TX_DEFAULT_STEERING:
8211 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8212 " transmit\n", dev->name);
8216 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
8219 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
8220 " enabled\n", dev->name);
8221 /* Initialize device name */
8222 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
8225 * Make Link state as off at this point, when the Link change
8226 * interrupt comes the state will be automatically changed to
8229 netif_carrier_off(dev);
/* Error unwind: release resources in reverse order of acquisition */
8240 free_shared_mem(sp);
8241 pci_disable_device(pdev);
8242 pci_release_regions(pdev);
8243 pci_set_drvdata(pdev, NULL);
8250 * s2io_rem_nic - Free the PCI device
8251 * @pdev: structure containing the PCI related information of the device.
8252 * Description: This function is called by the Pci subsystem to release a
8253 * PCI device and free up all resource held up by the device. This could
8254 * be in response to a Hot plug event or when the driver is to be removed
8258 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8260 struct net_device *dev =
8261 (struct net_device *) pci_get_drvdata(pdev);
8262 struct s2io_nic *sp;
8265 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Make sure no queued restart/link work is still pending */
8269 flush_scheduled_work();
8272 unregister_netdev(dev);
8274 free_shared_mem(sp);
8277 pci_release_regions(pdev);
8278 pci_set_drvdata(pdev, NULL);
8280 pci_disable_device(pdev);
8284 * s2io_starter - Entry point for the driver
8285 * Description: This function is the entry point for the driver. It verifies
8286 * the module loadable parameters and initializes PCI configuration space.
8289 static int __init s2io_starter(void)
8291 return pci_register_driver(&s2io_driver);
8295 * s2io_closer - Cleanup routine for the driver
8296 * Description: This function is the cleanup routine for the driver. It
8296 * unregisters the driver.
8299 static __exit void s2io_closer(void)
8301 pci_unregister_driver(&s2io_driver);
8302 DBG_PRINT(INIT_DBG, "cleanup done\n");
8305 module_init(s2io_starter);
8306 module_exit(s2io_closer);
/*
 * check_L2_lro_capable - decide whether a frame's L2 framing allows LRO.
 * @buffer: start of the received frame.
 * @ip: out pointer to the parsed IP header.
 * @tcp: out pointer to the parsed TCP header.
 * @rxdp: receive descriptor for this frame.
 *
 * Only TCP frames with DIX (Ethernet II) framing, with or without VLAN,
 * are eligible; LLC/SNAP frames are treated as non-mergeable.  On
 * success *ip and *tcp point into the frame.
 */
8308 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8309 struct tcphdr **tcp, struct RxD_t *rxdp)
8312 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8314 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8315 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8321 * By default the VLAN field in the MAC is stripped by the card, if this
8322 * feature is turned off in rx_pa_cfg register, then the ip_off field
8323 * has to be shifted by a further 2 bytes
8326 case 0: /* DIX type */
8327 case 4: /* DIX type with VLAN */
8328 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8330 /* LLC, SNAP etc are considered non-mergeable */
8335 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8336 ip_len = (u8)((*ip)->ihl);
8338 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
/*
 * check_for_socket_match - compare an LRO session's 4-tuple (source and
 * destination IP address and TCP port) against an incoming packet's
 * headers.  A non-zero result means the packet belongs to another flow.
 */
8343 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
8346 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8347 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8348 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8353 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8355 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
/*
 * initiate_new_session - seed a free LRO object with the state of the
 * first packet of a new flow: expected next sequence number, latest ack,
 * running total length, and (if present) the TCP timestamp values.
 */
8358 static void initiate_new_session(struct lro *lro, u8 *l2h,
8359 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
8361 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Next in-order segment must start right after this packet's payload. */
8365 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
8366 lro->tcp_ack = tcp->ack_seq;
8368 lro->total_len = ntohs(ip->tot_len);
8371 * check if we saw TCP timestamp. Other consistency checks have
8372 * already been done.
/* doff == 8 words == 32 bytes: base header plus a 12-byte timestamp option. */
8374 if (tcp->doff == 8) {
8376 ptr = (__be32 *)(tcp+1);
/* tsval is kept host-order for comparisons; tsecr stays network-order. */
8378 lro->cur_tsval = ntohl(*(ptr+1));
8379 lro->cur_tsecr = *(ptr+2);
/*
 * update_L3L4_header - rewrite the first packet's IP and TCP headers so
 * they describe the whole aggregated super-packet before it is handed to
 * the stack: new total length and IP checksum, latest ack/window, and the
 * most recent timestamp echo reply.
 */
8384 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
8386 struct iphdr *ip = lro->iph;
8387 struct tcphdr *tcp = lro->tcph;
8389 struct stat_block *statinfo = sp->mac_control.stats_info;
8390 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8392 /* Update L3 header */
8393 ip->tot_len = htons(lro->total_len);
/* Recompute the IP header checksum over the modified header. */
8395 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8398 /* Update L4 header */
8399 tcp->ack_seq = lro->tcp_ack;
8400 tcp->window = lro->window;
8402 /* Update tsecr field if this session has timestamps enabled */
8404 __be32 *ptr = (__be32 *)(tcp + 1);
8405 *(ptr+2) = lro->cur_tsecr;
8408 /* Update counters required for calculation of
8409 * average no. of packets aggregated.
8411 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8412 statinfo->sw_stat.num_aggregations++;
/*
 * aggregate_new_rx - fold another in-order segment into an existing LRO
 * session: extend the running lengths, advance the expected sequence
 * number, and adopt this packet's ack/window and timestamp values.
 */
8415 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
8416 struct tcphdr *tcp, u32 l4_pyld)
8418 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8419 lro->total_len += l4_pyld;
8420 lro->frags_len += l4_pyld;
8421 lro->tcp_next_seq += l4_pyld;
8424 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8425 lro->tcp_ack = tcp->ack_seq;
8426 lro->window = tcp->window;
8430 /* Update tsecr and tsval from this packet */
8431 ptr = (__be32 *)(tcp+1);
8432 lro->cur_tsval = ntohl(*(ptr+1));
8433 lro->cur_tsecr = *(ptr + 2);
/*
 * verify_l3_l4_lro_capable - decide whether a TCP segment may be merged.
 * Rejects (non-zero return) pure acks/runts, IP packets with options,
 * ECN-marked packets, segments with any control flag other than ACK, and
 * any TCP option layout other than exactly one well-formed timestamp.
 * @l_lro may be NULL when probing a packet before a session exists.
 */
8437 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
8438 struct tcphdr *tcp, u32 tcp_pyld_len)
8442 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8444 if (!tcp_pyld_len) {
8445 /* Runt frame or a pure ack */
8449 if (ip->ihl != 5) /* IP has options */
8452 /* If we see CE codepoint in IP header, packet is not mergeable */
8453 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8456 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
8457 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
8458 tcp->ece || tcp->cwr || !tcp->ack) {
8460 * Currently recognize only the ack control word and
8461 * any other control field being set would result in
8462 * flushing the LRO session
8468 * Allow only one TCP timestamp option. Don't aggregate if
8469 * any other options are detected.
/* doff 5 = bare header; doff 8 = header + 12-byte timestamp option. */
8471 if (tcp->doff != 5 && tcp->doff != 8)
8474 if (tcp->doff == 8) {
8475 ptr = (u8 *)(tcp + 1);
/* Skip any NOP padding in front of the timestamp option. */
8476 while (*ptr == TCPOPT_NOP)
8478 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8481 /* Ensure timestamp value increases monotonically */
8483 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
8486 /* timestamp echo reply should be non-zero */
8487 if (*((__be32 *)(ptr+6)) == 0)
/*
 * s2io_club_tcp_session - central LRO dispatch for a received frame.
 * Matches the frame against the per-NIC session table (sp->lro0_n) and
 * returns a verdict: 1 = aggregate into an existing session, 2 = flush
 * both session and packet, 3 = begin a new session, 4 = session reached
 * sp->lro_max_aggr_per_sess and must be flushed, 5 (elided path) /
 * 0 = pass through.  *lro, *tcp and *tcp_len are out-parameters for the
 * caller's Rx path.
 */
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8496 struct RxD_t *rxdp, struct s2io_nic *sp)
8499 struct tcphdr *tcph;
/* First gate: only plain DIX TCP/IPv4 frames are eligible at all. */
8502 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8504 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8505 ip->saddr, ip->daddr);
8510 tcph = (struct tcphdr *)*tcp;
8511 *tcp_len = get_l4_pyld_length(ip, tcph);
/* Search for an in-use session whose 4-tuple matches this packet. */
8512 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8513 struct lro *l_lro = &sp->lro0_n[i];
8514 if (l_lro->in_use) {
8515 if (check_for_socket_match(l_lro, ip, tcph))
8517 /* Sock pair matched */
/* Out-of-order segment: cannot merge, flush the session. */
8520 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
8521 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
8522 "0x%x, actual 0x%x\n", __FUNCTION__,
8523 (*lro)->tcp_next_seq,
8526 sp->mac_control.stats_info->
8527 sw_stat.outof_sequence_pkts++;
8532 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
8533 ret = 1; /* Aggregate */
8535 ret = 2; /* Flush both */
8541 /* Before searching for available LRO objects,
8542 * check if the pkt is L3/L4 aggregatable. If not
8543 * don't create new LRO session. Just send this
8546 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
/* No match: claim the first free session slot for this flow. */
8550 for (i=0; i<MAX_LRO_SESSIONS; i++) {
8551 struct lro *l_lro = &sp->lro0_n[i];
8552 if (!(l_lro->in_use)) {
8554 ret = 3; /* Begin anew */
8560 if (ret == 0) { /* sessions exceeded */
8561 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
/* Act on the verdict computed above. */
8569 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
8572 update_L3L4_header(sp, *lro);
8575 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
/* Cap aggregation per session; flush when the limit is reached. */
8576 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
8577 update_L3L4_header(sp, *lro);
8578 ret = 4; /* Flush the LRO */
8582 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
8590 static void clear_lro_session(struct lro *lro)
8592 static u16 lro_struct_size = sizeof(struct lro);
8594 memset(lro, 0, lro_struct_size);
/*
 * queue_rx_frame - hand a completed Rx skb (single frame or a flushed
 * LRO super-packet) to the network stack via netif_receive_skb().
 */
8597 static void queue_rx_frame(struct sk_buff *skb)
8599 struct net_device *dev = skb->dev;
/* Set skb->protocol and strip the Ethernet header before delivery. */
8601 skb->protocol = eth_type_trans(skb, dev);
8603 netif_receive_skb(skb);
/*
 * lro_append_pkt - chain a new segment's payload onto the session's
 * first skb.  The payload skbs are linked through the parent skb's
 * frag_list; only the TCP payload bytes of @skb are kept (headers are
 * trimmed with skb_pull).
 */
8608 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8609 struct sk_buff *skb,
8612 struct sk_buff *first = lro->parent;
8614 first->len += tcp_len;
8615 first->data_len = lro->frags_len;
/* Drop everything before the payload so only data is chained. */
8616 skb_pull(skb, (skb->len - tcp_len));
/* Append to the existing frag_list, or start it with this skb. */
8617 if (skb_shinfo(first)->frag_list)
8618 lro->last_frag->next = skb;
8620 skb_shinfo(first)->frag_list = skb;
/* Account the appended skb's memory against the parent. */
8621 first->truesize += skb->truesize;
8622 lro->last_frag = skb;
8623 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8628 * s2io_io_error_detected - called when PCI error is detected
8629 * @pdev: Pointer to PCI device
8630 * @state: The current pci connection state
8632 * This function is called after a PCI bus error affecting
8633 * this device has been detected.
8635 static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8636 pci_channel_state_t state)
8638 struct net_device *netdev = pci_get_drvdata(pdev);
8639 struct s2io_nic *sp = netdev->priv;
/* Stop the stack from using the device while recovery is in progress. */
8641 netif_device_detach(netdev);
8643 if (netif_running(netdev)) {
8644 /* Bring down the card, while avoiding PCI I/O */
8645 do_s2io_card_down(sp, 0);
8647 pci_disable_device(pdev);
/* Ask the PCI error-recovery core to perform a slot reset. */
8649 return PCI_ERS_RESULT_NEED_RESET;
8653 * s2io_io_slot_reset - called after the pci bus has been reset.
8654 * @pdev: Pointer to PCI device
8656 * Restart the card from scratch, as if from a cold-boot.
8657 * At this point, the card has exprienced a hard reset,
8658 * followed by fixups by BIOS, and has its config space
8659 * set up identically to what it was at cold boot.
8661 static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8663 struct net_device *netdev = pci_get_drvdata(pdev);
8664 struct s2io_nic *sp = netdev->priv;
/* Device must be re-enabled after the slot reset before any I/O. */
8666 if (pci_enable_device(pdev)) {
8667 printk(KERN_ERR "s2io: "
8668 "Cannot re-enable PCI device after reset.\n");
8669 return PCI_ERS_RESULT_DISCONNECT;
/* Re-arm bus mastering, which the reset cleared. */
8672 pci_set_master(pdev);
8675 return PCI_ERS_RESULT_RECOVERED;
8679 * s2io_io_resume - called when traffic can start flowing again.
8680 * @pdev: Pointer to PCI device
8682 * This callback is called when the error recovery driver tells
8683 * us that its OK to resume normal operation.
8685 static void s2io_io_resume(struct pci_dev *pdev)
8687 struct net_device *netdev = pci_get_drvdata(pdev);
8688 struct s2io_nic *sp = netdev->priv;
8690 if (netif_running(netdev)) {
8691 if (s2io_card_up(sp)) {
8692 printk(KERN_ERR "s2io: "
8693 "Can't bring device back up after reset.\n");
8697 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8699 printk(KERN_ERR "s2io: "
8700 "Can't resetore mac addr after reset.\n");
8705 netif_device_attach(netdev);
8706 netif_wake_queue(netdev);