/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_nic_internal.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

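/* Initializes RSS with a fixed (non-random) hash key and spreads the
 * indirection table evenly across the RSS queues. Note: the
 * "i & (num_rss_queues - 1)" mask below assumes num_rss_queues is a
 * power of two; aq_nic_cfg_start() clamps cfg->vecs accordingly.
 */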
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Fills aq_nic_cfg with valid defaults */
static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->aq_hw_caps = &self->aq_hw_caps;

	cfg->vecs = AQ_CFG_VECS_DEF;
	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->rxds = AQ_CFG_RXDS_DEF;
	cfg->txds = AQ_CFG_TXDS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
	cfg->itr = cfg->is_interrupt_moderation ?
		AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;

	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	aq_nic_rss_init(self, cfg->num_rss_queues);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
int aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	/* descriptors */
	cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
	cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);

	/* rss rings */
	cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);

	/* Fall back to a single non-RSS vector on legacy interrupts or
	 * single-vector hardware.
	 */
	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (self->aq_hw_caps.vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
	cfg->hw_features = self->aq_hw_caps.hw_features;
	return 0;
}

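/* Periodic service work: polls link state from the hardware, drives the
 * netif carrier accordingly, and aggregates per-vector ring statistics
 * into ndev->stats. Reschedules itself unconditionally via err_exit.
 */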
static void aq_nic_service_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;
	struct net_device *ndev = aq_nic_get_ndev(self);
	int err = 0;
	unsigned int i = 0U;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
		goto err_exit;

	err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
	if (err < 0)
		goto err_exit;

	self->link_status = self->aq_hw->aq_link_status;

	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
		    self->aq_nic_cfg.is_interrupt_moderation);

	if (self->link_status.mbps) {
		aq_utils_obj_set(&self->header.flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->header.flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
	} else {
		netif_carrier_off(self->ndev);
		aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
	}

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	for (i = AQ_DIMOF(self->aq_vec); i--;) {
		if (self->aq_vec[i])
			aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
	}

	ndev->stats.rx_packets = stats_rx.packets;
	ndev->stats.rx_bytes = stats_rx.bytes;
	ndev->stats.rx_errors = stats_rx.errors;
	ndev->stats.tx_packets = stats_tx.packets;
	ndev->stats.tx_bytes = stats_tx.bytes;
	ndev->stats.tx_errors = stats_tx.errors;

err_exit:
	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
}

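/* In polling mode there are no per-vector interrupts; this timer
 * invokes each vector's ISR by hand and then rearms itself.
 */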
static void aq_nic_polling_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		AQ_CFG_POLLING_TIMER_INTERVAL);
}

static struct net_device *aq_nic_ndev_alloc(void)
{
	return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
}

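/* "Cold" allocation: creates the net_device, wires up ops and the
 * per-port HW instance, then seeds the default configuration. Runs
 * once at probe time, before any I/O resources exist.
 */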
struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
				   const struct ethtool_ops *et_ops,
				   struct device *dev,
				   struct aq_pci_func_s *aq_pci_func,
				   unsigned int port,
				   const struct aq_hw_ops *aq_hw_ops)
{
	struct net_device *ndev = NULL;
	struct aq_nic_s *self = NULL;
	int err = 0;

	ndev = aq_nic_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_exit;
	}

	self = netdev_priv(ndev);

	ndev->netdev_ops = ndev_ops;
	ndev->ethtool_ops = et_ops;

	SET_NETDEV_DEV(ndev, dev);

	ndev->if_port = port;
	ndev->min_mtu = ETH_MIN_MTU;
	self->ndev = ndev;

	self->aq_pci_func = aq_pci_func;

	self->aq_hw_ops = *aq_hw_ops;
	self->port = (u8)port;

	self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
					     &self->aq_hw_ops);
	err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
	if (err < 0)
		goto err_exit;

	aq_nic_cfg_init_defaults(self);

err_exit:
	if (err < 0) {
		aq_nic_free_hot_resources(self);
		self = NULL;
	}
	return self;
}

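/* Reads the permanent MAC from hardware (optionally overridden at
 * build time via AQ_CFG_MAC_ADDR_PERMANENT), quiesces all TX queues
 * and registers the net_device.
 */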
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;
	unsigned int i = 0U;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}
	err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
			    self->aq_nic_cfg.aq_hw_caps,
			    self->ndev->dev_addr);
	if (err < 0)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	netif_carrier_off(self->ndev);

	for (i = AQ_CFG_VECS_MAX; i--;)
		aq_nic_ndev_queue_stop(self, i);

	err = register_netdev(self->ndev);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_ndev_init(struct aq_nic_s *self)
{
	struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;

	return 0;
}

void aq_nic_ndev_free(struct aq_nic_s *self)
{
	if (!self->ndev)
		goto err_exit;

	if (self->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(self->ndev);

	if (self->aq_hw)
		self->aq_hw_ops.destroy(self->aq_hw);

	free_netdev(self->ndev);

err_exit:;
}

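/* "Hot" allocation: everything that must be rebuilt on every open(),
 * i.e. the interrupt vectors and their rings. Undoes itself through
 * aq_nic_free_hot_resources() on failure.
 */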
struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
{
	struct aq_nic_s *self = NULL;
	int err = 0;

	if (!ndev) {
		err = -EINVAL;
		goto err_exit;
	}
	self = netdev_priv(ndev);

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	if (netif_running(ndev)) {
		unsigned int i;

		for (i = AQ_CFG_VECS_MAX; i--;)
			netif_stop_subqueue(ndev, i);
	}

	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

err_exit:
	if (err < 0) {
		aq_nic_free_hot_resources(self);
		self = NULL;
	}
	return self;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct device *aq_nic_get_dev(struct aq_nic_s *self)
{
	return self->ndev->dev.parent;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

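/* Brings the hardware out of reset and runs per-vector init; intended
 * to run after aq_nic_alloc_hot(), typically from the ndo_open path.
 */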
int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	err = self->aq_hw_ops.hw_reset(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
			    aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);

err_exit:
	return err;
}

void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
{
	netif_start_subqueue(self->ndev, idx);
}

void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
{
	netif_stop_subqueue(self->ndev, idx);
}

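/* Start sequence: program the multicast list and packet filter, start
 * the vectors and the hardware, arm the service timer and either the
 * polling timer or the real IRQs, then open the TX/RX queues.
 */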
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
						   self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops.hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
			self->aq_nic_cfg.is_interrupt_moderation);
	if (err < 0)
		goto err_exit;

	setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
		    (unsigned long)self);
	mod_timer(&self->service_timer, jiffies +
			AQ_CFG_SERVICE_TIMER_INTERVAL);

	if (self->aq_nic_cfg.is_polling) {
		setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
			    (unsigned long)self);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
						    self->ndev->name, aq_vec,
					aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
						    AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_start(self, i);

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

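/* Fills the TX ring with descriptors for one skb and DMA-maps its
 * data: an optional context descriptor for LSO, one descriptor for the
 * linear part, then one or more per fragment (fragments larger than
 * AQ_CFG_TX_FRAME_MAX are split). Returns the number of descriptors
 * used, or 0U if a fragment could not be mapped, after unwinding the
 * mappings taken so far.
 */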
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	/* For GSO packets, emit a TX context descriptor first */
	if (unlikely(skb_is_gso(skb))) {
		dx_buff->flags = 0U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;
		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		++ret;
	}

	/* Map the linear part of the skb */
	dx_buff->flags = 0U;
	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	/* Map each fragment, splitting any that exceed AQ_CFG_TX_FRAME_MAX */
	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);
		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
					   frag_len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
			goto mapping_error;

		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = AQ_CFG_TX_FRAME_MAX;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;

			frag_len -= AQ_CFG_TX_FRAME_MAX;
			frag_pa += AQ_CFG_TX_FRAME_MAX;
			++ret;
		}

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];

		dx_buff->flags = 0U;
		dx_buff->len = frag_len;
		dx_buff->pa = frag_pa;
		dx_buff->is_mapped = 1U;
		++ret;
	}

	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	/* Unwind: unmap every buffer mapped so far, walking forward from
	 * the original tail while decrementing the descriptor count.
	 */
	for (dx = ring->sw_tail;
	     ret > 0U;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_txc && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

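/* Transmit entry point (called from ndo_start_xmit). Drops oversized
 * skbs, backs off with NETDEV_TX_BUSY when the NIC is not TX-ready or
 * the ring is nearly full, otherwise maps the skb and kicks the ring.
 */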
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;
	bool is_nic_in_bad_state;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
						AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
						(aq_ring_avail_dx(ring) <
						AQ_CFG_SKB_FRAGS_MAX);

	if (is_nic_in_bad_state) {
		aq_nic_ndev_queue_stop(self, ring->idx);
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
						      ring, frags);
		if (err >= 0) {
			if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
				aq_nic_ndev_queue_stop(self, ring->idx);

			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

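/* Copies the ndev multicast list into the private mc_list and pushes
 * it to hardware. Note: the copy loop does not itself bound i against
 * the capacity of mc_list.ar[].
 */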
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;

	self->mc_list.count = 0U;

	netdev_for_each_mc_addr(ha, ndev) {
		ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		++self->mc_list.count;
	}

	return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	int err = 0;

	if (new_mtu > self->aq_hw_caps.mtu) {
		err = -EINVAL;
		goto err_exit;
	}
	self->aq_nic_cfg.mtu = new_mtu;

err_exit:
	return err;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
					  &self->aq_hw_caps, regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_hw_caps.mac_regs_count;
}

void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	unsigned int count = 0U;
	int err = 0;

	err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
	if (err < 0)
		goto err_exit;

	data += count;
	count = 0U;
	for (i = 0U, aq_vec = self->aq_vec[0];
		aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
}

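/* ethtool get_link_ksettings: reports supported modes from hardware
 * capabilities and advertised modes from the current configuration.
 */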
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_hw_caps.flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

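/* ethtool set_link_ksettings: with autoneg enabled, advertise every
 * rate the hardware supports; otherwise map the requested fixed speed
 * to the matching AQ_NIC_RATE_* bit and reject unsupported speeds.
 */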
int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_hw_caps.link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -EINVAL;
			goto err_exit;
		}

		if (!(self->aq_hw_caps.link_speed_msk & rate)) {
			err = -EOPNOTSUPP;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

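/* Stop sequence, the mirror of aq_nic_start(): close the queues, kill
 * the timers, mask interrupts, release the IRQs, stop the vectors and
 * finally the hardware.
 */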
int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_stop(self, i);

	del_timer_sync(&self->service_timer);

	self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);

	aq_pci_func_free_irqs(self->aq_pci_func);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops.hw_stop(self->aq_hw);
}

void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (self->power_state == AQ_HW_POWER_STATE_D0) {
		(void)self->aq_hw_ops.hw_deinit(self->aq_hw);
	} else {
		(void)self->aq_hw_ops.hw_set_power(self->aq_hw,
						   self->power_state);
	}

err_exit:;
}

void aq_nic_free_hot_resources(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = AQ_DIMOF(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

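/* Suspend/resume helper: on a sleep or freeze event, detach the device
 * and tear everything down toward D3; otherwise reinitialize, restart
 * and reattach. Holds the RTNL lock around the state change.
 */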
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto out;
	}
	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	rtnl_unlock();
out:
	return err;
}