/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_nic_internal.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

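/* Seed RSS with a fixed hash key and spread the indirection table
 * entries across the configured number of RSS queues.
 */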
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[40] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Fills aq_nic_cfg with valid defaults */
static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->aq_hw_caps = &self->aq_hw_caps;

	cfg->vecs = AQ_CFG_VECS_DEF;
	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->rxds = AQ_CFG_RXDS_DEF;
	cfg->txds = AQ_CFG_TXDS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
	cfg->itr = cfg->is_interrupt_moderation ?
		AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;

	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	aq_nic_rss_init(self, cfg->num_rss_queues);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
int aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
	cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);

	cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (self->aq_hw_caps.vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
	cfg->hw_features = self->aq_hw_caps.hw_features;
	return 0;
}

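/* Periodic service task: polls the link state, updates the carrier
 * accordingly and folds per-vector ring statistics into ndev->stats.
 */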
static void aq_nic_service_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;
	struct net_device *ndev = aq_nic_get_ndev(self);
	int err = 0;
	unsigned int i = 0U;
	struct aq_hw_link_status_s link_status;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
		goto err_exit;

	err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
	if (err < 0)
		goto err_exit;

	self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
			self->aq_nic_cfg.is_interrupt_moderation);

	if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
		if (link_status.mbps) {
			aq_utils_obj_set(&self->header.flags,
					 AQ_NIC_FLAG_STARTED);
			aq_utils_obj_clear(&self->header.flags,
					   AQ_NIC_LINK_DOWN);
			netif_carrier_on(self->ndev);
		} else {
			netif_carrier_off(self->ndev);
			aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
		}

		self->link_status = link_status;
	}

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	for (i = AQ_DIMOF(self->aq_vec); i--;) {
		if (self->aq_vec[i])
			aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
	}

	ndev->stats.rx_packets = stats_rx.packets;
	ndev->stats.rx_bytes = stats_rx.bytes;
	ndev->stats.rx_errors = stats_rx.errors;
	ndev->stats.tx_packets = stats_tx.packets;
	ndev->stats.tx_bytes = stats_tx.bytes;
	ndev->stats.tx_errors = stats_tx.errors;

err_exit:
	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
}

static void aq_nic_polling_timer_cb(unsigned long param)
{
	struct aq_nic_s *self = (struct aq_nic_s *)param;
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

static struct net_device *aq_nic_ndev_alloc(void)
{
	return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
}

struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
				   const struct ethtool_ops *et_ops,
				   struct device *dev,
				   struct aq_pci_func_s *aq_pci_func,
				   unsigned int port,
				   const struct aq_hw_ops *aq_hw_ops)
{
	struct net_device *ndev = NULL;
	struct aq_nic_s *self = NULL;
	int err = 0;

	ndev = aq_nic_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_exit;
	}

	self = netdev_priv(ndev);

	ndev->netdev_ops = ndev_ops;
	ndev->ethtool_ops = et_ops;

	SET_NETDEV_DEV(ndev, dev);

	ndev->if_port = port;
	ndev->min_mtu = ETH_MIN_MTU;
	self->ndev = ndev;

	self->aq_pci_func = aq_pci_func;

	self->aq_hw_ops = *aq_hw_ops;
	self->port = (u8)port;

	self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
					     &self->aq_hw_ops);
	err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
	if (err < 0)
		goto err_exit;

	aq_nic_cfg_init_defaults(self);

err_exit:
	if (err < 0) {
		aq_nic_free_hot_resources(self);
		self = NULL;
	}
	return self;
}

int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;
	unsigned int i = 0U;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}
	err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
			self->aq_nic_cfg.aq_hw_caps,
			self->ndev->dev_addr);
	if (err < 0)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	netif_carrier_off(self->ndev);

	for (i = AQ_CFG_VECS_MAX; i--;)
		aq_nic_ndev_queue_stop(self, i);

	err = register_netdev(self->ndev);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_ndev_init(struct aq_nic_s *self)
{
	struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;

	return 0;
}

void aq_nic_ndev_free(struct aq_nic_s *self)
{
	if (!self->ndev)
		goto err_exit;

	if (self->ndev->reg_state == NETREG_REGISTERED)
		unregister_netdev(self->ndev);

	if (self->aq_hw)
		self->aq_hw_ops.destroy(self->aq_hw);

	free_netdev(self->ndev);

err_exit:;
}

struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
{
	struct aq_nic_s *self = NULL;
	int err = 0;

	if (!ndev) {
		err = -EINVAL;
		goto err_exit;
	}
	self = netdev_priv(ndev);

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	if (netif_running(ndev)) {
		unsigned int i;

		for (i = AQ_CFG_VECS_MAX; i--;)
			netif_stop_subqueue(ndev, i);
	}

	for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

err_exit:
	if (err < 0) {
		aq_nic_free_hot_resources(self);
		self = NULL;
	}
	return self;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct device *aq_nic_get_dev(struct aq_nic_s *self)
{
	return self->ndev->dev.parent;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	err = self->aq_hw_ops.hw_reset(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
			aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);

err_exit:
	return err;
}

void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
{
	netif_start_subqueue(self->ndev, idx);
}

void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
{
	netif_stop_subqueue(self->ndev, idx);
}

int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
						   self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops.hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
			self->aq_nic_cfg.is_interrupt_moderation);
	if (err < 0)
		goto err_exit;

	setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
		    (unsigned long)self);
	mod_timer(&self->service_timer, jiffies +
		  AQ_CFG_SERVICE_TIMER_INTERVAL);

	if (self->aq_nic_cfg.is_polling) {
		setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
			    (unsigned long)self);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
			self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
						    self->ndev->name, aq_vec,
					aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
				    AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_start(self, i);

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

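/* Map an skb onto TX descriptors: an optional LSO context descriptor,
 * then the linear part and each fragment, splitting buffers larger than
 * AQ_CFG_TX_FRAME_MAX; DMA mappings are unwound on a fragment mapping
 * error. Returns the number of descriptors used.
 */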
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->flags = 0U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_txc = 1U;

		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		++ret;
	}

	dx_buff->flags = 0U;
	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);
		frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
					   frag_len, DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa)))
			goto mapping_error;

		while (frag_len > AQ_CFG_TX_FRAME_MAX) {
			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = AQ_CFG_TX_FRAME_MAX;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;

			frag_len -= AQ_CFG_TX_FRAME_MAX;
			frag_pa += AQ_CFG_TX_FRAME_MAX;
			++ret;
		}

		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];

		dx_buff->flags = 0U;
		dx_buff->len = frag_len;
		dx_buff->pa = frag_pa;
		dx_buff->is_mapped = 1U;
		++ret;
	}

	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_txc && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

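/* Transmit entry point: picks the TX ring for the skb's queue, maps the
 * skb and hands the descriptors to hardware under the ring lock, stopping
 * the queue when descriptors run low.
 */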
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
__releases(&ring->lock)
__acquires(&ring->lock)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	unsigned int trys = AQ_CFG_LOCK_TRYS;
	int err = NETDEV_TX_OK;
	bool is_nic_in_bad_state;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
						AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
				(aq_ring_avail_dx(ring) <
				 AQ_CFG_SKB_FRAGS_MAX);

	if (is_nic_in_bad_state) {
		aq_nic_ndev_queue_stop(self, ring->idx);
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	do {
		if (spin_trylock(&ring->header.lock)) {
			frags = aq_nic_map_skb(self, skb, ring);

			if (likely(frags)) {
				err = self->aq_hw_ops.hw_ring_tx_xmit(
							self->aq_hw,
							ring, frags);
				if (err >= 0) {
					if (aq_ring_avail_dx(ring) <
					    AQ_CFG_SKB_FRAGS_MAX + 1)
						aq_nic_ndev_queue_stop(
								self,
								ring->idx);

					++ring->stats.tx.packets;
					ring->stats.tx.bytes += skb->len;
				}
			} else {
				err = NETDEV_TX_BUSY;
			}

			spin_unlock(&ring->header.lock);
			break;
		}
	} while (--trys);

	if (!trys) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

err_exit:
	return err;
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;

	self->mc_list.count = 0U;

	netdev_for_each_mc_addr(ha, ndev) {
		ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		++self->mc_list.count;
	}

	return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	int err = 0;

	if (new_mtu > self->aq_hw_caps.mtu) {
		err = -EINVAL;
		goto err_exit;
	}
	self->aq_nic_cfg.mtu = new_mtu;

err_exit:
	return err;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
					  &self->aq_hw_caps, regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_hw_caps.mac_regs_count;
}

void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;
	unsigned int count = 0U;

	err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
	if (err < 0)
		goto err_exit;

	data += count;
	count = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
}

void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_hw_caps.flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_hw_caps.link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;
		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;
		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;
		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;
		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;
		default:
			err = -1;
			goto err_exit;
		}

		if (!(self->aq_hw_caps.link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_nic_ndev_queue_stop(self, i);

	del_timer_sync(&self->service_timer);

	self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self->aq_pci_func);

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops.hw_stop(self->aq_hw);
}

void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
		self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (self->power_state == AQ_HW_POWER_STATE_D0) {
		(void)self->aq_hw_ops.hw_deinit(self->aq_hw);
	} else {
		(void)self->aq_hw_ops.hw_set_power(self->aq_hw,
						   self->power_state);
	}

err_exit:;
}

void aq_nic_free_hot_resources(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = AQ_DIMOF(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto err_exit;
	}

	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	return err;
}