/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
 * Definition of functions for Rx and Tx rings. Friendly module for aq_nic.
 */
#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>
struct aq_vec_s {
	struct aq_obj_s header;
	struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1
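/* NAPI poll handler for one interrupt vector: under the vector lock it
 * drains completed Tx descriptors, restarts the queue once enough
 * descriptors are free again, receives Rx packets within the NAPI budget
 * and refills the Rx ring. If the budget was not exhausted, NAPI is
 * completed and the vector interrupt is re-enabled.
 */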
static int aq_vec_poll(struct napi_struct *napi, int budget)
__releases(&self->lock)
__acquires(&self->lock)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	struct aq_ring_s *ring = NULL;
	int work_done = 0;
	int err = 0;
	unsigned int i = 0U;
	unsigned int sw_tail_old = 0U;
	bool was_tx_cleaned = false;

	if (!self) {
		err = -EINVAL;
	} else if (spin_trylock(&self->header.lock)) {
		for (i = 0U, ring = self->ring[0];
			self->tx_rings > i; ++i, ring = self->ring[i]) {
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
						self->aq_hw,
						&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);

				if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
				    AQ_CFG_SKB_FRAGS_MAX) {
					aq_nic_ndev_queue_start(self->aq_nic,
						ring[AQ_VEC_TX_ID].idx);
				}
				was_tx_cleaned = true;
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
					&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
						self->aq_hw,
						&ring[AQ_VEC_RX_ID],
						sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

		if (was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}

err_exit:
		spin_unlock(&self->header.lock);
	}

	return work_done;
}
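/* Allocates the vector object together with one Tx/Rx ring pair per
 * traffic class, pins the vector to a CPU via its affinity mask and
 * registers the NAPI context. On failure everything that was already
 * allocated is released and NULL is returned.
 */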
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}
		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}
		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_free(self);
		self = NULL;
	}
	return self;
}
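/* Stores the hardware ops and hardware context in the vector, then
 * initializes every Tx/Rx ring pair in software and in hardware and
 * pre-fills the Rx rings with buffers.
 */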
int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	spin_lock_init(&self->header.lock);

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}
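/* Starts the Tx and Rx rings of every pair in hardware and enables NAPI
 * processing for the vector.
 */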
int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}
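/* Stops the Tx and Rx rings of every pair in hardware and disables NAPI
 * processing for the vector.
 */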
void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}
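/* Releases the software state of the rings: completes any outstanding Tx
 * buffers and releases the Rx buffers of every ring pair.
 */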
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}

err_exit:;
}
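/* Frees the descriptor memory of every ring pair, removes the NAPI context
 * and finally frees the vector object itself.
 */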
void aq_vec_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
		self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}
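/* MSI-X interrupt handler: schedules NAPI for the vector that raised the
 * interrupt.
 */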
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}
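/* Legacy (INTx) interrupt handler: reads the interrupt status from the
 * hardware; if it is non-zero the vector interrupt is masked and NAPI is
 * scheduled, otherwise interrupts are re-enabled and IRQ_NONE is returned.
 */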
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		goto err_exit;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
				1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
		return IRQ_NONE;
	}

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}
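/* Returns the CPU affinity mask that was assigned to the vector when it
 * was allocated.
 */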
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}
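/* Accumulates the software Rx and Tx counters of every ring pair of the
 * vector into the caller supplied statistics structures.
 */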
void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = NULL;
	unsigned int r = 0U;

	for (r = 0U, ring = self->ring[0];
		self->tx_rings > r; ++r, ring = self->ring[r]) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
	}
}
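/* Copies the aggregated vector statistics into the u64 array at data and
 * reports the number of counters written through p_count.
 */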
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	unsigned int count = 0U;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	aq_vec_add_stats(self, &stats_rx, &stats_tx);

	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}