}
}
+static inline void niu_sync_rx_discard_stats(struct niu *np,
+ struct rx_ring_info *rp,
+ const int limit)
+{
+ /* This elaborate scheme is needed for reading the RX discard
+ * counters, as they are only 16-bit and can overflow quickly.
+ * The overflow indication bit is also not usable, because the
+ * counter value does not wrap, but saturates at the max value
+ * 0xFFFF.
+ *
+ * In theory and in practice, counter updates can be lost between
+ * the nr64() read and the nw64() clear. For this reason, the
+ * number of nw64() counter clearings is limited via the limit
+ * parameter.
+ */
+ int rx_channel = rp->rx_channel;
+ u32 misc, wred;
+
+ /* RXMISC (Receive Miscellaneous Discard Count) covers the
+ * following discard events: IPP (Input Port Process) discards,
+ * FFLP/TCAM discards, a full RCR (Receive Completion Ring), and
+ * an empty RBR (Receive Block Ring) prefetch buffer.
+ */
+ misc = nr64(RXMISC(rx_channel));
+ if (unlikely((misc & RXMISC_COUNT) > limit)) {
+ nw64(RXMISC(rx_channel), 0);
+ rp->rx_errors += misc & RXMISC_COUNT;
+
+ if (unlikely(misc & RXMISC_OFLOW))
+ dev_err(np->device, "rx-%d: Counter overflow "
+ "RXMISC discard\n", rx_channel);
+
+ niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
+ np->dev->name, rx_channel, misc, misc - limit);
+ }
+
+ /* WRED (Weighted Random Early Discard) by hardware */
+ wred = nr64(RED_DIS_CNT(rx_channel));
+ if (unlikely((wred & RED_DIS_CNT_COUNT) > limit)) {
+ nw64(RED_DIS_CNT(rx_channel), 0);
+ rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
+
+ if (unlikely(wred & RED_DIS_CNT_OFLOW))
+ dev_err(np->device, "rx-%d: Counter overflow "
+ "WRED discard\n", rx_channel);
+
+ niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
+ np->dev->name, rx_channel, wred, wred - limit);
+ }
+}
+
static int niu_rx_work(struct niu *np, struct rx_ring_info *rp, int budget)
{
int qlen, rcr_done = 0, work_done = 0;
nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat);
+ /* Only sync discard stats when qlen indicates potential for drops */
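+ /* A limit of 0x7FFF is roughly half the 16-bit counter range, so
+ * the nw64() clear (and stat accumulation) only happens once a
+ * counter is about halfway to its 0xFFFF saturation point, keeping
+ * the extra register writes off the fast path.
+ */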
+ if (qlen > 10)
+ niu_sync_rx_discard_stats(np, rp, 0x7FFF);
+
return work_done;
}
work_done = niu_poll_core(np, lp, budget);
if (work_done < budget) {
- netif_rx_complete(np->dev, napi);
+ netif_rx_complete(napi);
niu_ldg_rearm(np, lp, 1);
}
return work_done;
static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
u64 v0, u64 v1, u64 v2)
{
- if (likely(netif_rx_schedule_prep(np->dev, &lp->napi))) {
+ if (likely(netif_rx_schedule_prep(&lp->napi))) {
lp->v0 = v0;
lp->v1 = v1;
lp->v2 = v2;
__niu_fastpath_interrupt(np, lp->ldg_num, v0);
- __netif_rx_schedule(np->dev, &lp->napi);
+ __netif_rx_schedule(&lp->napi);
}
}
for (i = 0; i < np->num_rx_rings; i++) {
struct rx_ring_info *rp = &np->rx_rings[i];
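+ /* A limit of 0 folds any pending (non-zero) discard counts into
+ * rx_errors/rx_dropped before the per-ring counters are summed.
+ */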
+ niu_sync_rx_discard_stats(np, rp, 0);
+
pkts += rp->rx_packets;
bytes += rp->rx_bytes;
dropped += rp->rx_dropped;
for (i = 0; i < np->num_rx_rings; i++) {
struct rx_ring_info *rp = &np->rx_rings[i];
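+ /* As above, a limit of 0 syncs any pending discards before the
+ * per-channel data[] entries are filled in.
+ */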
+ niu_sync_rx_discard_stats(np, rp, 0);
+
data[0] = rp->rx_channel;
data[1] = rp->rx_packets;
data[2] = rp->rx_bytes;
static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
size_t size, enum dma_data_direction direction)
{
- return dma_unmap_page(dev, dma_address, size, direction);
+ dma_unmap_page(dev, dma_address, size, direction);
}
static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,