num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
if (num_fw_handles) {
- arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
+ arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
if (!arr)
goto out; /* Keep the existing array */
} else
}
if (num_registrations) {
- arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
+ arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
if (!arr)
goto out; /* Keep the existing array */
} else
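Both hunks above replace an open-coded n * sizeof(*arr) multiplication with kcalloc(), which fails the allocation instead of silently wrapping when the product overflows. A minimal sketch of the difference (demo_alloc and n_items are illustrative names, not from the driver):

#include <linux/slab.h>

/* kzalloc(n * size, ...) can wrap: a huge n yields a tiny buffer.
 * kcalloc(n, size, ...) checks the multiplication and returns NULL
 * on overflow, so the caller sees a clean allocation failure.
 */
static u64 *demo_alloc(size_t n_items)
{
        return kcalloc(n_items, sizeof(u64), GFP_KERNEL);
}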
struct ehea_port *port = netdev_priv(dev);
struct net_device_stats *stats = &port->stats;
struct hcp_ehea_port_cb2 *cb2;
- u64 hret, rx_packets, tx_packets;
+ u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
int i;
memset(stats, 0, sizeof(*stats));
ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
rx_packets = 0;
- for (i = 0; i < port->num_def_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++) {
rx_packets += port->port_res[i].rx_packets;
+ rx_bytes += port->port_res[i].rx_bytes;
+ }
tx_packets = 0;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
tx_packets += port->port_res[i].tx_packets;
+ tx_bytes += port->port_res[i].tx_bytes;
+ }
stats->tx_packets = tx_packets;
stats->multicast = cb2->rxmcp;
stats->rx_errors = cb2->rxuerr;
- stats->rx_bytes = cb2->rxo;
- stats->tx_bytes = cb2->txo;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
out_herr:
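The get_stats hunk stops reporting the firmware octet counters (cb2->rxo/txo) and instead sums the software per-queue byte counters that the receive and transmit hunks later in this patch maintain. The aggregation pattern, reduced to a self-contained sketch (struct demo_ring and demo_sum_stats are illustrative stand-ins for the driver's port_res fields):

#include <linux/netdevice.h>
#include <linux/string.h>

struct demo_ring {
        u64 rx_packets, rx_bytes;
        u64 tx_packets, tx_bytes;
};

/* Fold per-ring counters into the single net_device_stats view */
static void demo_sum_stats(struct net_device_stats *stats,
                           const struct demo_ring *rings, int n)
{
        int i;

        memset(stats, 0, sizeof(*stats));
        for (i = 0; i < n; i++) {
                stats->rx_packets += rings[i].rx_packets;
                stats->rx_bytes   += rings[i].rx_bytes;
                stats->tx_packets += rings[i].tx_packets;
                stats->tx_bytes   += rings[i].tx_bytes;
        }
}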
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
+ ehea_info("Unable to allocate enough skb in the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
struct net_device *dev = pr->port->netdev;
int i;
- for (i = 0; i < pr->rq1_skba.len; i++) {
+ if (nr_rq1a > pr->rq1_skba.len) {
+ ehea_error("NR_RQ1A bigger than skb array len\n");
+ return;
+ }
+
+ for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i])
+ if (!skb_arr_rq1[i]) {
+ ehea_info("No enough memory to allocate skb array\n");
break;
+ }
}
/* Ring doorbell */
- ehea_update_rq1a(pr->qp, nr_rq1a);
+ ehea_update_rq1a(pr->qp, i);
}
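Note the doorbell change at the end of the hunk: after a partial allocation the queue is credited with i, the number of skbs actually placed in the array, not the requested nr_rq1a. The idiom in isolation (demo_fill is an illustrative name):

#include <linux/slab.h>

/* Credit the hardware only with entries that really exist; posting
 * the requested count after a failed allocation would hand the HW
 * uninitialized ring slots.
 */
static int demo_fill(void *slots[], int requested)
{
        int i;

        for (i = 0; i < requested; i++) {
                slots[i] = kzalloc(64, GFP_ATOMIC); /* may fail mid-loop */
                if (!slots[i])
                        break;
        }
        return i;       /* ring the doorbell with this, not "requested" */
}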
static int ehea_refill_rq_def(struct ehea_port_res *pr,
int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
pr->port->vgrp);
- if (use_lro) {
+ if (skb->dev->features & NETIF_F_LRO) {
if (vlan_extracted)
lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
pr->port->vgrp,
int skb_arr_rq2_len = pr->rq2_skba.len;
int skb_arr_rq3_len = pr->rq3_skba.len;
int processed, processed_rq1, processed_rq2, processed_rq3;
+ u64 processed_bytes = 0;
int wqe_index, last_wqe_index, rq, port_reset;
processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
- if (!skb)
+ if (!skb) {
+ ehea_info("Not enough memory to allocate skb\n");
break;
+ }
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
processed_rq3++;
}
+ processed_bytes += skb->len;
ehea_proc_skb(pr, cqe, skb);
} else {
pr->p_stats.poll_receive_errors++;
}
cqe = ehea_poll_rq1(qp, &wqe_index);
}
- if (use_lro)
+ if (dev->features & NETIF_F_LRO)
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
+ pr->rx_bytes += processed_bytes;
ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
ehea_refill_rq2(pr, processed_rq2);
struct ehea_port_res *pr = &port->port_res[i];
pr->sq_restart_flag = 0;
}
+ wake_up(&port->restart_wq);
}
static void check_sqs(struct ehea_port *port)
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
+ int ret;
- k = 0;
swqe = ehea_get_swqe(pr->qp, &swqe_index);
memset(swqe, 0, SWQE_HEADER_SIZE);
ehea_post_swqe(pr->qp, swqe);
- while (pr->sq_restart_flag == 0) {
- msleep(5);
- if (++k == 100) {
- ehea_error("HW/SW queues out of sync");
- ehea_schedule_port_reset(pr->port);
- return;
- }
+ ret = wait_event_timeout(port->restart_wq,
+ pr->sq_restart_flag == 0,
+ msecs_to_jiffies(100));
+
+ if (!ret) {
+ ehea_error("HW/SW queues out of sync");
+ ehea_schedule_port_reset(pr->port);
+ return;
}
}
-
- return;
}
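This hunk and the flush-sq hunk further down replace msleep() polling loops with the standard wait_event_timeout()/wake_up() handshake against the new restart_wq and swqe_avail_wq heads. Reduced to a self-contained sketch (demo_* names are illustrative): the waiter sleeps until the condition holds or 100 ms elapse, and wait_event_timeout() returns 0 only on timeout.

#include <linux/wait.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_done;

static bool demo_wait(void)
{
        /* 0 on timeout with the condition still false, else > 0 */
        return wait_event_timeout(demo_wq, demo_done,
                                  msecs_to_jiffies(100)) != 0;
}

static void demo_complete(void)
{
        demo_done = 1;          /* flip the condition first ...    */
        wake_up(&demo_wq);      /* ... then wake any demo_wait()er */
}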
pr->queue_stopped = 0;
}
spin_unlock_irqrestore(&pr->netif_queue, flags);
+ wake_up(&pr->port->swqe_avail_wq);
return cqe;
}
enum ehea_eq_type eq_type = EHEA_EQ;
struct ehea_qp_init_attr *init_attr = NULL;
int ret = -EIO;
+ u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+ tx_bytes = pr->tx_bytes;
+ tx_packets = pr->tx_packets;
+ rx_bytes = pr->rx_bytes;
+ rx_packets = pr->rx_packets;
memset(pr, 0, sizeof(struct ehea_port_res));
+ pr->tx_bytes = tx_bytes;
+ pr->tx_packets = tx_packets;
+ pr->rx_bytes = rx_bytes;
+ pr->rx_packets = rx_packets;
+
pr->port = port;
spin_lock_init(&pr->xmit_lock);
spin_lock_init(&pr->netif_queue);
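Since ehea_init_port_res() runs again on every port reset, the memset() would wipe the cumulative traffic counters, so the hunk parks them in locals and writes them back afterwards. The idiom in isolation (struct demo_res is an illustrative stand-in):

#include <linux/string.h>
#include <linux/types.h>

struct demo_res {
        u64 rx_bytes, tx_bytes;
        int transient_state;
};

static void demo_reinit(struct demo_res *r)
{
        u64 rx = r->rx_bytes, tx = r->tx_bytes; /* save survivors */

        memset(r, 0, sizeof(*r));               /* wipe everything */

        r->rx_bytes = rx;                       /* restore counters */
        r->tx_bytes = tx;
}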
struct hcp_ehea_port_cb7 *cb7;
u64 hret;
- if ((enable && port->promisc) || (!enable && !port->promisc))
+ if (enable == port->promisc)
return;
cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
+ if (vlan_tx_tag_present(skb)) {
+ swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+ swqe->vlan_tag = vlan_tx_tag_get(skb);
+ }
+
+ pr->tx_packets++;
+ pr->tx_bytes += skb->len;
+
if (skb->len <= SWQE3_MAX_IMM) {
u32 sig_iv = port->sig_comp_iv;
u32 swqe_num = pr->swqe_id_counter;
}
pr->swqe_id_counter += 1;
- if (port->vgrp && vlan_tx_tag_present(skb)) {
- swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
- swqe->vlan_tag = vlan_tx_tag_get(skb);
- }
-
if (netif_msg_tx_queued(port)) {
ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
ehea_dump(swqe, 512, "swqe");
}
ehea_post_swqe(pr->qp, swqe);
- pr->tx_packets++;
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
spin_lock_irqsave(&pr->netif_queue, flags);
netif_start_queue(dev);
}
+ init_waitqueue_head(&port->swqe_avail_wq);
+ init_waitqueue_head(&port->restart_wq);
+
mutex_unlock(&port->port_lock);
return ret;
for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
struct ehea_port_res *pr = &port->port_res[i];
int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
- int k = 0;
- while (atomic_read(&pr->swqe_avail) < swqe_max) {
- msleep(5);
- if (++k == 20) {
- ehea_error("WARNING: sq not flushed completely");
- break;
- }
+ int ret;
+
+ ret = wait_event_timeout(port->swqe_avail_wq,
+ atomic_read(&pr->swqe_avail) >= swqe_max,
+ msecs_to_jiffies(100));
+
+ if (!ret) {
+ ehea_error("WARNING: sq not flushed completely");
+ break;
}
}
}
| NETIF_F_LLTX;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
+ if (use_lro)
+ dev->features |= NETIF_F_LRO;
+
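Seeding dev->features from the use_lro module parameter moves the LRO decision into the netdev feature flags, which the receive-path hunks above now consult at run time; assuming the ethtool feature hooks are wired up, LRO can then be flipped with ethtool rather than only at module load. The run-time check as a sketch (demo_lro_enabled is an illustrative name):

#include <linux/netdevice.h>

/* Matches the receive-path tests above: consult the feature bit on
 * each pass instead of a load-time module parameter.
 */
static inline bool demo_lro_enabled(const struct net_device *dev)
{
        return !!(dev->features & NETIF_F_LRO);
}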
INIT_WORK(&port->reset_task, ehea_reset_port);
ret = register_netdev(dev);
if (ret)
ehea_info("failed registering memory remove notifier");
- ret = crash_shutdown_register(&ehea_crash_handler);
+ ret = crash_shutdown_register(ehea_crash_handler);
if (ret)
ehea_info("failed registering crash handler");
out2:
unregister_memory_notifier(&ehea_mem_nb);
unregister_reboot_notifier(&ehea_reboot_nb);
- crash_shutdown_unregister(&ehea_crash_handler);
+ crash_shutdown_unregister(ehea_crash_handler);
out:
return ret;
}
driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
unregister_reboot_notifier(&ehea_reboot_nb);
- ret = crash_shutdown_unregister(&ehea_crash_handler);
+ ret = crash_shutdown_unregister(ehea_crash_handler);
if (ret)
ehea_info("failed unregistering crash handler");
unregister_memory_notifier(&ehea_mem_nb);