/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *	 Christoph Raisch <raisch@de.ibm.com>
 *	 Jan-Bernd Themann <themann@de.ibm.com>
 *	 Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);
MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");

MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");
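/*
 * Illustrative usage (not part of the original source): queue sizes and
 * the receive mode can be tuned at module load time, e.g.
 *
 *   modprobe ehea use_mcs=1 rq1_entries=16383 sq_entries=4095
 *
 * Entry counts must have the form 2^x - 1 with x in [6..14], as stated
 * in the parameter descriptions above.
 */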
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;


static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.name = "ehea",
	.match_table = ehea_device_table,
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
			  deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}

void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}
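/*
 * The firmware-handle array below mirrors every live firmware object
 * (QPs, CQs, EQs, MRs) so the kexec/kdump path can release them after a
 * crash. The array is rebuilt from scratch in two passes: first count
 * the handles, then fill the flat array.
 */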
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps + port->num_add_tx_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0;
			     l < port->num_def_qps + port->num_add_tx_qps;
			     l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}
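/*
 * Same two-pass scheme as ehea_update_firmware_handles(), but for the
 * broadcast/multicast registrations. Note the allocation uses GFP_ATOMIC
 * here because this function runs under a spinlock rather than a mutex.
 */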
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list,list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;  /* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	tx_packets = 0;
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	free_page((unsigned long)cb2);
out:
	return stats;
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		if (nr_of_wqes > 0)
			pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}
static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				ehea_info("%s: rq%i ran dry - no mem for skb",
					  pr->port->netdev->name, rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}
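/*
 * Receive path overview: RQ1 is the low-latency queue whose payload is
 * copied straight out of the CQE, RQ2 holds mid-sized packets and RQ3
 * carries jumbo frames up to EHEA_MAX_PACKET_SIZE. ehea_check_cqe()
 * extracts which RQ a completion belongs to and reports error status.
 */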
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);
}

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packets */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}
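/*
 * get_skb_hdr() above is the inet_lro get_skb_header callback: it
 * locates the IPv4 and TCP headers so the LRO engine can decide whether
 * a packet is eligible for aggregation. Returning -1 opts it out.
 */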
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
			      pr->port->vgrp);

	if (use_lro) {
		if (vlan_extracted)
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
						     pr->port->vgrp,
						     cqe->vlan_tag,
						     cqe);
		else
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
						 cqe->vlan_tag);
		else
			netif_receive_skb(skb);
	}
}
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			ehea_proc_skb(pr, cqe, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (use_lro)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Bad send completion status=0x%04X",
				   cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				ehea_error("Resetting port");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);

	return cqe;
}
#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;
	int rx = 0;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	if (!force_irq)
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		force_irq = 0;
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	pr->poll_counter++;
	return rx;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}
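/*
 * Affinity EQ interrupt: raised by firmware when a queue pair enters an
 * error state. The handler drains the event queue and, for errors that
 * cannot be recovered in place, schedules a full port reset.
 */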
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;   /* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		ehea_error("Resetting port");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else
			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
						  port->netdev->name);
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
		break;
	}
}
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
			       - init_attr->act_nr_rwqes_rq2
			       - init_attr->act_nr_rwqes_rq3 - 1);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;


	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);


	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);
	}
out:
	return ret;


out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;

}
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	ehea_error("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		ehea_error("create_eq failed (eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
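/*
 * Worked example (illustrative): for an untagged IPv4/TCP frame with a
 * 20-byte IP header, ip_start is 14 (the Ethernet header length), so
 * ip_end = 14 + 20 - 1 = 33 and tcp_offset = 34 + 16 = 50, the byte
 * offset of the TCP checksum field within the frame.
 */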
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb_headlen(skb);
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb_headlen(skb);

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
		}
	}
}
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (tagged)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if ((enable && port->promisc) || (!enable && !port->promisc))
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}

static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}

static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				ehea_error("failed disabling IFF_ALLMULTI");
		}
}

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%llx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
	return;
}

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
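/*
 * Transmit helpers: ehea_xmit2() builds a SWQE2 (immediate data plus
 * scatter-gather descriptors) for larger frames, while ehea_xmit3()
 * builds a SWQE3 that carries the whole frame as immediate data.
 * ehea_start_xmit() picks between them based on SWQE3_MAX_IMM.
 */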
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}
	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb_headlen(skb));
		imm_data += skb_headlen(skb);

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
}
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
	struct tcphdr *tcp;
	u32 tmp;

	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr *)(skb_network_header(skb) +
					(ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	} else
		return 0;
}
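/*
 * Illustrative example: for a TCP flow with source port 80 and
 * destination port 5000, tmp = (80 + (5000 << 16)) % 31, plus the
 * destination address modulo 31; the final "% num_qps" spreads flows
 * across the TX queue pairs while keeping each flow on a single queue.
 */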
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;
	}

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);
	pr->tx_packets++;

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	free_page((unsigned long)cb1);
out:
	return;
}

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	free_page((unsigned long)cb1);
	return;
}

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	free_page((unsigned long)cb1);
	return;
}
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}
static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}
static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("reg_interrupts failed. ret:%d", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("out_free_irqs");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		ehea_info("Failed starting %s. ret=%i", dev->name, ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}
2567 static void port_napi_disable(struct ehea_port *port)
2571 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2572 napi_disable(&port->port_res[i].napi);
2575 static void port_napi_enable(struct ehea_port *port)
2579 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2580 napi_enable(&port->port_res[i].napi);
2583 static int ehea_open(struct net_device *dev)
2586 struct ehea_port *port = netdev_priv(dev);
2588 mutex_lock(&port->port_lock);
2590 if (netif_msg_ifup(port))
2591 ehea_info("enabling port %s", dev->name);
2595 port_napi_enable(port);
2596 netif_start_queue(dev);
2599 mutex_unlock(&port->port_lock);
2604 static int ehea_down(struct net_device *dev)
2607 struct ehea_port *port = netdev_priv(dev);
2609 if (port->state == EHEA_PORT_DOWN)
2612 ehea_drop_multicast_list(dev);
2613 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2615 ehea_free_interrupts(dev);
2617 port->state = EHEA_PORT_DOWN;
2619 ehea_update_bcmc_registrations();
2621 ret = ehea_clean_all_portres(port);
2623 ehea_info("Failed freeing resources for %s. ret=%i",
2626 ehea_update_firmware_handles();
2631 static int ehea_stop(struct net_device *dev)
2634 struct ehea_port *port = netdev_priv(dev);
2636 if (netif_msg_ifdown(port))
2637 ehea_info("disabling port %s", dev->name);
2639 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2640 cancel_work_sync(&port->reset_task);
2641 mutex_lock(&port->port_lock);
2642 netif_stop_queue(dev);
2643 port_napi_disable(port);
2644 ret = ehea_down(dev);
2645 mutex_unlock(&port->port_lock);
2646 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
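/*
 * ehea_stop() sets __EHEA_DISABLE_PORT_RESET before cancel_work_sync()
 * so a reset cannot be re-queued while the port is being torn down.
 * The scheduling side of that interlock looks roughly like this (sketch;
 * the real ehea_schedule_port_reset() is defined earlier in the driver):
 */
static inline void ehea_schedule_port_reset_sketch(struct ehea_port *port)
{
        /* refuse to queue a reset while ehea_stop() owns the port */
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
}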
2650 static void ehea_purge_sq(struct ehea_qp *orig_qp)
2652 struct ehea_qp qp = *orig_qp;
2653 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2654 struct ehea_swqe *swqe;
2658 for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2659 swqe = ehea_get_swqe(&qp, &wqe_index);
2660 swqe->tx_control |= EHEA_SWQE_PURGE;
2664 static void ehea_flush_sq(struct ehea_port *port)
2668 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2669 struct ehea_port_res *pr = &port->port_res[i];
2670 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2672 while (atomic_read(&pr->swqe_avail) < swqe_max) {
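/*
 * After the SWQEs are flagged EHEA_SWQE_PURGE, ehea_flush_sq() waits for
 * swqe_avail to climb back toward swqe_max as the hardware completes
 * them.  The elided loop body is a bounded poll along these lines
 * (sketch; the 5 ms period and retry bound are assumptions, not taken
 * from the source):
 */
static void ehea_wait_sq_drained_sketch(struct ehea_port_res *pr,
                                        int swqe_max)
{
        int tries = 20;

        while (atomic_read(&pr->swqe_avail) < swqe_max && tries--)
                msleep(5);      /* let the completion handler catch up */
}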
2680 int ehea_stop_qps(struct net_device *dev)
2682 struct ehea_port *port = netdev_priv(dev);
2683 struct ehea_adapter *adapter = port->adapter;
2684 struct hcp_modify_qp_cb0 *cb0;
2692 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2698 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2699 struct ehea_port_res *pr = &port->port_res[i];
2700 struct ehea_qp *qp = pr->qp;
2702 /* Purge send queue */
2705 /* Disable queue pair */
2706 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2707 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2709 if (hret != H_SUCCESS) {
2710 ehea_error("query_ehea_qp failed (1)");
2714 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2715 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2717 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2718 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2719 1), cb0, &dummy64,
2720 &dummy64, &dummy16, &dummy16);
2721 if (hret != H_SUCCESS) {
2722 ehea_error("modify_ehea_qp failed (1)");
2726 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2727 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2729 if (hret != H_SUCCESS) {
2730 ehea_error("query_ehea_qp failed (2)");
2734 /* deregister shared memory regions */
2735 dret = ehea_rem_smrs(pr);
2737 ehea_error("unreg shared memory region failed");
2744 free_page((unsigned long)cb0);
2749 void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2751 struct ehea_qp qp = *orig_qp;
2752 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2753 struct ehea_rwqe *rwqe;
2754 struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2755 struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2756 struct sk_buff *skb;
2757 u32 lkey = pr->recv_mr.lkey;
2763 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2764 rwqe = ehea_get_next_rwqe(&qp, 2);
2765 rwqe->sg_list[0].l_key = lkey;
2766 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2767 skb = skba_rq2[index];
2769 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2772 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2773 rwqe = ehea_get_next_rwqe(&qp, 3);
2774 rwqe->sg_list[0].l_key = lkey;
2775 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2776 skb = skba_rq3[index];
2778 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
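/*
 * Each RWQE's wr_id packs the ring index, and EHEA_BMASK_GET extracts it
 * so the matching skb can be re-stamped with the new lkey and remapped
 * vaddr after the memory region changed.  The usual mechanics behind
 * such get/set macros, shown generically (the real EHEA_BMASK_*
 * definitions live in ehea.h; the shift and width below are made up):
 */
#define SKETCH_INDEX_SHIFT      32
#define SKETCH_INDEX_MASK       0xffffffffULL

static inline u32 sketch_wr_id_index(u64 wr_id)
{
        return (wr_id >> SKETCH_INDEX_SHIFT) & SKETCH_INDEX_MASK;
}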
2782 int ehea_restart_qps(struct net_device *dev)
2784 struct ehea_port *port = netdev_priv(dev);
2785 struct ehea_adapter *adapter = port->adapter;
2789 struct hcp_modify_qp_cb0 *cb0;
2794 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2800 for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
2801 struct ehea_port_res *pr = &port->port_res[i];
2802 struct ehea_qp *qp = pr->qp;
2804 ret = ehea_gen_smrs(pr);
2806 ehea_error("creation of shared memory regions failed");
2810 ehea_update_rqs(qp, pr);
2812 /* Enable queue pair */
2813 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2814 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2816 if (hret != H_SUCCESS) {
2817 ehea_error("query_ehea_qp failed (1)");
2821 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2822 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2824 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2825 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2826 1), cb0, &dummy64,
2827 &dummy64, &dummy16, &dummy16);
2828 if (hret != H_SUCCESS) {
2829 ehea_error("modify_ehea_qp failed (1)");
2833 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2834 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2836 if (hret != H_SUCCESS) {
2837 ehea_error("query_ehea_qp failed (2)");
2841 /* refill entire queue */
2842 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2843 ehea_refill_rq2(pr, 0);
2844 ehea_refill_rq3(pr, 0);
2847 free_page((unsigned long)cb0);
2852 static void ehea_reset_port(struct work_struct *work)
2855 struct ehea_port *port =
2856 container_of(work, struct ehea_port, reset_task);
2857 struct net_device *dev = port->netdev;
2860 mutex_lock(&port->port_lock);
2861 netif_stop_queue(dev);
2863 port_napi_disable(port);
2871 ehea_set_multicast_list(dev);
2873 if (netif_msg_timer(port))
2874 ehea_info("Device %s resetted successfully", dev->name);
2876 port_napi_enable(port);
2878 netif_wake_queue(dev);
2880 mutex_unlock(&port->port_lock);
2883 static void ehea_rereg_mrs(struct work_struct *work)
2886 struct ehea_adapter *adapter;
2888 ehea_info("LPAR memory changed - re-initializing driver");
2890 list_for_each_entry(adapter, &adapter_list, list)
2891 if (adapter->active_ports) {
2892 /* Shutdown all ports */
2893 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2894 struct ehea_port *port = adapter->port[i];
2895 struct net_device *dev;
2902 if (dev->flags & IFF_UP) {
2903 mutex_lock(&port->port_lock);
2904 netif_stop_queue(dev);
2905 ehea_flush_sq(port);
2906 ret = ehea_stop_qps(dev);
2908 mutex_unlock(&port->port_lock);
2911 port_napi_disable(port);
2912 mutex_unlock(&port->port_lock);
2916 /* Unregister old memory region */
2917 ret = ehea_rem_mr(&adapter->mr);
2919 ehea_error("unregister MR failed - driver"
2925 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2927 list_for_each_entry(adapter, &adapter_list, list)
2928 if (adapter->active_ports) {
2929 /* Register new memory region */
2930 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2932 ehea_error("register MR failed - driver"
2937 /* Restart all ports */
2938 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2939 struct ehea_port *port = adapter->port[i];
2942 struct net_device *dev = port->netdev;
2944 if (dev->flags & IFF_UP) {
2945 mutex_lock(&port->port_lock);
2946 port_napi_enable(port);
2947 ret = ehea_restart_qps(dev);
2949 netif_wake_queue(dev);
2950 mutex_unlock(&port->port_lock);
2955 ehea_info("re-initializing driver complete");
2960 static void ehea_tx_watchdog(struct net_device *dev)
2962 struct ehea_port *port = netdev_priv(dev);
2964 if (netif_carrier_ok(dev) &&
2965 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2966 ehea_schedule_port_reset(port);
2969 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2971 struct hcp_query_ehea *cb;
2975 cb = (void *)get_zeroed_page(GFP_KERNEL);
2981 hret = ehea_h_query_ehea(adapter->handle, cb);
2983 if (hret != H_SUCCESS) {
2988 adapter->max_mc_mac = cb->max_mc_mac - 1;
2992 free_page((unsigned long)cb);
2997 int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2999 struct hcp_ehea_port_cb4 *cb4;
3005 /* (Try to) enable jumbo frames */
3006 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
3008 ehea_error("no mem for cb4");
3012 hret = ehea_h_query_ehea_port(port->adapter->handle,
3013 port->logical_port_id,
3014 H_PORT_CB4,
3015 H_PORT_CB4_JUMBO, cb4);
3016 if (hret == H_SUCCESS) {
3017 if (cb4->jumbo_frame)
3020 cb4->jumbo_frame = 1;
3021 hret = ehea_h_modify_ehea_port(port->adapter->handle,
3022 port->logical_port_id,
3023 H_PORT_CB4,
3024 H_PORT_CB4_JUMBO, cb4);
3028 if (hret == H_SUCCESS)
3034 free_page((unsigned long)cb4);
3040 static ssize_t ehea_show_port_id(struct device *dev,
3041 struct device_attribute *attr, char *buf)
3043 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3044 return sprintf(buf, "%d", port->logical_port_id);
3047 static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
3048 NULL);
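/*
 * For reference, DEVICE_ATTR(log_port_id, ...) above expands to a
 * struct device_attribute named dev_attr_log_port_id, which
 * ehea_register_port() below hands to device_create_file().
 * Hand-expanded equivalent (shown as a comment, not a second
 * definition):
 *
 *	static struct device_attribute dev_attr_log_port_id = {
 *		.attr  = { .name = "log_port_id",
 *			   .mode = S_IRUSR | S_IRGRP | S_IROTH },
 *		.show  = ehea_show_port_id,
 *		.store = NULL,
 *	};
 */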
3050 static void __devinit logical_port_release(struct device *dev)
3052 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
3053 of_node_put(port->ofdev.node);
3056 static struct device *ehea_register_port(struct ehea_port *port,
3057 struct device_node *dn)
3061 port->ofdev.node = of_node_get(dn);
3062 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
3063 port->ofdev.dev.bus = &ibmebus_bus_type;
3065 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
3066 port->ofdev.dev.release = logical_port_release;
3068 ret = of_device_register(&port->ofdev);
3070 ehea_error("failed to register device. ret=%d", ret);
3074 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
3076 ehea_error("failed to register attributes, ret=%d", ret);
3077 goto out_unreg_of_dev;
3080 return &port->ofdev.dev;
3083 of_device_unregister(&port->ofdev);
3088 static void ehea_unregister_port(struct ehea_port *port)
3090 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
3091 of_device_unregister(&port->ofdev);
3094 static const struct net_device_ops ehea_netdev_ops = {
3095 .ndo_open = ehea_open,
3096 .ndo_stop = ehea_stop,
3097 .ndo_start_xmit = ehea_start_xmit,
3098 #ifdef CONFIG_NET_POLL_CONTROLLER
3099 .ndo_poll_controller = ehea_netpoll,
3101 .ndo_get_stats = ehea_get_stats,
3102 .ndo_set_mac_address = ehea_set_mac_addr,
3103 .ndo_validate_addr = eth_validate_addr,
3104 .ndo_set_multicast_list = ehea_set_multicast_list,
3105 .ndo_change_mtu = ehea_change_mtu,
3106 .ndo_vlan_rx_register = ehea_vlan_rx_register,
3107 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
3108 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
3109 .ndo_tx_timeout = ehea_tx_watchdog,
3112 struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3113 u32 logical_port_id,
3114 struct device_node *dn)
3117 struct net_device *dev;
3118 struct ehea_port *port;
3119 struct device *port_dev;
3122 /* allocate memory for the port structures */
3123 dev = alloc_etherdev(sizeof(struct ehea_port));
3126 ehea_error("no mem for net_device");
3131 port = netdev_priv(dev);
3133 mutex_init(&port->port_lock);
3134 port->state = EHEA_PORT_DOWN;
3135 port->sig_comp_iv = sq_entries / 10;
3137 port->adapter = adapter;
3139 port->logical_port_id = logical_port_id;
3141 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
3143 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
3144 if (!port->mc_list) {
3146 goto out_free_ethdev;
3149 INIT_LIST_HEAD(&port->mc_list->list);
3151 ret = ehea_sense_port_attr(port);
3153 goto out_free_mc_list;
3155 port_dev = ehea_register_port(port, dn);
3157 goto out_free_mc_list;
3159 SET_NETDEV_DEV(dev, port_dev);
3161 /* initialize net_device structure */
3162 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
3164 dev->netdev_ops = &ehea_netdev_ops;
3165 ehea_set_ethtool_ops(dev);
3167 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
3168 | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
3169 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
3171 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3173 INIT_WORK(&port->reset_task, ehea_reset_port);
3175 ret = register_netdev(dev);
3177 ehea_error("register_netdev failed. ret=%d", ret);
3178 goto out_unreg_port;
3181 port->lro_max_aggr = lro_max_aggr;
3183 ret = ehea_get_jumboframe_status(port, &jumbo);
3185 ehea_error("failed determining jumbo frame status for %s",
3186 port->netdev->name);
3188 ehea_info("%s: Jumbo frames are %sabled", dev->name,
3189 jumbo == 1 ? "en" : "dis");
3191 adapter->active_ports++;
3196 ehea_unregister_port(port);
3199 kfree(port->mc_list);
3205 ehea_error("setting up logical port with id=%d failed, ret=%d",
3206 logical_port_id, ret);
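/*
 * alloc_etherdev(sizeof(struct ehea_port)) above allocates the
 * net_device and the driver-private area in one block; netdev_priv()
 * returns a pointer just past the (aligned) net_device.  Minimal sketch
 * of that pairing (hypothetical helper):
 */
static struct ehea_port *ehea_port_alloc_sketch(void)
{
        struct net_device *dev = alloc_etherdev(sizeof(struct ehea_port));
        struct ehea_port *port;

        if (!dev)
                return NULL;
        port = netdev_priv(dev);        /* private area in the same block */
        port->netdev = dev;             /* back-pointer kept by this driver */
        return port;                    /* free_netdev(dev) frees both */
}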
3210 static void ehea_shutdown_single_port(struct ehea_port *port)
3212 struct ehea_adapter *adapter = port->adapter;
3213 unregister_netdev(port->netdev);
3214 ehea_unregister_port(port);
3215 kfree(port->mc_list);
3216 free_netdev(port->netdev);
3217 adapter->active_ports--;
3220 static int ehea_setup_ports(struct ehea_adapter *adapter)
3222 struct device_node *lhea_dn;
3223 struct device_node *eth_dn = NULL;
3225 const u32 *dn_log_port_id;
3228 lhea_dn = adapter->ofdev->node;
3229 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3231 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3232 NULL);
3233 if (!dn_log_port_id) {
3234 ehea_error("bad device node: eth_dn name=%s",
3239 if (ehea_add_adapter_mr(adapter)) {
3240 ehea_error("creating MR failed");
3241 of_node_put(eth_dn);
3245 adapter->port[i] = ehea_setup_single_port(adapter,
3248 if (adapter->port[i])
3249 ehea_info("%s -> logical port id #%d",
3250 adapter->port[i]->netdev->name,
3253 ehea_remove_adapter_mr(adapter);
3260 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3261 u32 logical_port_id)
3263 struct device_node *lhea_dn;
3264 struct device_node *eth_dn = NULL;
3265 const u32 *dn_log_port_id;
3267 lhea_dn = adapter->ofdev->node;
3268 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3270 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3271 NULL);
3272 if (dn_log_port_id)
3273 if (*dn_log_port_id == logical_port_id)
3280 static ssize_t ehea_probe_port(struct device *dev,
3281 struct device_attribute *attr,
3282 const char *buf, size_t count)
3284 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3285 struct ehea_port *port;
3286 struct device_node *eth_dn = NULL;
3289 u32 logical_port_id;
3291 sscanf(buf, "%d", &logical_port_id);
3293 port = ehea_get_port(adapter, logical_port_id);
3296 ehea_info("adding port with logical port id=%d failed. port "
3297 "already configured as %s.", logical_port_id,
3298 port->netdev->name);
3302 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3305 ehea_info("no logical port with id %d found", logical_port_id);
3309 if (ehea_add_adapter_mr(adapter)) {
3310 ehea_error("creating MR failed");
3314 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3316 of_node_put(eth_dn);
3319 for (i = 0; i < EHEA_MAX_PORTS; i++)
3320 if (!adapter->port[i]) {
3321 adapter->port[i] = port;
3325 ehea_info("added %s (logical port id=%d)", port->netdev->name,
3328 ehea_remove_adapter_mr(adapter);
3332 return (ssize_t) count;
3335 static ssize_t ehea_remove_port(struct device *dev,
3336 struct device_attribute *attr,
3337 const char *buf, size_t count)
3339 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3340 struct ehea_port *port;
3342 u32 logical_port_id;
3344 sscanf(buf, "%d", &logical_port_id);
3346 port = ehea_get_port(adapter, logical_port_id);
3349 ehea_info("removed %s (logical port id=%d)", port->netdev->name,
3352 ehea_shutdown_single_port(port);
3354 for (i = 0; i < EHEA_MAX_PORTS; i++)
3355 if (adapter->port[i] == port) {
3356 adapter->port[i] = NULL;
3360 ehea_error("removing port with logical port id=%d failed. port "
3361 "not configured.", logical_port_id);
3365 ehea_remove_adapter_mr(adapter);
3367 return (ssize_t) count;
3370 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
3371 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
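/*
 * The two write-only attributes above let an administrator hot-add or
 * remove logical ports at runtime; the written string is parsed with
 * sscanf(buf, "%d", ...) as the logical port id.  Typical usage (the
 * exact sysfs path depends on the ibmebus device name):
 *
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/remove_port
 */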
3373 int ehea_create_device_sysfs(struct of_device *dev)
3375 int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3379 ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3384 void ehea_remove_device_sysfs(struct of_device *dev)
3386 device_remove_file(&dev->dev, &dev_attr_probe_port);
3387 device_remove_file(&dev->dev, &dev_attr_remove_port);
3390 static int __devinit ehea_probe_adapter(struct of_device *dev,
3391 const struct of_device_id *id)
3393 struct ehea_adapter *adapter;
3394 const u64 *adapter_handle;
3397 if (!dev || !dev->node) {
3398 ehea_error("Invalid ibmebus device probed");
3402 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3405 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3409 list_add(&adapter->list, &adapter_list);
3411 adapter->ofdev = dev;
3413 adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
3414 NULL);
3415 if (adapter_handle)
3416 adapter->handle = *adapter_handle;
3418 if (!adapter->handle) {
3419 dev_err(&dev->dev, "failed getting handle for adapter"
3420 " '%s'\n", dev->node->full_name);
3425 adapter->pd = EHEA_PD_ID;
3427 dev_set_drvdata(&dev->dev, adapter);
3430 /* initialize adapter and ports */
3431 /* get adapter properties */
3432 ret = ehea_sense_adapter_attr(adapter);
3434 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3438 adapter->neq = ehea_create_eq(adapter,
3439 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3440 if (!adapter->neq) {
3442 dev_err(&dev->dev, "NEQ creation failed\n");
3446 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3447 (unsigned long)adapter);
3449 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3450 ehea_interrupt_neq, IRQF_DISABLED,
3451 "ehea_neq", adapter);
3453 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3457 ret = ehea_create_device_sysfs(dev);
3461 ret = ehea_setup_ports(adapter);
3463 dev_err(&dev->dev, "setup_ports failed\n");
3464 goto out_rem_dev_sysfs;
3471 ehea_remove_device_sysfs(dev);
3474 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3477 ehea_destroy_eq(adapter->neq);
3480 list_del(&adapter->list);
3484 ehea_update_firmware_handles();
3489 static int __devexit ehea_remove(struct of_device *dev)
3491 struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
3494 for (i = 0; i < EHEA_MAX_PORTS; i++)
3495 if (adapter->port[i]) {
3496 ehea_shutdown_single_port(adapter->port[i]);
3497 adapter->port[i] = NULL;
3500 ehea_remove_device_sysfs(dev);
3502 flush_scheduled_work();
3504 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3505 tasklet_kill(&adapter->neq_tasklet);
3507 ehea_destroy_eq(adapter->neq);
3508 ehea_remove_adapter_mr(adapter);
3509 list_del(&adapter->list);
3512 ehea_update_firmware_handles();
3517 void ehea_crash_handler(void)
3521 if (ehea_fw_handles.arr)
3522 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3523 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3524 ehea_fw_handles.arr[i].fwh,
3525 FORCE_FREE);
3527 if (ehea_bcmc_regs.arr)
3528 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3529 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3530 ehea_bcmc_regs.arr[i].port_id,
3531 ehea_bcmc_regs.arr[i].reg_type,
3532 ehea_bcmc_regs.arr[i].macaddr,
3533 0, H_DEREG_BCMC);
3536 static int ehea_mem_notifier(struct notifier_block *nb,
3537 unsigned long action, void *data)
3539 int ret = NOTIFY_BAD;
3540 struct memory_notify *arg = data;
3542 if (!mutex_trylock(&dlpar_mem_lock)) {
3543 ehea_info("ehea_mem_notifier must not be called parallelized");
3548 case MEM_CANCEL_OFFLINE:
3549 ehea_info("memory offlining canceled");
3550 /* Re-add the canceled memory block */
3552 ehea_info("memory is going online");
3553 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3554 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3556 ehea_rereg_mrs(NULL);
3558 case MEM_GOING_OFFLINE:
3559 ehea_info("memory is going offline");
3560 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3561 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3563 ehea_rereg_mrs(NULL);
3569 ehea_update_firmware_handles();
3573 mutex_unlock(&dlpar_mem_lock);
3578 static struct notifier_block ehea_mem_nb = {
3579 .notifier_call = ehea_mem_notifier,
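/*
 * A memory notifier returns NOTIFY_OK to let the hotplug operation
 * proceed and NOTIFY_BAD to veto it; the handler above defaults to
 * NOTIFY_BAD and only upgrades once the section bitmap and MRs were
 * updated.  Bare skeleton of the pattern (sketch; it omits the
 * __EHEA_STOP_XFER handshake and MR re-registration the real handler
 * performs):
 */
static int ehea_mem_notifier_sketch(struct notifier_block *nb,
                                    unsigned long action, void *data)
{
        struct memory_notify *arg = data;

        switch (action) {
        case MEM_GOING_ONLINE:
                /* grow the driver's view of memory before it is used */
                return ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages) ?
                       NOTIFY_BAD : NOTIFY_OK;
        case MEM_GOING_OFFLINE:
                /* shrink it before the pages disappear */
                return ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages) ?
                       NOTIFY_BAD : NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}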
3582 static int ehea_reboot_notifier(struct notifier_block *nb,
3583 unsigned long action, void *unused)
3585 if (action == SYS_RESTART) {
3586 ehea_info("Reboot: freeing all eHEA resources");
3587 ibmebus_unregister_driver(&ehea_driver);
3592 static struct notifier_block ehea_reboot_nb = {
3593 .notifier_call = ehea_reboot_notifier,
3596 static int check_module_parm(void)
3600 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3601 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3602 ehea_info("Bad parameter: rq1_entries");
3605 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3606 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3607 ehea_info("Bad parameter: rq2_entries");
3610 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3611 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3612 ehea_info("Bad parameter: rq3_entries");
3615 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3616 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3617 ehea_info("Bad parameter: sq_entries");
3624 static ssize_t ehea_show_capabilities(struct device_driver *drv,
3627 return sprintf(buf, "%d", EHEA_CAPABILITIES);
3630 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
3631 ehea_show_capabilities, NULL);
3633 int __init ehea_module_init(void)
3637 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
3641 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
3642 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3643 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3645 mutex_init(&ehea_fw_handles.lock);
3646 spin_lock_init(&ehea_bcmc_regs.lock);
3648 ret = check_module_parm();
3652 ret = ehea_create_busmap();
3656 ret = register_reboot_notifier(&ehea_reboot_nb);
3658 ehea_info("failed registering reboot notifier");
3660 ret = register_memory_notifier(&ehea_mem_nb);
3662 ehea_info("failed registering memory remove notifier");
3664 ret = crash_shutdown_register(&ehea_crash_handler);
3666 ehea_info("failed registering crash handler");
3668 ret = ibmebus_register_driver(&ehea_driver);
3670 ehea_error("failed registering eHEA device driver on ebus");
3674 ret = driver_create_file(&ehea_driver.driver,
3675 &driver_attr_capabilities);
3677 ehea_error("failed to register capabilities attribute, ret=%d",
3685 ibmebus_unregister_driver(&ehea_driver);
3687 unregister_memory_notifier(&ehea_mem_nb);
3688 unregister_reboot_notifier(&ehea_reboot_nb);
3689 crash_shutdown_unregister(&ehea_crash_handler);
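/*
 * Note the asymmetry in ehea_module_init(): notifier and crash-handler
 * registration failures are only logged, while failures from
 * ibmebus_register_driver() and driver_create_file() unwind everything
 * registered so far, in reverse order, through the labels above.  The
 * generic shape of that idiom:
 *
 *	ret = register_a();  if (ret) goto out;
 *	ret = register_b();  if (ret) goto out_unreg_a;
 *	ret = register_c();  if (ret) goto out_unreg_b;
 *	return 0;
 * out_unreg_b:
 *	unregister_b();
 * out_unreg_a:
 *	unregister_a();
 * out:
 *	return ret;
 */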
3694 static void __exit ehea_module_exit(void)
3698 flush_scheduled_work();
3699 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3700 ibmebus_unregister_driver(&ehea_driver);
3701 unregister_reboot_notifier(&ehea_reboot_nb);
3702 ret = crash_shutdown_unregister(&ehea_crash_handler);
3704 ehea_info("failed unregistering crash handler");
3705 unregister_memory_notifier(&ehea_mem_nb);
3706 kfree(ehea_fw_handles.arr);
3707 kfree(ehea_bcmc_regs.arr);
3708 ehea_destroy_busmap();
3711 module_init(ehea_module_init);
3712 module_exit(ehea_module_exit);