/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);
MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "Message level bitmask (default -1: use driver default)");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues, Default = 0");
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, "Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}
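
/*
 * Rebuild the global ehea_fw_handles snapshot: one (adapter handle,
 * firmware handle) pair for every QP, CQ, EQ and MR currently in use.
 * The kexec/kdump crash path walks this flat table to free hypervisor
 * resources without traversing live driver structures.
 */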
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps + port->num_add_tx_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0;
			     l < port->num_def_qps + port->num_add_tx_qps;
			     l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}
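
/*
 * Rebuild the global ehea_bcmc_regs snapshot: two entries (untagged and
 * VLANID_ALL) per active port for broadcast, plus two per registered
 * multicast address. Like the firmware-handle table above, it is kept
 * flat so the reboot/crash paths can deregister addresses quickly.
 */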
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list,
					    list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		ehea_error("no mem for cb2");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	tx_packets = 0;
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;

out_herr:
	free_page((unsigned long)cb2);
out:
	return stats;
}
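
/*
 * RQ1 receives its payload inline in the CQE (low-latency path), so the
 * preallocated skbs are only replaced where one was actually consumed;
 * os_skbs carries over allocations that failed on a previous refill.
 */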
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}
static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i])
			break;
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, nr_rq1a);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				ehea_info("%s: rq%i ran dry - no mem for skb",
					  pr->port->netdev->name, rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}
static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}
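
/*
 * Returns 0 if the CQE carries usable data. A TCP checksum error with
 * header_length == 0 is tolerated here: such frames are not TCP, and
 * the hardware reports a complemented (blind) checksum for them instead
 * (see ehea_fill_skb()).
 */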
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}
static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}
static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}
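
/*
 * inet_lro callback: locate the IPv4 and TCP headers for aggregation,
 * or return -1 to make the LRO layer pass the skb through unaggregated.
 */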
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
			      pr->port->vgrp);

	if (use_lro) {
		if (vlan_extracted)
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
						     pr->port->vgrp,
						     cqe->vlan_tag,
						     cqe);
		else
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
						 cqe->vlan_tag);
		else
			netif_receive_skb(skb);
	}
}
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb)
						break;
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			ehea_proc_skb(pr, cqe, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (use_lro)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}
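
/*
 * During queue purge/restart a probe WQE tagged with the magic wr_id
 * SWQE_RESTART_CHECK is posted on each send queue; its completion (seen
 * in ehea_proc_cqes()) confirms that the hardware and software send
 * queue state are still in sync.
 */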
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}

static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));

		if (!ret) {
			ehea_error("HW/SW queues out of sync");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Bad send completion status=0x%04X",
				   cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				ehea_error("Resetting port");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);
	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}
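
/*
 * Hybrid interrupt/poll strategy: after EHEA_NAPI_POLL_NUM_BEFORE_IRQ
 * consecutive poll rounds, ehea_poll() completes NAPI and re-arms the
 * CQ event pointers (force_irq) so a busy queue cannot monopolize the
 * CPU in pure polling mode.
 */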
#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;
	int rx = 0;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	if (!force_irq)
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		force_irq = 0;
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	pr->poll_counter++;
	return rx;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;
	struct ehea_qp *qp;
	u32 qp_token;
	u64 resource_type, aer, aerr;
	int reset_port = 0;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		ehea_error("Resetting port");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			ehea_error("Failed sensing port speed");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			ehea_info("Hypervisor denied setting port speed");
			ret = -EPERM;
		} else {
			ret = -EIO;
			ehea_error("Failed setting port speed");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			ehea_error("unknown portnum %x", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					ehea_error("failed resensing port "
						   "attributes");
					break;
				}

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  "%s Duplex",
						  port->netdev->name,
						  port->port_speed,
						  port->full_duplex ==
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);
			}
		} else if (netif_carrier_ok(port->netdev)) {
			if (netif_msg_link(port))
				ehea_info("%s: Logical port down",
					  port->netdev->name);
			netif_carrier_off(port->netdev);
			netif_stop_queue(port->netdev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
					  port->netdev->name);
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
		else
			ehea_info("External switch port is backup port");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");
		break;
	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);
		break;
	default:
		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
		break;
	}
}
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

	while (eqe) {
		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}
static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
			       - init_attr->act_nr_rwqes_rq2
			       - init_attr->act_nr_rwqes_rq3 - 1);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);
			goto out_free_req;
		}
		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	ehea_error("Generating SMRS failed");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		ehea_error("create_eq failed (eq)");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		ehea_error("create_cq failed (cq_recv)");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		ehea_error("create_cq failed (cq_send)");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		ehea_error("no mem for ehea_qp_init_attr");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		ehea_error("create_qp failed");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
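
/*
 * For a TSO frame only the Ethernet/IP/TCP headers go into the SWQE2
 * immediate-data area; the remaining linear data is referenced through
 * sg1entry so the hardware can replicate the headers per segment.
 */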
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb_headlen(skb);
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb_headlen(skb);

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		ehea_error("cannot handle fragmented headers");
}
static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
		}
	}
}
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (untagged)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ehea_error("no mem for cb0");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
	else
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		ehea_error("no mem for cb7");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}
static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			ehea_error("failed deregistering mcast MAC");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				ehea_error("failed enabling IFF_ALLMULTI");
		}
	} else if (!enable) {
		/* Disable ALLMULTI */
		hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
		if (!hret)
			port->allmulti = 0;
		else
			ehea_error("failed disabling IFF_ALLMULTI");
	}
}
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);
	}
}
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%llx). "
				  "Use ALLMULTI!",
				  port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);

	}
out:
	ehea_update_bcmc_registrations();
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}
	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb_headlen(skb));
		imm_data += skb_headlen(skb);

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}
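
/*
 * Spread TCP flows across the available send queues by hashing on the
 * TCP ports and destination address; all non-TCP traffic uses queue 0.
 */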
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
	struct tcphdr *tcp;
	u32 tmp;

	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr *)(skb_network_header(skb) +
					(ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	} else
		return 0;
}
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;
	}

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	if (vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");
	}

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);
	pr->tx_packets++;

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

	free_page((unsigned long)cb1);
out:
	return;
}
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	free_page((unsigned long)cb1);
}
static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		ehea_error("no mem for cb1");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
out:
	free_page((unsigned long)cb1);
}
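
/*
 * Walk the QP through its activation sequence (INITIALIZED -> ENABLED ->
 * READY-TO-SEND), re-reading the full control block between transitions
 * as the hypervisor interface expects.
 */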
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}
static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret, i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		ehea_error("ehea_create_eq failed (qp_eq)");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}
static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}
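/*
 * Bring-up sequence for a port: create the port resources, set the
 * default QP, register interrupts, activate all QPs, pre-fill the
 * receive queues and register the broadcast MAC.  A failure unwinds
 * in reverse order via the out_* labels.
 */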
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		ehea_error("port_res_failed");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		ehea_error("ehea_configure_port failed. ret:%d", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		ehea_error("reg_interrupts failed. ret:%d", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			ehea_error("activate_qp failed");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			ehea_error("ehea_fill_port_res failed");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		ehea_info("Failed starting %s. ret=%i", dev->name, ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}
static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_enable(&port->port_res[i].napi);
}
static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_start_queue(dev);
	}

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	mutex_unlock(&port->port_lock);

	return ret;
}
static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		ehea_info("Failed freeing resources for %s. ret=%i",
			  dev->name, ret);

	ehea_update_firmware_handles();

	return ret;
}
static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);

	return ret;
}
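/*
 * Mark every send WQE with the PURGE flag so the hardware completes
 * outstanding work requests without transmitting them.  The function
 * iterates over a local copy of the QP, leaving the live queue
 * pointers untouched.
 */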
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}
static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			ehea_error("WARNING: sq not flushed completely");
			break;
		}
	}
}
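/*
 * Quiesce all QPs of a port (used while LPAR memory is reconfigured):
 * purge the send queue, clear the ENABLED bit in qp_ctl_reg via a
 * query/modify hcall pair, then deregister the shared memory regions.
 */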
int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			ehea_error("unreg shared memory region failed");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}
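/*
 * After the kernel memory region has been re-registered the old lkey
 * is stale; rewrite the lkey and the virtual address of every posted
 * RQ2/RQ3 receive WQE so the hardware references the new region.
 */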
void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;

	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}
int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			ehea_error("creation of shared memory regions failed");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}
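/*
 * Worker for port->reset_task.  Takes dlpar_mem_lock so a port reset
 * cannot race with a concurrent memory add/remove operation.
 */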
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	if (netif_msg_timer(port))
		ehea_info("Device %s reset successfully", dev->name);

	port_napi_enable(port);

	netif_wake_queue(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}
static void ehea_rereg_mrs(struct work_struct *work)
{
	int ret, i;
	struct ehea_adapter *adapter;

	ehea_info("LPAR memory changed - re-initializing driver");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_stop_queue(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				ehea_error("unregister MR failed - driver"
					   " inoperable!");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				ehea_error("register MR failed - driver"
					   " inoperable!");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						port_napi_enable(port);
						ret = ehea_restart_qps(dev);
						check_sqs(port);
						if (!ret)
							netif_wake_queue(dev);
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	ehea_info("re-initializing driver complete");
out:
	return;
}
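/*
 * TX watchdog: a transmit timeout triggers a full port reset unless
 * traffic is intentionally stopped for a memory reconfiguration
 * (__EHEA_STOP_XFER) or the carrier is already down.
 */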
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}
int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}
int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	int ret = 0;
	u64 hret;
	struct hcp_ehea_port_cb4 *cb4;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		return -ENOMEM;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame)
			*jumbo = 1;
		else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO, cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else
		ret = -EINVAL;

	free_page((unsigned long)cb4);
	return ret;
}
static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		ehea_error("failed to register device. ret=%d", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		ehea_error("failed to register attributes, ret=%d", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}
static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}
static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats		= ehea_get_stats,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_register	= ehea_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev(sizeof(struct ehea_port));

	if (!dev) {
		ehea_error("no mem for net_device");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_unreg_port;
	}

	port->lro_max_aggr = lro_max_aggr;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		ehea_error("failed determining jumbo frame status for %s",
			   port->netdev->name);

	ehea_info("%s: Jumbo frames are %sabled", dev->name,
		  jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	ehea_error("setting up logical port with id=%d failed, ret=%d",
		   logical_port_id, ret);
	return NULL;
}
static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}
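/*
 * Walk the children of the lhea device-tree node and instantiate one
 * logical port per child that carries an "ibm,hea-port-no" property.
 */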
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			ehea_error("bad device node: eth_dn name=%s",
				   eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			ehea_error("creating MR failed");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			ehea_info("%s -> logical port id #%d",
				  adapter->port[i]->netdev->name,
				  *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}
static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("adding port with logical port id=%d failed. port "
			  "already configured as %s.", logical_port_id,
			  port->netdev->name);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		ehea_info("no logical port with id %d found", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		ehea_error("creating MR failed");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		ehea_info("added %s (logical port id=%d)", port->netdev->name,
			  logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}
static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("removed %s (logical port id=%d)",
			  port->netdev->name, logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		ehea_error("removing port with logical port id=%d failed. port "
			   "not configured.", logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}
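/*
 * Dynamic port management via sysfs: writing a logical port id to
 * these adapter attributes adds or removes the port at runtime.
 * A hypothetical example (the exact device path depends on the
 * ibmebus device name on the given system):
 *
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/remove_port
 */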
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}
static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->dev.of_node) {
		ehea_error("Invalid ibmebus device probed");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);


	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);

out:
	ehea_update_firmware_handles();

	return ret;
}
static int __devexit ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	flush_scheduled_work();

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}
void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}
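/*
 * Memory hotplug notifier: keep the driver's section bitmap in sync
 * and re-register the memory regions whenever LPAR memory goes online
 * or offline.  dlpar_mem_lock serializes this against port resets.
 */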
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		ehea_info("memory offlining canceled");
		/* Readd canceled memory block */
	case MEM_GOING_ONLINE:
		ehea_info("memory is going online");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs(NULL);
		break;
	case MEM_GOING_OFFLINE:
		ehea_info("memory is going offline");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs(NULL);
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}
static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		ehea_info("Reboot: freeing all eHEA resources");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};
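/*
 * Queue size module parameters are validated once at load time;
 * ehea_module_init fails with -EINVAL if any of them is out of range.
 */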
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}
static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);
int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);


	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		ehea_info("failed registering reboot notifier");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		ehea_info("failed registering memory remove notifier");

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret)
		ehea_info("failed registering crash handler");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		ehea_error("failed registering eHEA device driver on ebus");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		ehea_error("failed to register capabilities attribute, ret=%d",
			   ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(ehea_crash_handler);
out:
	return ret;
}
static void __exit ehea_module_exit(void)
{
	int ret;

	flush_scheduled_work();
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(ehea_crash_handler);
	if (ret)
		ehea_info("failed unregistering crash handler");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}
module_init(ehea_module_init);
module_exit(ehea_module_exit);