/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA-mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
				offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
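/* Example of how the pair works together: IBMVNIC_STAT_OFF(rx_packets)
 * yields the byte offset of the firmware-maintained rx_packets counter
 * within struct ibmvnic_adapter, and IBMVNIC_GET_STAT(adapter, off) then
 * reads the u64 at that offset.
 */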
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
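	/* On success the hypervisor returns the number assigned to the new
	 * sub-CRQ in retbuf[0] and its interrupt token in retbuf[1]; callers
	 * receive these through *number and *irq (see init_sub_crq_queue(),
	 * which passes &scrq->crq_num and &scrq->hw_irq).
	 */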
/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);

	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;
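	/* Register the mapping with the VNIC server before first use: send
	 * REQUEST_MAP and block on fw_done, which the CRQ response handler
	 * is expected to complete once the server acknowledges the map.
	 */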
	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
}
static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);
	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));
	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;
		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * parsed incorrectly.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
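		/* e.g. a 4096-byte (0x1000) buffer is stored as
		 * cpu_to_be32(0x1000 << 8), i.e. bytes 00 10 00 00, so the
		 * first three bytes carry the 24-bit length 0x001000.
		 */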
		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;
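	/* On failure the slot index is returned to the free map at
	 * next_free (which was not advanced), the skb is dropped, and any
	 * buffers already posted are still published to the pool count.
	 */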
failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}
static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	int tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	u64 *size_array;
	int i, j;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}
	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     adapter->req_rx_add_entries_per_subcrq, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}
	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->req_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->req_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}
	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);
	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_tx_start_all_queues(netdev);

	return 0;
bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	return -ENOMEM;
}
static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);
	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_TO_DEVICE);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}
/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;
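	/* The first descriptor emitted carries up to 24 bytes of header
	 * data; each IBMVNIC_HDR_EXT_DESC that follows carries up to 29.
	 */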
	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len - 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
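/* Note: the descriptors built above start at txbuff->indir_arr + 1;
 * slot 0 is reserved for the main TX descriptor, which ibmvnic_xmit()
 * fills in before the whole array is sent with one indirect sub-CRQ.
 */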
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;
	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
	    adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;
	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}
	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
			    adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	atomic_inc(&tx_scrq->used);

	if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}
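	/* The queue is stopped once every ring entry is in flight;
	 * ibmvnic_complete_tx() wakes it again when at most half of the
	 * entries remain outstanding.
	 */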
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}
static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}
static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}
		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
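	/* Budget not exhausted: re-enable the interrupt, then re-check for
	 * completions that raced in; if any are pending, disable again and
	 * reschedule NAPI so nothing is stranded.
	 */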
	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		}
	}
	return frames_processed;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif
static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};
/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}
static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}
static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};
/* Routines for managing CRQs/sCRQs */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
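	/* 4 pages of 32-byte descriptors, e.g. 512 entries per sub-CRQ with
	 * 4KB pages.
	 */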
	scrq->cur = 0;
	atomic_set(&scrq->used, 0);
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		adapter->rx_scrq = NULL;
	}
}
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
		adapter->rx_scrq = NULL;
	}
}
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;
				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}
			if (txbuff->last_frag) {
				atomic_dec(&scrq->used);

				if (atomic_read(&scrq->used) <=
				    (adapter->req_tx_entries_per_subcrq / 2) &&
				    netif_subqueue_stopped(adapter->netdev,
							   txbuff->skb)) {
					netif_wake_subqueue(adapter->netdev,
							    scrq->pool_index);
					netdev_dbg(adapter->netdev,
						   "Started queue %d\n",
						   scrq->pool_index);
				}

				dev_kfree_skb_any(txbuff->skb);
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						  producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;
	}
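	/* Mirror of the rx path: re-enable the interrupt, then look again
	 * for completions that arrived in the window before it was armed.
	 */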
	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}
	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return 0;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs_no_irqs(adapter);
	return rc;
}
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i, j;
	/* Sub-CRQ entries are 32 byte long */
	int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

	if (adapter->min_tx_entries_per_subcrq > entries_page ||
	    adapter->min_rx_add_entries_per_subcrq > entries_page) {
		dev_err(dev, "Fatal, invalid entries per sub-crq\n");
		goto allqueues_failed;
	}

	/* Get the minimum between the queried max and the entries
	 * that fit in our PAGE_SIZE
	 */
	adapter->req_tx_entries_per_subcrq =
	    adapter->max_tx_entries_per_subcrq > entries_page ?
	    entries_page : adapter->max_tx_entries_per_subcrq;
	adapter->req_rx_add_entries_per_subcrq =
	    adapter->max_rx_add_entries_per_subcrq > entries_page ?
	    entries_page : adapter->max_rx_add_entries_per_subcrq;

	adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
	adapter->req_rx_queues = adapter->opt_rx_comp_queues;
	adapter->req_rx_add_queues = adapter->max_rx_add_queues;

	adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}
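	/* Fewer sub-CRQs were registered than requested: the loop below
	 * alternately trims the rx and tx requests, never dropping below
	 * the minimums; "more" grows whenever one side is already at its
	 * floor so the other side absorbs the remaining shortfall.
	 */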
	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}
	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}
	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;
rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}
static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}
	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);
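	/* Login buffer layout: the fixed header above, then the list of tx
	 * sub-CRQ numbers, then the rx list; the off_* fields tell the VNIC
	 * server where each list begins.
	 */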
	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] =
			    cpu_to_be64(adapter->tx_scrq[i]->crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] =
			    cpu_to_be64(adapter->rx_scrq[i]->crq_num);
		}
	}
1917 netdev_dbg(adapter->netdev, "Login Buffer:\n");
1918 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1919 netdev_dbg(adapter->netdev, "%016lx\n",
1920 ((unsigned long int *)(adapter->login_buf))[i]);
1923 memset(&crq, 0, sizeof(crq));
1924 crq.login.first = IBMVNIC_CRQ_CMD;
1925 crq.login.cmd = LOGIN;
1926 crq.login.ioba = cpu_to_be32(buffer_token);
1927 crq.login.len = cpu_to_be32(buffer_size);
1929 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1931 spin_lock_irqsave(&adapter->inflight_lock, flags);
1932 list_add_tail(&inflight_cmd->list, &adapter->inflight);
1933 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1935 ibmvnic_send_crq(adapter, &crq);
inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     __be32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
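	/* Each query below increments running_cap_crqs so the response side
	 * can tell when the last answer has arrived before initialization
	 * proceeds to the next step.
	 */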
1993 memset(&crq, 0, sizeof(crq));
1994 crq.query_capability.first = IBMVNIC_CRQ_CMD;
1995 crq.query_capability.cmd = QUERY_CAPABILITY;
1997 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
1998 atomic_inc(&adapter->running_cap_crqs);
1999 ibmvnic_send_crq(adapter, &crq);
2001 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
2002 atomic_inc(&adapter->running_cap_crqs);
2003 ibmvnic_send_crq(adapter, &crq);
2005 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
2006 atomic_inc(&adapter->running_cap_crqs);
2007 ibmvnic_send_crq(adapter, &crq);
2009 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
2010 atomic_inc(&adapter->running_cap_crqs);
2011 ibmvnic_send_crq(adapter, &crq);
2013 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
2014 atomic_inc(&adapter->running_cap_crqs);
2015 ibmvnic_send_crq(adapter, &crq);
2017 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
2018 atomic_inc(&adapter->running_cap_crqs);
2019 ibmvnic_send_crq(adapter, &crq);
2021 crq.query_capability.capability =
2022 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
2023 atomic_inc(&adapter->running_cap_crqs);
2024 ibmvnic_send_crq(adapter, &crq);
2026 crq.query_capability.capability =
2027 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
2028 atomic_inc(&adapter->running_cap_crqs);
2029 ibmvnic_send_crq(adapter, &crq);
2031 crq.query_capability.capability =
2032 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2033 atomic_inc(&adapter->running_cap_crqs);
2034 ibmvnic_send_crq(adapter, &crq);
2036 crq.query_capability.capability =
2037 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2038 atomic_inc(&adapter->running_cap_crqs);
2039 ibmvnic_send_crq(adapter, &crq);
2041 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2042 atomic_inc(&adapter->running_cap_crqs);
2043 ibmvnic_send_crq(adapter, &crq);
2045 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2046 atomic_inc(&adapter->running_cap_crqs);
2047 ibmvnic_send_crq(adapter, &crq);
2049 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2050 atomic_inc(&adapter->running_cap_crqs);
2051 ibmvnic_send_crq(adapter, &crq);
2053 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2054 atomic_inc(&adapter->running_cap_crqs);
2055 ibmvnic_send_crq(adapter, &crq);
2057 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2058 atomic_inc(&adapter->running_cap_crqs);
2059 ibmvnic_send_crq(adapter, &crq);
2061 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2062 atomic_inc(&adapter->running_cap_crqs);
2063 ibmvnic_send_crq(adapter, &crq);
2065 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2066 atomic_inc(&adapter->running_cap_crqs);
2067 ibmvnic_send_crq(adapter, &crq);
2069 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2070 atomic_inc(&adapter->running_cap_crqs);
2071 ibmvnic_send_crq(adapter, &crq);
2073 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2074 atomic_inc(&adapter->running_cap_crqs);
2075 ibmvnic_send_crq(adapter, &crq);
2077 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2078 atomic_inc(&adapter->running_cap_crqs);
2079 ibmvnic_send_crq(adapter, &crq);
2081 crq.query_capability.capability =
2082 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2083 atomic_inc(&adapter->running_cap_crqs);
2084 ibmvnic_send_crq(adapter, &crq);
2086 crq.query_capability.capability =
2087 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2088 atomic_inc(&adapter->running_cap_crqs);
2089 ibmvnic_send_crq(adapter, &crq);
2091 crq.query_capability.capability =
2092 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2093 atomic_inc(&adapter->running_cap_crqs);
2094 ibmvnic_send_crq(adapter, &crq);
2096 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2097 atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
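/* send_cap_queries() above repeats one three-line pattern per
 * capability; a minimal refactoring sketch (the helper below is
 * hypothetical, not part of this driver) would be:
 */
static void __maybe_unused example_send_cap_query(struct ibmvnic_adapter
						  *adapter,
						  union ibmvnic_crq *crq,
						  u16 cap)
{
	crq->query_capability.capability = cpu_to_be16(cap);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, crq);
}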
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;
2108 dma_unmap_single(dev, adapter->ip_offload_tok,
2109 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2111 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2112 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2113 netdev_dbg(adapter->netdev, "%016lx\n",
2114 ((unsigned long int *)(buf))[i]);
2116 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2117 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2118 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2119 buf->tcp_ipv4_chksum);
2120 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2121 buf->tcp_ipv6_chksum);
2122 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2123 buf->udp_ipv4_chksum);
2124 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2125 buf->udp_ipv6_chksum);
2126 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2127 buf->large_tx_ipv4);
2128 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2129 buf->large_tx_ipv6);
2130 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2131 buf->large_rx_ipv4);
2132 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2133 buf->large_rx_ipv6);
2134 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2135 buf->max_ipv4_header_size);
2136 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2137 buf->max_ipv6_header_size);
2138 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2139 buf->max_tcp_header_size);
2140 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2141 buf->max_udp_header_size);
2142 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2143 buf->max_large_tx_size);
2144 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2145 buf->max_large_rx_size);
2146 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2147 buf->ipv6_extension_header);
2148 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2149 buf->tcp_pseudosum_req);
2150 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2151 buf->num_ipv6_ext_headers);
2152 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2153 buf->off_ipv6_ext_headers);
2155 adapter->ip_offload_ctrl_tok =
2156 dma_map_single(dev, &adapter->ip_offload_ctrl,
2157 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}
2164 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2165 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2166 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2167 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2168 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2170 /* large_tx/rx disabled for now, additional features needed */
2171 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2172 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2173 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2174 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2176 adapter->netdev->features = NETIF_F_GSO;
2178 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2179 adapter->netdev->features |= NETIF_F_IP_CSUM;
2181 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2182 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2184 if ((adapter->netdev->features &
2185 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2186 adapter->netdev->features |= NETIF_F_RXCSUM;
2188 memset(&crq, 0, sizeof(crq));
2189 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2190 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2191 crq.control_ip_offload.len =
2192 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2193 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;
	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2222 dev_err(dev, "Couldn't find error id %x\n",
2223 be32_to_cpu(crq->request_error_rsp.error_id));
2227 dev_err(dev, "Detailed info for error id %x:",
2228 be32_to_cpu(crq->request_error_rsp.error_id));
	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}
static void handle_dump_size_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;
	unsigned long flags;
2252 /* allocate and map buffer */
2253 adapter->dump_data = kmalloc(len, GFP_KERNEL);
	if (!adapter->dump_data) {
		complete(&adapter->fw_done);
		return;
	}
	adapter->dump_data_size = len;
	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
						  DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->dump_data_token)) {
2263 if (!firmware_has_feature(FW_FEATURE_CMO))
2264 dev_err(dev, "Couldn't map dump data\n");
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}
2270 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2271 if (!inflight_cmd) {
		dma_unmap_single(dev, adapter->dump_data_token, len,
				 DMA_FROM_DEVICE);
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}
2279 memset(&newcrq, 0, sizeof(newcrq));
2280 newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2281 newcrq.request_dump.cmd = REQUEST_DUMP;
2282 newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2283 newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2285 memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2287 spin_lock_irqsave(&adapter->inflight_lock, flags);
2288 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2289 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
	ibmvnic_send_crq(adapter, &newcrq);
}
2294 static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
2297 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2298 struct ibmvnic_inflight_cmd *inflight_cmd;
2299 struct device *dev = &adapter->vdev->dev;
2300 struct ibmvnic_error_buff *error_buff;
2301 union ibmvnic_crq new_crq;
2302 unsigned long flags;
2304 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2305 crq->error_indication.
2306 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2307 be32_to_cpu(crq->error_indication.error_id),
2308 be16_to_cpu(crq->error_indication.error_cause));
	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}
	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}
2330 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2331 if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}
2339 error_buff->len = detail_len;
2340 error_buff->error_id = crq->error_indication.error_id;
2342 spin_lock_irqsave(&adapter->error_list_lock, flags);
2343 list_add_tail(&error_buff->list, &adapter->errors);
2344 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2346 memset(&new_crq, 0, sizeof(new_crq));
2347 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2348 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2349 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2350 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2351 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2353 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
2355 spin_lock_irqsave(&adapter->inflight_lock, flags);
2356 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2357 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
	ibmvnic_send_crq(adapter, &new_crq);
}
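/* The error handshake implemented above and in handle_error_info_rsp():
 * the server raises ERROR_INDICATION, the driver maps a detail buffer
 * and answers with REQUEST_ERROR_INFO, and the eventual
 * REQUEST_ERROR_RSP is matched back to the queued ibmvnic_error_buff by
 * its error_id.
 */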
2362 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
2365 struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
2378 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;
2386 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2388 req_value = &adapter->req_tx_queues;
2392 req_value = &adapter->req_rx_queues;
2395 case REQ_RX_ADD_QUEUES:
2396 req_value = &adapter->req_rx_add_queues;
2399 case REQ_TX_ENTRIES_PER_SUBCRQ:
2400 req_value = &adapter->req_tx_entries_per_subcrq;
2401 name = "tx_entries_per_subcrq";
2403 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2404 req_value = &adapter->req_rx_add_entries_per_subcrq;
2405 name = "rx_add_entries_per_subcrq";
2408 req_value = &adapter->req_mtu;
2411 case PROMISC_REQUESTED:
2412 req_value = &adapter->promisc;
2416 dev_err(dev, "Got invalid cap request rsp %d\n",
2417 crq->request_capability.capability);
	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs_no_irqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		init_sub_crqs(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}
2439 /* Done receiving requested capabilities, query IP offload support */
2440 if (atomic_read(&adapter->running_cap_crqs) == 0) {
2441 union ibmvnic_crq newcrq;
2442 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2443 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2444 &adapter->ip_offload_buf;
2446 adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}
2457 memset(&newcrq, 0, sizeof(newcrq));
2458 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2459 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2460 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2461 newcrq.query_ip_offload.ioba =
2462 cpu_to_be32(adapter->ip_offload_tok);
		ibmvnic_send_crq(adapter, &newcrq);
	}
}
2468 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
2471 struct device *dev = &adapter->vdev->dev;
2472 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2473 struct ibmvnic_login_buffer *login = adapter->login_buf;
	union ibmvnic_crq crq;
	int i;
	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
2479 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2480 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2482 /* If the number of queues requested can't be allocated by the
2483 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}
2492 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2493 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2494 netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}
	/* Sanity check: login and login rsp queue counts must agree */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2500 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2501 adapter->req_rx_add_queues !=
2502 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2503 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
2507 complete(&adapter->init_done);
2509 memset(&crq, 0, sizeof(crq));
2510 crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2511 crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
	ibmvnic_send_crq(adapter, &crq);

	return 0;
}
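/* A nonzero login rc sets adapter->renegotiate rather than failing
 * outright; ibmvnic_probe() and handle_crq_init_rsp() below loop on that
 * flag, releasing the sub-CRQs and re-querying capabilities until the
 * server accepts the login.
 */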
2517 static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;
2527 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2528 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}
2547 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
2558 static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
2575 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}
	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		/* We're done querying the capabilities, initialize sub-crqs */
		init_sub_crqs(adapter, 0);
	}
}
2749 static void handle_control_ras_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	u8 correlator = crq->control_ras_rsp.correlator;
	struct device *dev = &adapter->vdev->dev;
	bool found = false;
	int i;

	if (crq->control_ras_rsp.rc.code) {
		dev_warn(dev, "Control ras failed rc=%d\n",
			 crq->control_ras_rsp.rc.code);
		return;
	}
	for (i = 0; i < adapter->ras_comp_num; i++) {
		if (adapter->ras_comps[i].correlator == correlator) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
		return;
	}
	switch (crq->control_ras_rsp.op) {
	case IBMVNIC_TRACE_LEVEL:
		adapter->ras_comps[i].trace_level = crq->control_ras.level;
		break;
	case IBMVNIC_ERROR_LEVEL:
		adapter->ras_comps[i].error_check_level =
		    crq->control_ras.level;
		break;
	case IBMVNIC_TRACE_PAUSE:
		adapter->ras_comp_int[i].paused = 1;
		break;
	case IBMVNIC_TRACE_RESUME:
		adapter->ras_comp_int[i].paused = 0;
		break;
	case IBMVNIC_TRACE_ON:
		adapter->ras_comps[i].trace_on = 1;
		break;
	case IBMVNIC_TRACE_OFF:
		adapter->ras_comps[i].trace_on = 0;
		break;
	case IBMVNIC_CHG_TRACE_BUFF_SZ:
		/* trace_buff_sz is 3 bytes, stuff it into an int */
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
		    crq->control_ras_rsp.trace_buff_sz[0];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
		    crq->control_ras_rsp.trace_buff_sz[1];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
		    crq->control_ras_rsp.trace_buff_sz[2];
		break;
	default:
		dev_err(dev, "invalid op %d on control_ras_rsp",
			crq->control_ras_rsp.op);
	}
}
static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
			  loff_t *ppos)
{
2814 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2815 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2816 struct device *dev = &adapter->vdev->dev;
2817 struct ibmvnic_fw_trace_entry *trace;
2818 int num = ras_comp_int->num;
2819 union ibmvnic_crq crq;
2820 dma_addr_t trace_tok;
	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		return 0;

	trace = dma_alloc_coherent(dev,
				   be32_to_cpu(adapter->ras_comps[num].
					       trace_buff_size), &trace_tok,
				   GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "Couldn't alloc trace buffer\n");
		return 0;
	}
2835 memset(&crq, 0, sizeof(crq));
2836 crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2837 crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2838 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2839 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2840 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2842 init_completion(&adapter->fw_done);
2843 ibmvnic_send_crq(adapter, &crq);
2844 wait_for_completion(&adapter->fw_done);
	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		len = be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
		      *ppos;

	if (copy_to_user(user_buf, &((u8 *)trace)[*ppos], len)) {
		dma_free_coherent(dev,
				  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
				  trace, trace_tok);
		return -EFAULT;
	}

	dma_free_coherent(dev,
			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
			  trace, trace_tok);
	*ppos += len;

	return len;
}
2860 static const struct file_operations trace_ops = {
2861 .owner = THIS_MODULE,
	.open = simple_open,
	.read = trace_read,
};
static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
			   loff_t *ppos)
{
2869 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2870 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2871 int num = ras_comp_int->num;
	char buff[5]; /* 1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}
2885 static ssize_t paused_write(struct file *file, const char __user *user_buf,
			    size_t len, loff_t *ppos)
{
2888 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2889 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2890 int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	buff[min(len, sizeof(buff) - 1)] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	adapter->ras_comp_int[num].paused = val ? 1 : 0;
2900 memset(&crq, 0, sizeof(crq));
2901 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2902 crq.control_ras.cmd = CONTROL_RAS;
2903 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2904 crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}
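/* The same parse-then-send shape (bounded copy_from_user(), kstrtoul(),
 * one CONTROL_RAS CRQ) is used by every writable RAS attribute below;
 * only the op code and the field being updated differ.
 */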
2910 static const struct file_operations paused_ops = {
2911 .owner = THIS_MODULE,
2912 .open = simple_open,
2913 .read = paused_read,
	.write = paused_write,
};
2917 static ssize_t tracing_read(struct file *file, char __user *user_buf,
			    size_t len, loff_t *ppos)
{
2920 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2921 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2922 int num = ras_comp_int->num;
	char buff[5]; /* 1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}
2936 static ssize_t tracing_write(struct file *file, const char __user *user_buf,
			     size_t len, loff_t *ppos)
{
2939 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2940 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2941 int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	buff[min(len, sizeof(buff) - 1)] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2949 memset(&crq, 0, sizeof(crq));
2950 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2951 crq.control_ras.cmd = CONTROL_RAS;
2952 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}
2958 static const struct file_operations tracing_ops = {
2959 .owner = THIS_MODULE,
2960 .open = simple_open,
2961 .read = tracing_read,
	.write = tracing_write,
};
2965 static ssize_t error_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
2968 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2969 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2970 int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}
2984 static ssize_t error_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
2987 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2988 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2989 int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	buff[min(len, sizeof(buff) - 1)] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	if (val > 9)
		val = 9;
3000 memset(&crq, 0, sizeof(crq));
3001 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3002 crq.control_ras.cmd = CONTROL_RAS;
3003 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
3004 crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
3005 crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}
3011 static const struct file_operations error_level_ops = {
3012 .owner = THIS_MODULE,
3013 .open = simple_open,
3014 .read = error_level_read,
	.write = error_level_write,
};
3018 static ssize_t trace_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
3021 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3022 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3023 int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}
3036 static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
3039 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3040 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	buff[min(len, sizeof(buff) - 1)] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	if (val > 7)
		val = 7;
3050 memset(&crq, 0, sizeof(crq));
3051 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3052 crq.control_ras.cmd = CONTROL_RAS;
3053 crq.control_ras.correlator =
3054 adapter->ras_comps[ras_comp_int->num].correlator;
3055 crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
3056 crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}
3062 static const struct file_operations trace_level_ops = {
3063 .owner = THIS_MODULE,
3064 .open = simple_open,
3065 .read = trace_level_read,
	.write = trace_level_write,
};
3069 static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
				    size_t len, loff_t *ppos)
{
3072 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3073 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3074 int num = ras_comp_int->num;
	char buff[9]; /* decimal max int plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}
3087 static ssize_t trace_buff_size_write(struct file *file,
				     const char __user *user_buf, size_t len,
				     loff_t *ppos)
{
3091 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3092 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	char buff[9]; /* decimal max int plus \n and \0 */

	if (copy_from_user(buff, user_buf, min(len, sizeof(buff) - 1)))
		return -EFAULT;
	buff[min(len, sizeof(buff) - 1)] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3100 memset(&crq, 0, sizeof(crq));
3101 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3102 crq.control_ras.cmd = CONTROL_RAS;
3103 crq.control_ras.correlator =
3104 adapter->ras_comps[ras_comp_int->num].correlator;
3105 crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is 3 bytes; copy the low 3 bytes of the 64-bit
	 * big-endian value into it
	 */
	crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
	crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
	crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
	ibmvnic_send_crq(adapter, &crq);

	return len;
}
3115 static const struct file_operations trace_size_ops = {
3116 .owner = THIS_MODULE,
3117 .open = simple_open,
3118 .read = trace_buff_size_read,
	.write = trace_buff_size_write,
};
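/* The handler below wires the file_operations above into one debugfs
 * directory per firmware component:
 *
 *   ras_comps/<component>/description     (read-only blob)
 *   ras_comps/<component>/trace_buf_size  (trace_size_ops)
 *   ras_comps/<component>/trace_level     (trace_level_ops)
 *   ras_comps/<component>/error_level     (error_level_ops)
 *   ras_comps/<component>/tracing         (tracing_ops)
 *   ras_comps/<component>/paused          (paused_ops)
 *   ras_comps/<component>/trace           (trace_ops)
 */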
3122 static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
					 struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct dentry *dir_ent;
	struct dentry *ent;
	int i;
3130 debugfs_remove_recursive(adapter->ras_comps_ent);
3132 adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3133 adapter->debugfs_dir);
	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
		dev_info(dev, "debugfs create ras_comps dir failed\n");
		return;
	}
3139 for (i = 0; i < adapter->ras_comp_num; i++) {
3140 dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3141 adapter->ras_comps_ent);
		if (!dir_ent || IS_ERR(dir_ent)) {
			dev_info(dev, "debugfs create %s dir failed\n",
				 adapter->ras_comps[i].name);
			continue;
		}
3148 adapter->ras_comp_int[i].adapter = adapter;
3149 adapter->ras_comp_int[i].num = i;
3150 adapter->ras_comp_int[i].desc_blob.data =
3151 &adapter->ras_comps[i].description;
3152 adapter->ras_comp_int[i].desc_blob.size =
3153 sizeof(adapter->ras_comps[i].description);
		/* Don't need to remember the dentry's because the debugfs dir
		 * gets removed recursively
		 */
		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i].desc_blob);
		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_size_ops);
		ent = debugfs_create_file("trace_level",
					  S_IRUGO |
					  (adapter->ras_comps[i].trace_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_level_ops);
		ent = debugfs_create_file("error_level",
					  S_IRUGO |
					  (adapter->
					   ras_comps[i].error_check_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &tracing_ops);
		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &paused_ops);
		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i],
					  &trace_ops);
	}
}
3188 static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
					    struct ibmvnic_adapter *adapter)
{
3191 int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3192 struct device *dev = &adapter->vdev->dev;
3193 union ibmvnic_crq newcrq;
	adapter->ras_comps = dma_alloc_coherent(dev, len,
						&adapter->ras_comps_tok,
						GFP_KERNEL);
	if (!adapter->ras_comps) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't alloc fw comps buffer\n");
		return;
	}
	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
					sizeof(struct ibmvnic_fw_comp_internal),
					GFP_KERNEL);
	if (!adapter->ras_comp_int) {
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);
		return;
	}
3211 memset(&newcrq, 0, sizeof(newcrq));
3212 newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3213 newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3214 newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3215 newcrq.request_ras_comps.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &newcrq);
}
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
3221 struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
3222 struct device *dev = &adapter->vdev->dev;
3223 struct ibmvnic_error_buff *error_buff, *tmp2;
3224 unsigned long flags;
3225 unsigned long flags2;
3227 spin_lock_irqsave(&adapter->inflight_lock, flags);
3228 list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_DUMP:
			complete(&adapter->fw_done);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}
static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;
3272 ibmvnic_free_inflight(adapter);
3273 release_sub_crqs(adapter);
3274 if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}
3285 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;
3293 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3294 ((unsigned long int *)crq)[0],
3295 ((unsigned long int *)crq)[1]);
3296 switch (gen_crq->first) {
3297 case IBMVNIC_CRQ_INIT_RSP:
3298 switch (gen_crq->cmd) {
3299 case IBMVNIC_CRQ_INIT:
3300 dev_info(dev, "Partner initialized\n");
3301 /* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
3316 case IBMVNIC_CRQ_XPORT_EVENT:
3317 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3318 dev_info(dev, "Re-enabling adapter\n");
3319 adapter->migrated = true;
3320 schedule_work(&adapter->ibmvnic_xport);
3321 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3322 dev_info(dev, "Backing device failover detected\n");
3323 netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
3335 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3340 switch (gen_crq->cmd) {
3341 case VERSION_EXCHANGE_RSP:
3342 rc = crq->version_exchange_rsp.rc.code;
3344 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3347 dev_info(dev, "Partner protocol version is %d\n",
3348 crq->version_exchange_rsp.version);
3349 if (be16_to_cpu(crq->version_exchange_rsp.version) <
3352 be16_to_cpu(crq->version_exchange_rsp.version);
3353 send_cap_queries(adapter);
3355 case QUERY_CAPABILITY_RSP:
3356 handle_query_cap_rsp(crq, adapter);
3359 handle_query_map_rsp(crq, adapter);
3361 case REQUEST_MAP_RSP:
3362 handle_request_map_rsp(crq, adapter);
3364 case REQUEST_UNMAP_RSP:
3365 handle_request_unmap_rsp(crq, adapter);
3367 case REQUEST_CAPABILITY_RSP:
3368 handle_request_cap_rsp(crq, adapter);
3371 netdev_dbg(netdev, "Got Login Response\n");
3372 handle_login_rsp(crq, adapter);
3374 case LOGICAL_LINK_STATE_RSP:
3375 netdev_dbg(netdev, "Got Logical Link State Response\n");
3376 adapter->logical_link_state =
3377 crq->logical_link_state_rsp.link_state;
3379 case LINK_STATE_INDICATION:
3380 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3381 adapter->phys_link_state =
3382 crq->link_state_indication.phys_link_state;
3383 adapter->logical_link_state =
3384 crq->link_state_indication.logical_link_state;
3386 case CHANGE_MAC_ADDR_RSP:
3387 netdev_dbg(netdev, "Got MAC address change Response\n");
3388 handle_change_mac_rsp(crq, adapter);
3390 case ERROR_INDICATION:
3391 netdev_dbg(netdev, "Got Error Indication\n");
3392 handle_error_indication(crq, adapter);
3394 case REQUEST_ERROR_RSP:
3395 netdev_dbg(netdev, "Got Error Detail Response\n");
3396 handle_error_info_rsp(crq, adapter);
3398 case REQUEST_STATISTICS_RSP:
3399 netdev_dbg(netdev, "Got Statistics Response\n");
3400 complete(&adapter->stats_done);
3402 case REQUEST_DUMP_SIZE_RSP:
3403 netdev_dbg(netdev, "Got Request Dump Size Response\n");
3404 handle_dump_size_rsp(crq, adapter);
3406 case REQUEST_DUMP_RSP:
3407 netdev_dbg(netdev, "Got Request Dump Response\n");
3408 complete(&adapter->fw_done);
3410 case QUERY_IP_OFFLOAD_RSP:
3411 netdev_dbg(netdev, "Got Query IP offload Response\n");
3412 handle_query_ip_offload_rsp(adapter);
3414 case MULTICAST_CTRL_RSP:
3415 netdev_dbg(netdev, "Got multicast control Response\n");
3417 case CONTROL_IP_OFFLOAD_RSP:
3418 netdev_dbg(netdev, "Got Control IP offload Response\n");
3419 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3420 sizeof(adapter->ip_offload_ctrl),
3422 /* We're done with the queries, perform the login */
3423 send_login(adapter);
3425 case REQUEST_RAS_COMP_NUM_RSP:
3426 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3427 if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3428 netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3431 adapter->ras_comp_num =
3432 be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3433 handle_request_ras_comp_num_rsp(crq, adapter);
3435 case REQUEST_RAS_COMPS_RSP:
3436 netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3437 handle_request_ras_comps_rsp(crq, adapter);
3439 case CONTROL_RAS_RSP:
3440 netdev_dbg(netdev, "Got Control RAS Response\n");
3441 handle_control_ras_rsp(crq, adapter);
3443 case COLLECT_FW_TRACE_RSP:
3444 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3445 complete(&adapter->fw_done);
3448 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
3455 struct ibmvnic_adapter *adapter = instance;
3456 unsigned long flags;
3458 spin_lock_irqsave(&adapter->crq.lock, flags);
3459 vio_disable_interrupts(adapter->vdev);
3460 tasklet_schedule(&adapter->tasklet);
	spin_unlock_irqrestore(&adapter->crq.lock, flags);

	return IRQ_HANDLED;
}
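/* The hard IRQ handler above only quiesces the VIO interrupt and defers
 * to the tasklet; all CRQ dequeueing and dispatch happens in
 * ibmvnic_tasklet() below, under queue->lock.
 */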
static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;
3474 spin_lock_irqsave(&queue->lock, flags);
3475 vio_disable_interrupts(vdev);
3477 /* Pull all the valid messages off the CRQ */
3478 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3479 ibmvnic_handle_crq(crq, adapter);
3480 crq->generic.first = 0;
3482 vio_enable_interrupts(vdev);
3483 crq = ibmvnic_next_crq(adapter);
3485 vio_disable_interrupts(vdev);
3486 ibmvnic_handle_crq(crq, adapter);
3487 crq->generic.first = 0;
3489 /* remain in tasklet until all
3490 * capabilities responses are received
3492 if (!adapter->wait_capability)
3496 /* if capabilities CRQ's were sent in this tasklet, the following
3497 * tasklet must wait until all responses are received
3499 if (atomic_read(&adapter->running_cap_crqs) != 0)
3500 adapter->wait_capability = true;
3501 spin_unlock_irqrestore(&queue->lock, flags);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%ld)\n", rc);

	return rc;
}
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;
3554 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3555 free_irq(vdev->irq, adapter);
3556 tasklet_kill(&adapter->tasklet);
3558 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3559 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3561 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3563 free_page((unsigned long)crq->msgs);
static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
{
3568 struct ibmvnic_crq_queue *crq = &adapter->crq;
3569 struct device *dev = &adapter->vdev->dev;
3570 struct vio_dev *vdev = adapter->vdev;
3571 int rc, retrc = -ENOMEM;
	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;
3579 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;
3585 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3586 crq->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;
3602 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
3603 (unsigned long)adapter);
3605 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}
	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}
3621 spin_lock_init(&crq->lock);
3626 tasklet_kill(&adapter->tasklet);
3628 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3629 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3631 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3633 free_page((unsigned long)crq->msgs);
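/* CRQ bring-up order used above: allocate and DMA-map one page of
 * message slots, register it with the hypervisor (H_REG_CRQ, falling
 * back to ibmvnic_reset_crq() when H_RESOURCE suggests a registration
 * left over from a kexec), then attach the tasklet and IRQ.
 * ibmvnic_release_crq_queue() runs the same steps in reverse.
 */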
3637 /* debugfs for dump */
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
3641 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3642 struct device *dev = &adapter->vdev->dev;
3643 union ibmvnic_crq crq;
3645 memset(&crq, 0, sizeof(crq));
3646 crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3647 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3649 init_completion(&adapter->fw_done);
3650 ibmvnic_send_crq(adapter, &crq);
3651 wait_for_completion(&adapter->fw_done);
3653 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
			 DMA_FROM_DEVICE);
	kfree(adapter->dump_data);

	return 0;
}
static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}
3668 static const struct file_operations ibmvnic_dump_ops = {
3669 .owner = THIS_MODULE,
3670 .open = ibmvnic_dump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
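/* Reading the "dump" debugfs file drives a REQUEST_DUMP_SIZE /
 * REQUEST_DUMP exchange (see handle_dump_size_rsp() above) before the
 * collected data is written out, e.g. (assuming debugfs is mounted in
 * the usual place):
 *
 *	cat /sys/kernel/debug/ibmvnic_<unit-address>/dump
 */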
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;
3687 if (adapter->failover) {
3688 release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}
3696 reinit_completion(&adapter->init_done);
3697 send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}
	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Passive init timeout\n");
				goto task_failed;
			}
		}
3716 } while (adapter->renegotiate);
	rc = init_sub_crq_irqs(adapter);

	if (rc)
		goto task_failed;
3722 netdev->real_num_tx_queues = adapter->req_tx_queues;
3723 netdev->mtu = adapter->req_mtu - ETH_HLEN;
	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev,
			"failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");

	return;
3747 dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
3749 release_sub_crqs(adapter);
3751 dev_err(dev, "Passive initialization was not successful\n");
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	struct dentry *ent;
	char buf[17]; /* debugfs name buf */
	int rc;
3764 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3767 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3768 VETH_MAC_ADDR, NULL);
3771 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3772 __FILE__, __LINE__);
	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;
3781 adapter = netdev_priv(netdev);
3782 dev_set_drvdata(&dev->dev, netdev);
3783 adapter->vdev = dev;
3784 adapter->netdev = netdev;
3785 adapter->failover = false;
3787 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3788 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3789 netdev->irq = dev->irq;
3790 netdev->netdev_ops = &ibmvnic_netdev_ops;
3791 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3792 SET_NETDEV_DEV(netdev, &dev->dev);
3794 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
3795 INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
3797 spin_lock_init(&adapter->stats_lock);
	rc = ibmvnic_init_crq_queue(adapter);
	if (rc) {
		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto free_netdev;
	}
3805 INIT_LIST_HEAD(&adapter->errors);
3806 INIT_LIST_HEAD(&adapter->inflight);
3807 spin_lock_init(&adapter->error_list_lock);
3808 spin_lock_init(&adapter->inflight_lock);
	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(&dev->dev, "Couldn't map stats buffer\n");
		rc = -ENOMEM;
		goto free_crq;
	}
3820 snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3821 ent = debugfs_create_dir(buf, NULL);
3822 if (!ent || IS_ERR(ent)) {
3823 dev_info(&dev->dev, "debugfs create directory failed\n");
		adapter->debugfs_dir = NULL;
	} else {
		adapter->debugfs_dir = ent;
		ent = debugfs_create_file("dump", S_IRUGO,
					  adapter->debugfs_dir,
					  netdev, &ibmvnic_dump_ops);
		if (!ent || IS_ERR(ent)) {
			dev_info(&dev->dev,
				 "debugfs create dump file failed\n");
			adapter->debugfs_dump = NULL;
		} else {
			adapter->debugfs_dump = ent;
		}
	}
3838 init_completion(&adapter->init_done);
3839 ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
		return 0;
	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout))
				return 0;
		}
3854 } while (adapter->renegotiate);
	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
		goto free_debugfs;
	}
3862 netdev->real_num_tx_queues = adapter->req_tx_queues;
3863 netdev->mtu = adapter->req_mtu - ETH_HLEN;
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto free_sub_crqs;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;
free_sub_crqs:
	release_sub_crqs(adapter);
free_debugfs:
	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);
free_crq:
	ibmvnic_release_crq_queue(adapter);
free_netdev:
	free_netdev(netdev);
	return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
3889 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3891 unregister_netdev(netdev);
3893 release_sub_crqs(adapter);
3895 ibmvnic_release_crq_queue(adapter);
3897 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3898 debugfs_remove_recursive(adapter->debugfs_dir);
3900 dma_unmap_single(&dev->dev, adapter->stats_token,
3901 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3903 if (adapter->ras_comps)
3904 dma_free_coherent(&dev->dev,
3905 adapter->ras_comp_num *
3906 sizeof(struct ibmvnic_fw_component),
3907 adapter->ras_comps, adapter->ras_comps_tok);
3909 kfree(adapter->ras_comp_int);
3911 free_netdev(netdev);
3912 dev_set_drvdata(&dev->dev, NULL);
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;
3925 tbl = get_iommu_table_base(&vdev->dev);
	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3931 adapter = netdev_priv(netdev);
3933 ret += PAGE_SIZE; /* the crq message queue */
3934 ret += adapter->bounce_buffer_size;
3935 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3937 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3938 ret += 4 * PAGE_SIZE; /* the scrq message queue */
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
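/* Rough worked example of the sum above, with illustrative numbers
 * only (4 KiB pages, 4 tx and 4 rx queues): one CRQ page + one aligned
 * statistics page + 4 pages per sub-CRQ gives
 * (1 + 1 + 4 * (4 + 4)) * 4 KiB = 136 KiB, before the bounce buffer
 * and the per-pool rx buffer terms are added in.
 */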
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;
3954 /* kick the interrupt handlers just in case we lost an interrupt */
3955 for (i = 0; i < adapter->req_rx_queues; i++)
3956 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}
3962 static struct vio_device_id ibmvnic_device_table[] = {
3963 {"network", "IBM,vnic"},
3966 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3968 static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};
3972 static struct vio_driver ibmvnic_driver = {
3973 .id_table = ibmvnic_device_table,
3974 .probe = ibmvnic_probe,
3975 .remove = ibmvnic_remove,
3976 .get_desired_dma = ibmvnic_get_desired_dma,
3977 .name = ibmvnic_driver_name,
3978 .pm = &ibmvnic_pm_ops,
3981 /* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);
	return vio_register_driver(&ibmvnic_driver);
}
static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}
3995 module_init(ibmvnic_module_init);
3996 module_exit(ibmvnic_module_exit);