/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program.                                               */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, continuous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

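/* A rough sketch of the startup handshake as implemented in this file
 * (the exact ordering is driven by CRQ responses; see the individual
 * send_* helpers below):
 *
 *	ibmvnic_send_crq_init()	-> server acks the CRQ
 *	send_version_xchg()	-> agree on a protocol version
 *	send_cap_queries()	-> query min/max/opt capabilities
 *	ibmvnic_send_req_caps()	-> request the values actually used
 *	send_login()		-> exchange sub-CRQ handles
 *	ibmvnic_open()		-> allocate pools, enable IRQs, link up
 */
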
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

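/* Example (illustrative): the two macros compose so that
 *
 *	IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets))
 *
 * reads adapter->stats.rx_packets as a u64, which is how
 * ibmvnic_get_ethtool_stats() below walks ibmvnic_stats[].
 */
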
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

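/* Allocate a "long term" buffer: a physically contiguous DMA-coherent
 * region that is registered with the VNIC server via a REQUEST_MAP CRQ
 * and then reused for the adapter's lifetime (see the header comment at
 * the top of this file). The CRQ response handler completes
 * adapter->fw_done once the server has acknowledged the mapping.
 */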
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

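/* Refill an rx pool up to pool->size buffers. Each free slot gets a
 * fresh skb, a slice of the pool's long term buffer, and an rx-add
 * descriptor sent on the pool's sub-CRQ so the server can place a
 * received frame into the slot.
 */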
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
			       off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

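/* Perform the login exchange with the VNIC server. If the server has
 * asked us to renegotiate (adapter->renegotiate), tear down the sub-CRQs
 * and redo the capability handshake first; loop until a login completes
 * without a renegotiate request or a step times out.
 */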
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_error_buffers(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	if (adapter->logical_link_state == link_state) {
		netdev_dbg(netdev, "Link state already %d\n", link_state);
		return 0;
	}

	netdev_dbg(netdev, "setting link state %d\n", link_state);
	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize sub crq irqs\n");
		return -1;
	}

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i, rc;

	if (adapter->is_closed) {
		rc = ibmvnic_init(adapter);
		if (rc)
			return rc;
	}

	rc = ibmvnic_login(netdev);
	if (rc)
		return rc;

	rc = init_resources(adapter);
	if (rc)
		return rc;

	replenish_pools(adapter);

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);
	adapter->is_closed = false;

	return 0;
}

static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				disable_irq(adapter->tx_scrq[i]->irq);
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				disable_irq(adapter->rx_scrq[i]->irq);
	}
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;
	int i;

	adapter->closing = true;
	disable_sub_crqs(adapter);

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);

	release_resources(adapter);

	adapter->is_closed = true;
	adapter->closing = false;
	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths to be filled in
 * @hdr_data - buffer to write the headers into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 * Returns the total length of the header data copied.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the skb and the descriptor array
 * @num_entries - number of descriptors to be sent, updated in place
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}

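/* Transmit path. The skb's linear data is copied into the tx pool's long
 * term buffer (no per-packet DMA mapping), a V1 TX descriptor is built,
 * and it is handed to the server with send_subcrq(); when L2/L3/L4
 * headers must be passed to firmware, header descriptors are appended
 * and the whole array is sent with send_subcrq_indirect().
 */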
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED)
			netif_stop_subqueue(netdev, queue_num);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (atomic_inc_return(&tx_scrq->used)
	    >= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

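/* NAPI poll: drain rx completions from this queue's sub-CRQ, copy each
 * frame out of the long term buffer into its skb, push it up the stack,
 * then replenish the pool and re-enable the sub-CRQ interrupt once fewer
 * than budget frames have been processed.
 */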
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(adapter);
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open = ibmvnic_open,
	.ndo_stop = ibmvnic_close,
	.ndo_start_xmit = ibmvnic_xmit,
	.ndo_set_rx_mode = ibmvnic_set_multi,
	.ndo_set_mac_address = ibmvnic_set_mac,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo = ibmvnic_get_drvinfo,
	.get_msglevel = ibmvnic_get_msglevel,
	.set_msglevel = ibmvnic_set_msglevel,
	.get_link = ibmvnic_get_link,
	.get_ringparam = ibmvnic_get_ringparam,
	.get_strings = ibmvnic_get_strings,
	.get_sset_count = ibmvnic_get_sset_count,
	.get_ethtool_stats = ibmvnic_get_ethtool_stats,
	.get_link_ksettings = ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

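/* Allocate and register one sub-CRQ: four pages of descriptor space,
 * DMA-mapped and registered with the hypervisor through H_REG_SUB_CRQ,
 * which returns the queue number and hardware interrupt that are later
 * wired up by init_sub_crq_irqs().
 */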
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
	    (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

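/* Reap tx completions from a sub-CRQ: return the descriptor slots to the
 * pool's free_map, unmap the indirect descriptor array if one was used,
 * free the skb on its last fragment, and wake the subqueue once the ring
 * drains to half full.
 */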
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				if (atomic_sub_return(next->tx_comp.num_comps,
						      &scrq->used) <=
				    (adapter->req_tx_entries_per_subcrq / 2) &&
				    netif_subqueue_stopped(adapter->netdev,
							   txbuff->skb)) {
					netif_wake_subqueue(adapter->netdev,
							    scrq->pool_index);
					netdev_dbg(adapter->netdev,
						   "Started queue %d\n",
						   scrq->pool_index);
				}

				dev_kfree_skb_any(txbuff->skb);
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}

	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}

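/* Allocate req_tx_queues + req_rx_queues sub-CRQs in one pass. If fewer
 * queues register than requested (but at least the server's minimum),
 * the shortfall is taken alternately from the rx and tx requests before
 * the surviving queues are split into tx_scrq[] and rx_scrq[].
 */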
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more ; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}

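/* Turn the queried capabilities into concrete requests: clamp the ring
 * sizes to what fits in the 4-page sub-CRQ allocation, adopt the server's
 * optimal queue counts, then fire one REQUEST_CAPABILITY CRQ per value,
 * counting them in running_cap_crqs so the responses can be matched up.
 */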
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}

static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

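/* Build and send the login buffer: counts and offsets of the tx/rx
 * completion queue number lists, plus the DMA address and length of the
 * response buffer the server should fill in. Both buffers are kept in
 * the adapter so the login response handler can unmap and parse them.
 */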
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

2135 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
2138 union ibmvnic_crq crq;
2140 memset(&crq, 0, sizeof(crq));
2141 crq.request_map.first = IBMVNIC_CRQ_CMD;
2142 crq.request_map.cmd = REQUEST_MAP;
2143 crq.request_map.map_id = map_id;
2144 crq.request_map.ioba = cpu_to_be32(addr);
2145 crq.request_map.len = cpu_to_be32(len);
2146 ibmvnic_send_crq(adapter, &crq);

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}
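
/* The server answers QUERY_IP_OFFLOAD by filling adapter->ip_offload_buf.
 * The handler below turns the advertised checksum capabilities into
 * netdev features and pushes the subset the driver actually enables
 * back to the server with CONTROL_IP_OFFLOAD.
 */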

static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}
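
/* Error details are collected asynchronously: handle_error_indication()
 * sends REQUEST_ERROR_INFO with a freshly mapped buffer and parks it on
 * adapter->errors; the REQUEST_ERROR_RSP handler below looks the buffer
 * up by error_id, dumps its contents, and frees it.
 */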

static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (!crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}
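
/* Responses to ibmvnic_send_req_caps() land here.  A PARTIALSUCCESS
 * return code means the server granted less than was requested; the
 * granted value is adopted and the request is resent (second argument
 * of 1) until both sides agree.
 */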

static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}
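
/* A failed LOGIN_RSP is not fatal: it flags renegotiation so login can
 * be retried with fewer resources.  Only a response that contradicts
 * the login buffer itself tears the device down.
 */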

static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}
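
/* Transport events (partition migration, server failure) arrive outside
 * the normal command/response flow, so recovery is deferred to a work
 * item: release the sub-CRQs and, after a migration, re-enable the main
 * CRQ and restart the init handshake.
 */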

static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;

	release_sub_crqs(adapter);
	if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}
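
/* Top-level CRQ dispatch.  The first byte of a message identifies its
 * class (init handshake, transport event, or command response); for
 * command responses the cmd byte selects one of the handlers above.
 */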

static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			schedule_work(&adapter->ibmvnic_xport);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
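
/* The hard interrupt handler only schedules the tasklet; all CRQ
 * processing happens in ibmvnic_tasklet() below, under crq->lock.
 */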

static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
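
/* The H_ENABLE_CRQ, H_FREE_CRQ and H_REG_CRQ hcalls used below can
 * report transient busy conditions, so each is retried while the
 * hypervisor returns H_BUSY or a long-busy code.
 */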

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}
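
/* The main CRQ is a single zeroed page, DMA-mapped and registered with
 * the hypervisor via H_REG_CRQ.  The error labels at the bottom unwind
 * in reverse order: irq and tasklet first, then the registration, the
 * DMA mapping, and finally the page itself.
 */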

static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
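
/* Runs from the vnic_crq_init work item when the server initiates the
 * CRQ handshake, either at first contact or after a failover.  In the
 * failover case the interface is closed, renegotiated, and reopened
 * before the carrier is turned back on.
 */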

static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev,
			"failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");

	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}

static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	rc = init_crq_queue(adapter);
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		release_crq_queue(adapter);
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	rc = ibmvnic_init(adapter);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	adapter->is_closed = false;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
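
/* Estimate the IO entitlement this device needs for the VIO CMO
 * (Cooperative Memory Overcommitment) code: the CRQ page, the
 * statistics buffer, the sub-CRQ message queues, and the long term
 * receive buffers.  Before probe completes, a fixed default is
 * reported instead.
 */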

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);