/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program.                                               */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
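
/* Illustrative message flow, sketched from the description above (not a
 * normative trace of the protocol):
 *
 *	driver				VNIC server
 *	------				-----------
 *	CRQ:  init/capability cmds --->
 *				   <---  CRQ:  responses
 *	sCRQ: rx_add (buffer)      --->
 *	sCRQ: tx descriptor        --->
 *				   <---  sCRQ: tx completion
 *				   <---  sCRQ: rx completion
 */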
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static void send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
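
/* Usage sketch: the two macros compose, as in ibmvnic_get_ethtool_stats()
 * below, to read one firmware counter out of adapter->stats:
 *
 *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * IBMVNIC_STAT_OFF() yields the byte offset of the counter within struct
 * ibmvnic_adapter, and IBMVNIC_GET_STAT() dereferences that offset as a u64.
 */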

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}
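
/* Note: the request-map handshake above is asynchronous; fw_done is
 * completed by the CRQ response handler (not shown in this excerpt) once
 * the server has accepted the mapping, so callers may reference
 * ltb->map_id in sub-CRQ descriptors as soon as alloc_long_term_buff()
 * returns.
 */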

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
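
		/* Worked example (little endian): for buff_size = 1536
		 * (0x000600), 0x600 << 8 = 0x00060000, and cpu_to_be32()
		 * stores bytes 00 06 00 00; the 24-bit len field consumes
		 * the first three bytes, 0x000600, so nothing is lost.
		 */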

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
}

static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i, tx_scrqs;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 adapter->req_mtu)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		send_login(adapter);
		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	return 0;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	int i;

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_stats_token(adapter);
	release_error_buffers(adapter);

	if (adapter->napi) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			netif_napi_del(&adapter->napi[i]);
	}
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize sub crq irqs\n");
		return -1;
	}

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
	}

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	u64 tx_entries;
	int tx_scrqs;
	int i, j;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	tx_entries = adapter->req_tx_entries_per_subcrq;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		if (!tx_pool->tx_buff)
			continue;

		for (j = 0; j < tx_entries; j++) {
			if (tx_pool->tx_buff[j].skb) {
				dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
				tx_pool->tx_buff[j].skb = NULL;
			}
		}
	}
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;
	int i;

	adapter->state = VNIC_CLOSING;
	netif_tx_stop_all_queues(netdev);

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	clean_tx_pools(adapter);

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq)
				disable_irq(adapter->tx_scrq[i]->irq);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			/* Give pending rx completions a chance to drain
			 * before the interrupt is disabled.
			 */
			while (pending_scrq(adapter, adapter->rx_scrq[i]))
				msleep(100);

			if (adapter->rx_scrq[i]->irq)
				disable_irq(adapter->rx_scrq[i]->irq);
		}
	}

	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths
 * @hdr_data: buffer to write the header data into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
900 union sub_crq *scrq_arr)
902 union sub_crq hdr_desc;
907 while (tmp_len > 0) {
908 cur = hdr_data + len - tmp_len;
910 memset(&hdr_desc, 0, sizeof(hdr_desc));
911 if (cur != hdr_data) {
912 data = hdr_desc.hdr_ext.data;
913 tmp = tmp_len > 29 ? 29 : tmp_len;
914 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
915 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
916 hdr_desc.hdr_ext.len = tmp;
918 data = hdr_desc.hdr.data;
919 tmp = tmp_len > 24 ? 24 : tmp_len;
920 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
921 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
922 hdr_desc.hdr.len = tmp;
923 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
924 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
925 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
926 hdr_desc.hdr.flag = hdr_field << 1;
928 memcpy(data, cur, tmp);
930 *scrq_arr = hdr_desc;
/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff: tx buffer containing the socket buffer and descriptor array
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len - 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
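
	/* Worked example: tot_len = 66 header bytes. The first header
	 * descriptor carries 24 bytes (see create_hdr_descs()), leaving
	 * len = 42; 42 % 29 != 0, so 42 / 29 + 1 = 2 extension
	 * descriptors are added to *num_entries.
	 */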
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
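
	/* Because the frame was copied into the long term mapped buffer
	 * above, no per-skb dma_map_single() is needed on the hot path;
	 * the descriptor below simply references an offset
	 * (data_dma_addr) within the buffer mapped at pool creation.
	 */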

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_OK;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->req_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		dev_kfree_skb_any(skb);
		tx_buff->skb = NULL;

		if (lpar_rc == H_CLOSED)
			netif_stop_subqueue(netdev, queue_num);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (atomic_inc_return(&tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_info(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			return 0;
	}

	rc = __ibmvnic_close(netdev);
	if (rc)
		return rc;

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	rc = ibmvnic_init(adapter);
	if (rc)
		return 0;

	/* If the adapter was in PROBE state prior to the reset, exit here. */
	if (reset_state == VNIC_PROBED)
		return 0;

	rc = ibmvnic_login(netdev);
	if (rc) {
		adapter->state = VNIC_PROBED;
		return 0;
	}

	rc = init_resources(adapter);
	if (rc)
		return rc;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		if (list_empty(&adapter->rwi_list))
			adapter->state = VNIC_CLOSED;
		else
			adapter->state = reset_state;

		return 0;
	}

	netif_carrier_on(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	return 0;
}

static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	mutex_lock(&adapter->rwi_lock);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	mutex_unlock(&adapter->rwi_lock);
	return rwi;
}

static void free_all_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		kfree(rwi);
		rwi = get_next_rwi(adapter);
	}
}

static void __ibmvnic_reset(struct work_struct *work)
{
	struct ibmvnic_rwi *rwi;
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	u32 reset_state;
	int rc = 0;

	adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
	netdev = adapter->netdev;

	mutex_lock(&adapter->reset_lock);
	adapter->resetting = true;
	reset_state = adapter->state;

	rwi = get_next_rwi(adapter);
	while (rwi) {
		rc = do_reset(adapter, rwi, reset_state);
		kfree(rwi);
		if (rc)
			break;

		rwi = get_next_rwi(adapter);
	}

	if (rc) {
		free_all_rwi(adapter);
		mutex_unlock(&adapter->reset_lock);
		return;
	}

	adapter->resetting = false;
	mutex_unlock(&adapter->reset_lock);
}

static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
			  enum ibmvnic_reset_reason reason)
{
	struct ibmvnic_rwi *rwi, *tmp;
	struct net_device *netdev = adapter->netdev;
	struct list_head *entry;

	if (adapter->state == VNIC_REMOVING ||
	    adapter->state == VNIC_REMOVED) {
		netdev_dbg(netdev, "Adapter removing, skipping reset\n");
		return;
	}

	mutex_lock(&adapter->rwi_lock);

	list_for_each(entry, &adapter->rwi_list) {
		tmp = list_entry(entry, struct ibmvnic_rwi, list);
		if (tmp->reset_reason == reason) {
			netdev_dbg(netdev, "Matching reset found, skipping\n");
			mutex_unlock(&adapter->rwi_lock);
			return;
		}
	}

	rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
	if (!rwi) {
		mutex_unlock(&adapter->rwi_lock);
		ibmvnic_close(netdev);
		return;
	}

	rwi->reset_reason = reason;
	list_add_tail(&rwi->list, &adapter->rwi_list);
	mutex_unlock(&adapter->rwi_lock);
	schedule_work(&adapter->ibmvnic_reset);
}
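
/* Reset requests are processed asynchronously: ibmvnic_reset() only queues
 * an ibmvnic_rwi ("reset work item") and schedules __ibmvnic_reset(), which
 * drains the list under reset_lock. Queueing a second reset with the same
 * reason while one is already pending is deliberately a no-op (see the
 * "Matching reset found" check above).
 */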

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);

	ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			continue;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);

		/* VLAN Header has been stripped by the system firmware and
		 * needs to be inserted by the driver
		 */
		if (adapter->rx_vlan_header_insertion &&
		    (flags & IBMVNIC_VLAN_STRIPPED))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       ntohs(next->rx_comp.vlan_tci));

		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, scrq_num);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
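
	/* Interrupt re-arm race: after re-enabling the queue interrupt we
	 * re-check pending_scrq(); a completion that arrived between the
	 * last loop iteration and enable_scrq_irq() would otherwise be
	 * stranded until the next interrupt, hence the napi_reschedule()
	 * and goto restart_poll below.
	 */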
	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete_done(napi, frames_processed);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	return -EOPNOTSUPP;
}

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
	.ndo_change_mtu		= ibmvnic_change_mtu,
};

/* ethtool functions */

static int ibmvnic_get_link_ksettings(struct net_device *netdev,
				      struct ethtool_link_ksettings *cmd)
{
	u32 supported, advertising;

	supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
		     SUPPORTED_FIBRE);
	advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
		       ADVERTISED_FIBRE);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
	.get_link_ksettings	= ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs  */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;

		if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_info(adapter->netdev, "Started queue %d\n",
				    scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}

static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues*/
	for (i = 0; i < total_queues - registered_queues + more ; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}

static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
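
		/* For example: with 4 KiB pages this is 4 * 4096 / 32 = 512
		 * entries per sub-CRQ; with 64 KiB pages (common on ppc64)
		 * it is 8192.
		 */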

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}
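
/* Each REQUEST_CAPABILITY CRQ sent above bumps running_cap_crqs; the
 * response handler (not shown in this excerpt) decrements it, and once it
 * reaches zero initialization can proceed. The retry argument suggests the
 * server may counter-propose values, in which case the requests are
 * re-sent without recomputing the defaults.
 */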

static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP ||
	    adapter->state == VNIC_CLOSING)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

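/* Build and send the LOGIN request. The login buffer is a fixed header
 * followed by two arrays of sub-CRQ handles, one entry per requested tx
 * queue and then one per rx queue, with the array offsets recorded in the
 * header. A separate response buffer is mapped DMA_FROM_DEVICE and its
 * address placed in the header so the server can write the login response
 * into it directly.
 */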
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

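/* REQUEST_MAP and REQUEST_UNMAP register and unregister the long term
 * mapped buffers with the VNIC server; map_id is the driver-chosen handle
 * by which both sides refer to the mapping from then on.
 */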
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

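/* The server has filled ip_offload_buf with the checksum and large
 * send/receive offloads it supports. Translate those into netdev feature
 * flags, then push the subset being enabled back to the server with a
 * CONTROL_IP_OFFLOAD request.
 */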
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

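/* Firmware error reporting: an ERROR_INDICATION CRQ carries an error id
 * plus the size of an optional detail buffer. request_error_information()
 * allocates and DMA-maps a buffer of that size, queues it on
 * adapter->errors and asks the server to fill it; handle_error_info_rsp()
 * matches the eventual REQUEST_ERROR_RSP back to the queued buffer, dumps
 * its contents and frees it.
 */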
static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

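/* A non-zero return code in the login response means the server could not
 * provide the requested number of queues; handle_login_rsp() then flags a
 * renegotiation so that login can be retried with a smaller request.
 */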
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

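/* One QUERY_CAPABILITY response arrives for each query sent by
 * send_cap_queries(); each response simply stores the returned value in
 * the corresponding adapter field. When running_cap_crqs reaches zero the
 * full set has been collected and the capability request phase starts.
 */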
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

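/* Top-level CRQ dispatcher, run from tasklet context. Transport events
 * (partner initialization, migration, failover) are handled inline;
 * command responses are routed to the handlers above.
 */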
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

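/* The CRQ interrupt handler does no work itself; it only schedules the
 * tasklet, which processes descriptors under crq->lock.
 */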
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(void *data)
{
	struct ibmvnic_adapter *adapter = data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}

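/* Allocate one page for the CRQ, DMA-map it and register it with the
 * hypervisor via H_REG_CRQ. H_RESOURCE from registration likely means a
 * previous incarnation of the driver (e.g. across kexec) still owns the
 * queue, so a close/re-open cycle via ibmvnic_reset_crq() is tried first.
 */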
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

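/* Driver bring-up: register the CRQ, send IBMVNIC_CRQ_INIT and wait for
 * the CRQ-driven handshake (version exchange, capability negotiation and
 * IP offload setup) to signal init_done before allocating the sub-CRQs.
 */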
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	rc = init_crq_queue(adapter);
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		release_crq_queue(adapter);
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
	}

	return rc;
}

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	rc = ibmvnic_init(adapter);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;
	return 0;
}

static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

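/* Advise the VIO bus how much IO entitlement (DMA-mappable memory) this
 * adapter wants: one page for the CRQ, the statistics buffer, four pages
 * per sub-CRQ, plus the long term buffers of every rx pool. Before probe
 * has populated the adapter this falls back to a fixed default.
 */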
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);