ibmvnic: Move resource initialization to its own routine
[karo-tx-linux.git] / drivers/net/ethernet/ibm/ibmvnic.c
1 /**************************************************************************/
2 /*                                                                        */
3 /*  IBM System i and System p Virtual NIC Device Driver                   */
4 /*  Copyright (C) 2014 IBM Corp.                                          */
5 /*  Santiago Leon (santi_leon@yahoo.com)                                  */
6 /*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
7 /*  John Allen (jallen@linux.vnet.ibm.com)                                */
8 /*                                                                        */
9 /*  This program is free software; you can redistribute it and/or modify  */
10 /*  it under the terms of the GNU General Public License as published by  */
11 /*  the Free Software Foundation; either version 2 of the License, or     */
12 /*  (at your option) any later version.                                   */
13 /*                                                                        */
14 /*  This program is distributed in the hope that it will be useful,       */
15 /*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
16 /*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
17 /*  GNU General Public License for more details.                          */
18 /*                                                                        */
19 /*  You should have received a copy of the GNU General Public License     */
20 /*  along with this program.                                              */
21 /*                                                                        */
22 /* This module contains the implementation of a virtual ethernet device   */
23 /* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
24 /* option of the RS/6000 Platform Architecture to interface with virtual  */
25 /* ethernet NICs that are presented to the partition by the hypervisor.   */
26 /*                                                                        */
27 /* Messages are passed between the VNIC driver and the VNIC server using  */
28 /* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
29 /* issue and receive commands that initiate communication with the server */
30 /* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
31 /* are used by the driver to notify the server that a packet is           */
32 /* ready for transmission or that a buffer has been added to receive a    */
33 /* packet. Subsequently, sCRQs are used by the server to notify the       */
34 /* driver that a packet transmission has been completed or that a packet  */
35 /* has been received and placed in a waiting buffer.                      */
36 /*                                                                        */
37 /* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
38 /* which skbs are DMA mapped and immediately unmapped when the transmit   */
39 /* or receive has been completed, the VNIC driver is required to use      */
40 /* "long term mapping". This entails that large, contiguous DMA mapped    */
41 /* buffers are allocated on driver initialization and these buffers are   */
42 /* then continuously reused to pass skbs to and from the VNIC server.     */
43 /*                                                                        */
44 /**************************************************************************/
45
46 #include <linux/module.h>
47 #include <linux/moduleparam.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/completion.h>
51 #include <linux/ioport.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/mm.h>
60 #include <linux/ethtool.h>
61 #include <linux/proc_fs.h>
62 #include <linux/in.h>
63 #include <linux/ip.h>
64 #include <linux/ipv6.h>
65 #include <linux/irq.h>
66 #include <linux/kthread.h>
67 #include <linux/seq_file.h>
68 #include <linux/interrupt.h>
69 #include <net/net_namespace.h>
70 #include <asm/hvcall.h>
71 #include <linux/atomic.h>
72 #include <asm/vio.h>
73 #include <asm/iommu.h>
74 #include <linux/uaccess.h>
75 #include <asm/firmware.h>
76 #include <linux/workqueue.h>
77 #include <linux/if_vlan.h>
78
79 #include "ibmvnic.h"
80
81 static const char ibmvnic_driver_name[] = "ibmvnic";
82 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83
84 MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
85 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86 MODULE_LICENSE("GPL");
87 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
88
89 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
90 static int ibmvnic_remove(struct vio_dev *);
91 static void release_sub_crqs(struct ibmvnic_adapter *);
92 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
93 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
94 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
95 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
96 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
97                        union sub_crq *sub_crq);
98 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
99 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
100 static int enable_scrq_irq(struct ibmvnic_adapter *,
101                            struct ibmvnic_sub_crq_queue *);
102 static int disable_scrq_irq(struct ibmvnic_adapter *,
103                             struct ibmvnic_sub_crq_queue *);
104 static int pending_scrq(struct ibmvnic_adapter *,
105                         struct ibmvnic_sub_crq_queue *);
106 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
107                                         struct ibmvnic_sub_crq_queue *);
108 static int ibmvnic_poll(struct napi_struct *napi, int budget);
109 static void send_map_query(struct ibmvnic_adapter *adapter);
110 static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
111 static void send_request_unmap(struct ibmvnic_adapter *, u8);
112 static void send_login(struct ibmvnic_adapter *adapter);
113 static void send_cap_queries(struct ibmvnic_adapter *adapter);
114 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
115 static int ibmvnic_init(struct ibmvnic_adapter *);
116 static void release_crq_queue(struct ibmvnic_adapter *);
117
118 struct ibmvnic_stat {
119         char name[ETH_GSTRING_LEN];
120         int offset;
121 };
122
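/* Locate a named counter in the adapter's firmware statistics buffer:
 * IBMVNIC_STAT_OFF() gives the byte offset of a field of struct
 * ibmvnic_statistics relative to the adapter, and IBMVNIC_GET_STAT() reads
 * that field as a u64 once firmware has filled the buffer.
 */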
123 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
124                              offsetof(struct ibmvnic_statistics, stat))
125 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
126
127 static const struct ibmvnic_stat ibmvnic_stats[] = {
128         {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
129         {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
130         {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
131         {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
132         {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
133         {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
134         {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
135         {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
136         {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
137         {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
138         {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
139         {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
140         {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
141         {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
142         {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
143         {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
144         {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
145         {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
146         {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
147         {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
148         {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
149         {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
150 };
151
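/* Register a sub-CRQ message area with the hypervisor. H_REG_SUB_CRQ returns
 * the queue number and the interrupt source assigned by firmware in the
 * hcall return buffer.
 */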
152 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
153                           unsigned long length, unsigned long *number,
154                           unsigned long *irq)
155 {
156         unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
157         long rc;
158
159         rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
160         *number = retbuf[0];
161         *irq = retbuf[1];
162
163         return rc;
164 }
165
166 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
167                                 struct ibmvnic_long_term_buff *ltb, int size)
168 {
169         struct device *dev = &adapter->vdev->dev;
170
171         ltb->size = size;
172         ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
173                                        GFP_KERNEL);
174
175         if (!ltb->buff) {
176                 dev_err(dev, "Couldn't alloc long term buffer\n");
177                 return -ENOMEM;
178         }
179         ltb->map_id = adapter->map_id;
180         adapter->map_id++;
181
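        /* Hand the mapping to firmware: send_request_map() posts a REQUEST_MAP
         * CRQ and the response handler completes fw_done, so block here until
         * the map ID has been accepted.
         */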
182         init_completion(&adapter->fw_done);
183         send_request_map(adapter, ltb->addr,
184                          ltb->size, ltb->map_id);
185         wait_for_completion(&adapter->fw_done);
186         return 0;
187 }
188
189 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
190                                 struct ibmvnic_long_term_buff *ltb)
191 {
192         struct device *dev = &adapter->vdev->dev;
193
194         if (!ltb->buff)
195                 return;
196
197         if (!adapter->failover)
198                 send_request_unmap(adapter, ltb->map_id);
199         dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
200 }
201
202 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
203                               struct ibmvnic_rx_pool *pool)
204 {
205         int count = pool->size - atomic_read(&pool->available);
206         struct device *dev = &adapter->vdev->dev;
207         int buffers_added = 0;
208         unsigned long lpar_rc;
209         union sub_crq sub_crq;
210         struct sk_buff *skb;
211         unsigned int offset;
212         dma_addr_t dma_addr;
213         unsigned char *dst;
214         u64 *handle_array;
215         int shift = 0;
216         int index;
217         int i;
218
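        /* The login response carries an array of sub-CRQ handles at
         * off_rxadd_subcrqs; handle_array[pool->index] is the RX-add queue
         * that this pool feeds.
         */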
219         handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
220                                       be32_to_cpu(adapter->login_rsp_buf->
221                                       off_rxadd_subcrqs));
222
223         for (i = 0; i < count; ++i) {
224                 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
225                 if (!skb) {
226                         dev_err(dev, "Couldn't replenish rx buff\n");
227                         adapter->replenish_no_mem++;
228                         break;
229                 }
230
231                 index = pool->free_map[pool->next_free];
232
233                 if (pool->rx_buff[index].skb)
234                         dev_err(dev, "Inconsistent free_map!\n");
235
236                 /* Copy the skb to the long term mapped DMA buffer */
237                 offset = index * pool->buff_size;
238                 dst = pool->long_term_buff.buff + offset;
239                 memset(dst, 0, pool->buff_size);
240                 dma_addr = pool->long_term_buff.addr + offset;
241                 pool->rx_buff[index].data = dst;
242
243                 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
244                 pool->rx_buff[index].dma = dma_addr;
245                 pool->rx_buff[index].skb = skb;
246                 pool->rx_buff[index].pool_index = pool->index;
247                 pool->rx_buff[index].size = pool->buff_size;
248
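                /* Build the RX-add descriptor. The correlator is the rx_buff
                 * pointer itself; the server echoes it back in the completion
                 * so ibmvnic_poll() can find the buffer again.
                 */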
249                 memset(&sub_crq, 0, sizeof(sub_crq));
250                 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
251                 sub_crq.rx_add.correlator =
252                     cpu_to_be64((u64)&pool->rx_buff[index]);
253                 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
254                 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
255
256                 /* The length field of the sCRQ is defined to be 24 bits so the
257                  * buffer size needs to be left shifted by a byte before it is
258                  * converted to big endian to prevent the last byte from being
259                  * truncated.
260                  */
261 #ifdef __LITTLE_ENDIAN__
262                 shift = 8;
263 #endif
264                 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
265
266                 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
267                                       &sub_crq);
268                 if (lpar_rc != H_SUCCESS)
269                         goto failure;
270
271                 buffers_added++;
272                 adapter->replenish_add_buff_success++;
273                 pool->next_free = (pool->next_free + 1) % pool->size;
274         }
275         atomic_add(buffers_added, &pool->available);
276         return;
277
278 failure:
279         dev_info(dev, "replenish pools failure\n");
280         pool->free_map[pool->next_free] = index;
281         pool->rx_buff[index].skb = NULL;
282         if (!dma_mapping_error(dev, dma_addr))
283                 dma_unmap_single(dev, dma_addr, pool->buff_size,
284                                  DMA_FROM_DEVICE);
285
286         dev_kfree_skb_any(skb);
287         adapter->replenish_add_buff_failure++;
288         atomic_add(buffers_added, &pool->available);
289 }
290
291 static void replenish_pools(struct ibmvnic_adapter *adapter)
292 {
293         int i;
294
295         if (adapter->migrated)
296                 return;
297
298         adapter->replenish_task_cycles++;
299         for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
300              i++) {
301                 if (adapter->rx_pool[i].active)
302                         replenish_rx_pool(adapter, &adapter->rx_pool[i]);
303         }
304 }
305
306 static void release_stats_token(struct ibmvnic_adapter *adapter)
307 {
308         struct device *dev = &adapter->vdev->dev;
309
310         if (!adapter->stats_token)
311                 return;
312
313         dma_unmap_single(dev, adapter->stats_token,
314                          sizeof(struct ibmvnic_statistics),
315                          DMA_FROM_DEVICE);
316         adapter->stats_token = 0;
317 }
318
319 static int init_stats_token(struct ibmvnic_adapter *adapter)
320 {
321         struct device *dev = &adapter->vdev->dev;
322         dma_addr_t stok;
323
324         stok = dma_map_single(dev, &adapter->stats,
325                               sizeof(struct ibmvnic_statistics),
326                               DMA_FROM_DEVICE);
327         if (dma_mapping_error(dev, stok)) {
328                 dev_err(dev, "Couldn't map stats buffer\n");
329                 return -1;
330         }
331
332         adapter->stats_token = stok;
333         return 0;
334 }
335
336 static void release_rx_pools(struct ibmvnic_adapter *adapter)
337 {
338         struct ibmvnic_rx_pool *rx_pool;
339         int rx_scrqs;
340         int i, j;
341
342         if (!adapter->rx_pool)
343                 return;
344
345         rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
346         for (i = 0; i < rx_scrqs; i++) {
347                 rx_pool = &adapter->rx_pool[i];
348
349                 kfree(rx_pool->free_map);
350                 free_long_term_buff(adapter, &rx_pool->long_term_buff);
351
352                 if (!rx_pool->rx_buff)
353                         continue;
354
355                 for (j = 0; j < rx_pool->size; j++) {
356                         if (rx_pool->rx_buff[j].skb) {
357                                 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
358                                 rx_pool->rx_buff[j].skb = NULL;
359                         }
360                 }
361
362                 kfree(rx_pool->rx_buff);
363         }
364
365         kfree(adapter->rx_pool);
366         adapter->rx_pool = NULL;
367 }
368
369 static int init_rx_pools(struct net_device *netdev)
370 {
371         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
372         struct device *dev = &adapter->vdev->dev;
373         struct ibmvnic_rx_pool *rx_pool;
374         int rxadd_subcrqs;
375         u64 *size_array;
376         int i, j;
377
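        /* Per-pool buffer sizes come from the server in the login response,
         * one entry for each RX-add sub-CRQ.
         */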
378         rxadd_subcrqs =
379                 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
380         size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
381                 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
382
383         adapter->rx_pool = kcalloc(rxadd_subcrqs,
384                                    sizeof(struct ibmvnic_rx_pool),
385                                    GFP_KERNEL);
386         if (!adapter->rx_pool) {
387                 dev_err(dev, "Failed to allocate rx pools\n");
388                 return -1;
389         }
390
391         for (i = 0; i < rxadd_subcrqs; i++) {
392                 rx_pool = &adapter->rx_pool[i];
393
394                 netdev_dbg(adapter->netdev,
395                            "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
396                            i, adapter->req_rx_add_entries_per_subcrq,
397                            be64_to_cpu(size_array[i]));
398
399                 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
400                 rx_pool->index = i;
401                 rx_pool->buff_size = be64_to_cpu(size_array[i]);
402                 rx_pool->active = 1;
403
404                 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
405                                             GFP_KERNEL);
406                 if (!rx_pool->free_map) {
407                         release_rx_pools(adapter);
408                         return -1;
409                 }
410
411                 rx_pool->rx_buff = kcalloc(rx_pool->size,
412                                            sizeof(struct ibmvnic_rx_buff),
413                                            GFP_KERNEL);
414                 if (!rx_pool->rx_buff) {
415                         dev_err(dev, "Couldn't alloc rx buffers\n");
416                         release_rx_pools(adapter);
417                         return -1;
418                 }
419
420                 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
421                                          rx_pool->size * rx_pool->buff_size)) {
422                         release_rx_pools(adapter);
423                         return -1;
424                 }
425
426                 for (j = 0; j < rx_pool->size; ++j)
427                         rx_pool->free_map[j] = j;
428
429                 atomic_set(&rx_pool->available, 0);
430                 rx_pool->next_alloc = 0;
431                 rx_pool->next_free = 0;
432         }
433
434         return 0;
435 }
436
437 static void release_tx_pools(struct ibmvnic_adapter *adapter)
438 {
439         struct ibmvnic_tx_pool *tx_pool;
440         int i, tx_scrqs;
441
442         if (!adapter->tx_pool)
443                 return;
444
445         tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
446         for (i = 0; i < tx_scrqs; i++) {
447                 tx_pool = &adapter->tx_pool[i];
448                 kfree(tx_pool->tx_buff);
449                 free_long_term_buff(adapter, &tx_pool->long_term_buff);
450                 kfree(tx_pool->free_map);
451         }
452
453         kfree(adapter->tx_pool);
454         adapter->tx_pool = NULL;
455 }
456
457 static int init_tx_pools(struct net_device *netdev)
458 {
459         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
460         struct device *dev = &adapter->vdev->dev;
461         struct ibmvnic_tx_pool *tx_pool;
462         int tx_subcrqs;
463         int i, j;
464
465         tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
466         adapter->tx_pool = kcalloc(tx_subcrqs,
467                                    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
468         if (!adapter->tx_pool)
469                 return -1;
470
471         for (i = 0; i < tx_subcrqs; i++) {
472                 tx_pool = &adapter->tx_pool[i];
473                 tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
474                                            sizeof(struct ibmvnic_tx_buff),
475                                            GFP_KERNEL);
476                 if (!tx_pool->tx_buff) {
477                         dev_err(dev, "tx pool buffer allocation failed\n");
478                         release_tx_pools(adapter);
479                         return -1;
480                 }
481
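                /* Reserve one MTU-sized slot per TX descriptor in the long
                 * term buffer; ibmvnic_xmit() copies each outgoing skb into
                 * its slot before posting the descriptor.
                 */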
482                 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
483                                          adapter->req_tx_entries_per_subcrq *
484                                          adapter->req_mtu)) {
485                         release_tx_pools(adapter);
486                         return -1;
487                 }
488
489                 tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
490                                             sizeof(int), GFP_KERNEL);
491                 if (!tx_pool->free_map) {
492                         release_tx_pools(adapter);
493                         return -1;
494                 }
495
496                 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
497                         tx_pool->free_map[j] = j;
498
499                 tx_pool->consumer_index = 0;
500                 tx_pool->producer_index = 0;
501         }
502
503         return 0;
504 }
505
506 static void release_error_buffers(struct ibmvnic_adapter *adapter)
507 {
508         struct device *dev = &adapter->vdev->dev;
509         struct ibmvnic_error_buff *error_buff, *tmp;
510         unsigned long flags;
511
512         spin_lock_irqsave(&adapter->error_list_lock, flags);
513         list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
514                 list_del(&error_buff->list);
515                 dma_unmap_single(dev, error_buff->dma, error_buff->len,
516                                  DMA_FROM_DEVICE);
517                 kfree(error_buff->buff);
518                 kfree(error_buff);
519         }
520         spin_unlock_irqrestore(&adapter->error_list_lock, flags);
521 }
522
523 static int ibmvnic_login(struct net_device *netdev)
524 {
525         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
526         unsigned long timeout = msecs_to_jiffies(30000);
527         struct device *dev = &adapter->vdev->dev;
528
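        /* The server may request a capability renegotiation during login; if
         * so, release the sub-CRQs, redo the capability exchange and retry
         * the login.
         */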
529         do {
530                 if (adapter->renegotiate) {
531                         adapter->renegotiate = false;
532                         release_sub_crqs(adapter);
533
534                         reinit_completion(&adapter->init_done);
535                         send_cap_queries(adapter);
536                         if (!wait_for_completion_timeout(&adapter->init_done,
537                                                          timeout)) {
538                                 dev_err(dev, "Capabilities query timeout\n");
539                                 return -1;
540                         }
541                 }
542
543                 reinit_completion(&adapter->init_done);
544                 send_login(adapter);
545                 if (!wait_for_completion_timeout(&adapter->init_done,
546                                                  timeout)) {
547                         dev_err(dev, "Login timeout\n");
548                         return -1;
549                 }
550         } while (adapter->renegotiate);
551
552         return 0;
553 }
554
555 static void release_resources(struct ibmvnic_adapter *adapter)
556 {
557         release_tx_pools(adapter);
558         release_rx_pools(adapter);
559
560         release_stats_token(adapter);
561         release_error_buffers(adapter);
562 }
563
564 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
565 {
566         struct net_device *netdev = adapter->netdev;
567         unsigned long timeout = msecs_to_jiffies(30000);
568         union ibmvnic_crq crq;
569         bool resend;
570         int rc;
571
572         if (adapter->logical_link_state == link_state) {
573                 netdev_dbg(netdev, "Link state already %d\n", link_state);
574                 return 0;
575         }
576
577         netdev_dbg(netdev, "setting link state %d\n", link_state);
578         memset(&crq, 0, sizeof(crq));
579         crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
580         crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
581         crq.logical_link_state.link_state = link_state;
582
583         do {
584                 resend = false;
585
586                 reinit_completion(&adapter->init_done);
587                 rc = ibmvnic_send_crq(adapter, &crq);
588                 if (rc) {
589                         netdev_err(netdev, "Failed to set link state\n");
590                         return rc;
591                 }
592
593                 if (!wait_for_completion_timeout(&adapter->init_done,
594                                                  timeout)) {
595                         netdev_err(netdev, "timeout setting link state\n");
596                         return -1;
597                 }
598
599                 if (adapter->init_done_rc == 1) {
600                         /* Partial success, delay and re-send */
601                         mdelay(1000);
602                         resend = true;
603                 }
604         } while (resend);
605
606         return 0;
607 }
608
609 static int set_real_num_queues(struct net_device *netdev)
610 {
611         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
612         int rc;
613
614         rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
615         if (rc) {
616                 netdev_err(netdev, "failed to set the number of tx queues\n");
617                 return rc;
618         }
619
620         rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
621         if (rc)
622                 netdev_err(netdev, "failed to set the number of rx queues\n");
623
624         return rc;
625 }
626
627 static int init_resources(struct ibmvnic_adapter *adapter)
628 {
629         struct net_device *netdev = adapter->netdev;
630         int i, rc;
631
632         rc = set_real_num_queues(netdev);
633         if (rc)
634                 return rc;
635
636         rc = init_sub_crq_irqs(adapter);
637         if (rc) {
638                 netdev_err(netdev, "failed to initialize sub crq irqs\n");
639                 return -1;
640         }
641
642         rc = init_stats_token(adapter);
643         if (rc)
644                 return rc;
645
646         adapter->map_id = 1;
647         adapter->napi = kcalloc(adapter->req_rx_queues,
648                                 sizeof(struct napi_struct), GFP_KERNEL);
649         if (!adapter->napi)
650                 return -ENOMEM;
651
652         for (i = 0; i < adapter->req_rx_queues; i++) {
653                 netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
654                                NAPI_POLL_WEIGHT);
655         }
656
657         send_map_query(adapter);
658
659         rc = init_rx_pools(netdev);
660         if (rc)
661                 return rc;
662
663         rc = init_tx_pools(netdev);
664         return rc;
665 }
666
667 static int ibmvnic_open(struct net_device *netdev)
668 {
669         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
670         int i, rc;
671
672         if (adapter->is_closed) {
673                 rc = ibmvnic_init(adapter);
674                 if (rc)
675                         return rc;
676         }
677
678         rc = ibmvnic_login(netdev);
679         if (rc)
680                 return rc;
681
682         rc = init_resources(adapter);
683         if (rc)
684                 return rc;
685
686         replenish_pools(adapter);
687
688         for (i = 0; i < adapter->req_rx_queues; i++)
689                 napi_enable(&adapter->napi[i]);
690
691         /* We're ready to receive frames, enable the sub-crq interrupts and
692          * set the logical link state to up
693          */
694         for (i = 0; i < adapter->req_rx_queues; i++)
695                 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
696
697         for (i = 0; i < adapter->req_tx_queues; i++)
698                 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
699
700         rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
701         if (rc) {
702                 for (i = 0; i < adapter->req_rx_queues; i++)
703                         napi_disable(&adapter->napi[i]);
704                 release_resources(adapter);
705         } else {
706                 netif_tx_start_all_queues(netdev);
707                 adapter->is_closed = false;
708         }
709
710         return rc;
711 }
712
713 static void disable_sub_crqs(struct ibmvnic_adapter *adapter)
714 {
715         int i;
716
717         if (adapter->tx_scrq) {
718                 for (i = 0; i < adapter->req_tx_queues; i++)
719                         if (adapter->tx_scrq[i])
720                                 disable_irq(adapter->tx_scrq[i]->irq);
721         }
722
723         if (adapter->rx_scrq) {
724                 for (i = 0; i < adapter->req_rx_queues; i++)
725                         if (adapter->rx_scrq[i])
726                                 disable_irq(adapter->rx_scrq[i]->irq);
727         }
728 }
729
730 static int ibmvnic_close(struct net_device *netdev)
731 {
732         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
733         int rc = 0;
734         int i;
735
736         adapter->closing = true;
737         disable_sub_crqs(adapter);
738
739         if (adapter->napi) {
740                 for (i = 0; i < adapter->req_rx_queues; i++)
741                         napi_disable(&adapter->napi[i]);
742         }
743
744         if (!adapter->failover)
745                 netif_tx_stop_all_queues(netdev);
746
747         rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
748
749         release_resources(adapter);
750
751         adapter->is_closed = true;
752         adapter->closing = false;
753         return rc;
754 }
755
756 /**
757  * build_hdr_data - creates L2/L3/L4 header data buffer
758  * @hdr_field: bitfield determining needed headers
759  * @skb: socket buffer
760  * @hdr_len: array of header lengths to be filled in
761  * @hdr_data: buffer to write the header data into
762  *
763  * Reads hdr_field to determine which headers are needed by firmware.
764  * Builds a buffer containing these headers.  Saves individual header
765  * lengths and total buffer length to be used to build descriptors.
766  */
767 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
768                           int *hdr_len, u8 *hdr_data)
769 {
770         int len = 0;
771         u8 *hdr;
772
773         hdr_len[0] = sizeof(struct ethhdr);
774
775         if (skb->protocol == htons(ETH_P_IP)) {
776                 hdr_len[1] = ip_hdr(skb)->ihl * 4;
777                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
778                         hdr_len[2] = tcp_hdrlen(skb);
779                 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
780                         hdr_len[2] = sizeof(struct udphdr);
781         } else if (skb->protocol == htons(ETH_P_IPV6)) {
782                 hdr_len[1] = sizeof(struct ipv6hdr);
783                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
784                         hdr_len[2] = tcp_hdrlen(skb);
785                 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
786                         hdr_len[2] = sizeof(struct udphdr);
787         }
788
789         memset(hdr_data, 0, 120);
790         if ((hdr_field >> 6) & 1) {
791                 hdr = skb_mac_header(skb);
792                 memcpy(hdr_data, hdr, hdr_len[0]);
793                 len += hdr_len[0];
794         }
795
796         if ((hdr_field >> 5) & 1) {
797                 hdr = skb_network_header(skb);
798                 memcpy(hdr_data + len, hdr, hdr_len[1]);
799                 len += hdr_len[1];
800         }
801
802         if ((hdr_field >> 4) & 1) {
803                 hdr = skb_transport_header(skb);
804                 memcpy(hdr_data + len, hdr, hdr_len[2]);
805                 len += hdr_len[2];
806         }
807         return len;
808 }
809
810 /**
811  * create_hdr_descs - create header and header extension descriptors
812  * @hdr_field: bitfield determining needed headers
813  * @hdr_data: buffer containing header data
814  * @len: length of data buffer
815  * @hdr_len: array of individual header lengths
816  * @scrq_arr: descriptor array
817  *
818  * Creates header and, if needed, header extension descriptors and
819  * places them in a descriptor array, scrq_arr
820  */
821
822 static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
823                              union sub_crq *scrq_arr)
824 {
825         union sub_crq hdr_desc;
826         int tmp_len = len;
827         u8 *data, *cur;
828         int tmp;
829
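        /* The first header descriptor carries up to 24 bytes of header data;
         * each header extension descriptor carries up to 29 more.
         */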
830         while (tmp_len > 0) {
831                 cur = hdr_data + len - tmp_len;
832
833                 memset(&hdr_desc, 0, sizeof(hdr_desc));
834                 if (cur != hdr_data) {
835                         data = hdr_desc.hdr_ext.data;
836                         tmp = tmp_len > 29 ? 29 : tmp_len;
837                         hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
838                         hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
839                         hdr_desc.hdr_ext.len = tmp;
840                 } else {
841                         data = hdr_desc.hdr.data;
842                         tmp = tmp_len > 24 ? 24 : tmp_len;
843                         hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
844                         hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
845                         hdr_desc.hdr.len = tmp;
846                         hdr_desc.hdr.l2_len = (u8)hdr_len[0];
847                         hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
848                         hdr_desc.hdr.l4_len = (u8)hdr_len[2];
849                         hdr_desc.hdr.flag = hdr_field << 1;
850                 }
851                 memcpy(data, cur, tmp);
852                 tmp_len -= tmp;
853                 *scrq_arr = hdr_desc;
854                 scrq_arr++;
855         }
856 }
857
858 /**
859  * build_hdr_descs_arr - build a header descriptor array
860  * @txbuff: tx buffer holding the skb, header data and descriptor array
861  * @num_entries: pointer to the descriptor count, updated to include the
862  *               header descriptors created here
863  * @hdr_field: bit field determining which headers will be sent
864  *
865  * This function will build a TX descriptor array with applicable
866  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
867  */
868
869 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
870                                 int *num_entries, u8 hdr_field)
871 {
872         int hdr_len[3] = {0, 0, 0};
873         int tot_len, len;
874         u8 *hdr_data = txbuff->hdr_data;
875
876         tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
877                                  txbuff->hdr_data);
878         len = tot_len;
879         len -= 24;
880         if (len > 0)
881                 *num_entries += len % 29 ? len / 29 + 1 : len / 29;
882         create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
883                          txbuff->indir_arr + 1);
884 }
885
886 static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
887 {
888         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
889         int queue_num = skb_get_queue_mapping(skb);
890         u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
891         struct device *dev = &adapter->vdev->dev;
892         struct ibmvnic_tx_buff *tx_buff = NULL;
893         struct ibmvnic_sub_crq_queue *tx_scrq;
894         struct ibmvnic_tx_pool *tx_pool;
895         unsigned int tx_send_failed = 0;
896         unsigned int tx_map_failed = 0;
897         unsigned int tx_dropped = 0;
898         unsigned int tx_packets = 0;
899         unsigned int tx_bytes = 0;
900         dma_addr_t data_dma_addr;
901         struct netdev_queue *txq;
902         unsigned long lpar_rc;
903         union sub_crq tx_crq;
904         unsigned int offset;
905         int num_entries = 1;
906         unsigned char *dst;
907         u64 *handle_array;
908         int index = 0;
909         int ret = 0;
910
911         tx_pool = &adapter->tx_pool[queue_num];
912         tx_scrq = adapter->tx_scrq[queue_num];
913         txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
914         handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
915                                    be32_to_cpu(adapter->login_rsp_buf->
916                                                off_txsubm_subcrqs));
917         if (adapter->migrated) {
918                 if (!netif_subqueue_stopped(netdev, skb))
919                         netif_stop_subqueue(netdev, queue_num);
920                 dev_kfree_skb_any(skb);
921
922                 tx_send_failed++;
923                 tx_dropped++;
924                 ret = NETDEV_TX_OK;
925                 goto out;
926         }
927
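        /* Copy the skb into this descriptor's slot of the long term mapped
         * buffer instead of DMA-mapping the skb itself (see the comment at
         * the top of this file).
         */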
928         index = tx_pool->free_map[tx_pool->consumer_index];
929         offset = index * adapter->req_mtu;
930         dst = tx_pool->long_term_buff.buff + offset;
931         memset(dst, 0, adapter->req_mtu);
932         skb_copy_from_linear_data(skb, dst, skb->len);
933         data_dma_addr = tx_pool->long_term_buff.addr + offset;
934
935         tx_pool->consumer_index =
936             (tx_pool->consumer_index + 1) %
937                 adapter->req_tx_entries_per_subcrq;
938
939         tx_buff = &tx_pool->tx_buff[index];
940         tx_buff->skb = skb;
941         tx_buff->data_dma[0] = data_dma_addr;
942         tx_buff->data_len[0] = skb->len;
943         tx_buff->index = index;
944         tx_buff->pool_index = queue_num;
945         tx_buff->last_frag = true;
946
947         memset(&tx_crq, 0, sizeof(tx_crq));
948         tx_crq.v1.first = IBMVNIC_CRQ_CMD;
949         tx_crq.v1.type = IBMVNIC_TX_DESC;
950         tx_crq.v1.n_crq_elem = 1;
951         tx_crq.v1.n_sge = 1;
952         tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
953         tx_crq.v1.correlator = cpu_to_be32(index);
954         tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
955         tx_crq.v1.sge_len = cpu_to_be32(skb->len);
956         tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
957
958         if (adapter->vlan_header_insertion) {
959                 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
960                 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
961         }
962
963         if (skb->protocol == htons(ETH_P_IP)) {
964                 if (ip_hdr(skb)->version == 4)
965                         tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
966                 else if (ip_hdr(skb)->version == 6)
967                         tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
968
969                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
970                         tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
971                 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
972                         tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
973         }
974
975         if (skb->ip_summed == CHECKSUM_PARTIAL) {
976                 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
977                 hdrs += 2;
978         }
979         /* determine if l2/3/4 headers are sent to firmware */
980         if ((*hdrs >> 7) & 1 &&
981             (skb->protocol == htons(ETH_P_IP) ||
982              skb->protocol == htons(ETH_P_IPV6))) {
983                 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
984                 tx_crq.v1.n_crq_elem = num_entries;
985                 tx_buff->indir_arr[0] = tx_crq;
986                 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
987                                                     sizeof(tx_buff->indir_arr),
988                                                     DMA_TO_DEVICE);
989                 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
990                         dev_kfree_skb_any(skb);
991                         tx_buff->skb = NULL;
992                         if (!firmware_has_feature(FW_FEATURE_CMO))
993                                 dev_err(dev, "tx: unable to map descriptor array\n");
994                         tx_map_failed++;
995                         tx_dropped++;
996                         ret = NETDEV_TX_OK;
997                         goto out;
998                 }
999                 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1000                                                (u64)tx_buff->indir_dma,
1001                                                (u64)num_entries);
1002         } else {
1003                 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1004                                       &tx_crq);
1005         }
1006         if (lpar_rc != H_SUCCESS) {
1007                 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
1008
1009                 if (tx_pool->consumer_index == 0)
1010                         tx_pool->consumer_index =
1011                                 adapter->req_tx_entries_per_subcrq - 1;
1012                 else
1013                         tx_pool->consumer_index--;
1014
1015                 dev_kfree_skb_any(skb);
1016                 tx_buff->skb = NULL;
1017
1018                 if (lpar_rc == H_CLOSED)
1019                         netif_stop_subqueue(netdev, queue_num);
1020
1021                 tx_send_failed++;
1022                 tx_dropped++;
1023                 ret = NETDEV_TX_OK;
1024                 goto out;
1025         }
1026
1027         if (atomic_inc_return(&tx_scrq->used)
1028                                         >= adapter->req_tx_entries_per_subcrq) {
1029                 netdev_info(netdev, "Stopping queue %d\n", queue_num);
1030                 netif_stop_subqueue(netdev, queue_num);
1031         }
1032
1033         tx_packets++;
1034         tx_bytes += skb->len;
1035         txq->trans_start = jiffies;
1036         ret = NETDEV_TX_OK;
1037
1038 out:
1039         netdev->stats.tx_dropped += tx_dropped;
1040         netdev->stats.tx_bytes += tx_bytes;
1041         netdev->stats.tx_packets += tx_packets;
1042         adapter->tx_send_failed += tx_send_failed;
1043         adapter->tx_map_failed += tx_map_failed;
1044
1045         return ret;
1046 }
1047
1048 static void ibmvnic_set_multi(struct net_device *netdev)
1049 {
1050         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1051         struct netdev_hw_addr *ha;
1052         union ibmvnic_crq crq;
1053
1054         memset(&crq, 0, sizeof(crq));
1055         crq.request_capability.first = IBMVNIC_CRQ_CMD;
1056         crq.request_capability.cmd = REQUEST_CAPABILITY;
1057
1058         if (netdev->flags & IFF_PROMISC) {
1059                 if (!adapter->promisc_supported)
1060                         return;
1061         } else {
1062                 if (netdev->flags & IFF_ALLMULTI) {
1063                         /* Accept all multicast */
1064                         memset(&crq, 0, sizeof(crq));
1065                         crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1066                         crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1067                         crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1068                         ibmvnic_send_crq(adapter, &crq);
1069                 } else if (netdev_mc_empty(netdev)) {
1070                         /* Reject all multicast */
1071                         memset(&crq, 0, sizeof(crq));
1072                         crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1073                         crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1074                         crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1075                         ibmvnic_send_crq(adapter, &crq);
1076                 } else {
1077                         /* Accept one or more multicast(s) */
1078                         netdev_for_each_mc_addr(ha, netdev) {
1079                                 memset(&crq, 0, sizeof(crq));
1080                                 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1081                                 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1082                                 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1083                                 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1084                                                 ha->addr);
1085                                 ibmvnic_send_crq(adapter, &crq);
1086                         }
1087                 }
1088         }
1089 }
1090
1091 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1092 {
1093         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1094         struct sockaddr *addr = p;
1095         union ibmvnic_crq crq;
1096
1097         if (!is_valid_ether_addr(addr->sa_data))
1098                 return -EADDRNOTAVAIL;
1099
1100         memset(&crq, 0, sizeof(crq));
1101         crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1102         crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1103         ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1104         ibmvnic_send_crq(adapter, &crq);
1105         /* netdev->dev_addr is changed in handle_change_mac_rsp function */
1106         return 0;
1107 }
1108
1109 static void ibmvnic_tx_timeout(struct net_device *dev)
1110 {
1111         struct ibmvnic_adapter *adapter = netdev_priv(dev);
1112         int rc;
1113
1114         /* Adapter timed out, resetting it */
1115         release_sub_crqs(adapter);
1116         rc = ibmvnic_reset_crq(adapter);
1117         if (rc)
1118                 dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
1119         else
1120                 ibmvnic_send_crq_init(adapter);
1121 }
1122
1123 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
1124                                   struct ibmvnic_rx_buff *rx_buff)
1125 {
1126         struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1127
1128         rx_buff->skb = NULL;
1129
1130         pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
1131         pool->next_alloc = (pool->next_alloc + 1) % pool->size;
1132
1133         atomic_dec(&pool->available);
1134 }
1135
1136 static int ibmvnic_poll(struct napi_struct *napi, int budget)
1137 {
1138         struct net_device *netdev = napi->dev;
1139         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1140         int scrq_num = (int)(napi - adapter->napi);
1141         int frames_processed = 0;
1142 restart_poll:
1143         while (frames_processed < budget) {
1144                 struct sk_buff *skb;
1145                 struct ibmvnic_rx_buff *rx_buff;
1146                 union sub_crq *next;
1147                 u32 length;
1148                 u16 offset;
1149                 u8 flags = 0;
1150
1151                 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
1152                         break;
1153                 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
1154                 rx_buff =
1155                     (struct ibmvnic_rx_buff *)be64_to_cpu(next->
1156                                                           rx_comp.correlator);
1157                 /* do error checking */
1158                 if (next->rx_comp.rc) {
1159                         netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
1160                         /* free the entry */
1161                         next->rx_comp.first = 0;
1162                         remove_buff_from_pool(adapter, rx_buff);
1163                         break;
1164                 }
1165
1166                 length = be32_to_cpu(next->rx_comp.len);
1167                 offset = be16_to_cpu(next->rx_comp.off_frame_data);
1168                 flags = next->rx_comp.flags;
1169                 skb = rx_buff->skb;
1170                 skb_copy_to_linear_data(skb, rx_buff->data + offset,
1171                                         length);
1172
1173                 /* VLAN Header has been stripped by the system firmware and
1174                  * needs to be inserted by the driver
1175                  */
1176                 if (adapter->rx_vlan_header_insertion &&
1177                     (flags & IBMVNIC_VLAN_STRIPPED))
1178                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1179                                                ntohs(next->rx_comp.vlan_tci));
1180
1181                 /* free the entry */
1182                 next->rx_comp.first = 0;
1183                 remove_buff_from_pool(adapter, rx_buff);
1184
1185                 skb_put(skb, length);
1186                 skb->protocol = eth_type_trans(skb, netdev);
1187
1188                 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1189                     flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1190                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1191                 }
1192
1193                 length = skb->len;
1194                 napi_gro_receive(napi, skb); /* send it up */
1195                 netdev->stats.rx_packets++;
1196                 netdev->stats.rx_bytes += length;
1197                 frames_processed++;
1198         }
1199         replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
1200
1201         if (frames_processed < budget) {
1202                 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1203                 napi_complete_done(napi, frames_processed);
1204                 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1205                     napi_reschedule(napi)) {
1206                         disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1207                         goto restart_poll;
1208                 }
1209         }
1210         return frames_processed;
1211 }
1212
1213 #ifdef CONFIG_NET_POLL_CONTROLLER
1214 static void ibmvnic_netpoll_controller(struct net_device *dev)
1215 {
1216         struct ibmvnic_adapter *adapter = netdev_priv(dev);
1217         int i;
1218
1219         replenish_pools(netdev_priv(dev));
1220         for (i = 0; i < adapter->req_rx_queues; i++)
1221                 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1222                                      adapter->rx_scrq[i]);
1223 }
1224 #endif
1225
1226 static const struct net_device_ops ibmvnic_netdev_ops = {
1227         .ndo_open               = ibmvnic_open,
1228         .ndo_stop               = ibmvnic_close,
1229         .ndo_start_xmit         = ibmvnic_xmit,
1230         .ndo_set_rx_mode        = ibmvnic_set_multi,
1231         .ndo_set_mac_address    = ibmvnic_set_mac,
1232         .ndo_validate_addr      = eth_validate_addr,
1233         .ndo_tx_timeout         = ibmvnic_tx_timeout,
1234 #ifdef CONFIG_NET_POLL_CONTROLLER
1235         .ndo_poll_controller    = ibmvnic_netpoll_controller,
1236 #endif
1237 };
1238
1239 /* ethtool functions */
1240
1241 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
1242                                       struct ethtool_link_ksettings *cmd)
1243 {
1244         u32 supported, advertising;
1245
1246         supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
1247                           SUPPORTED_FIBRE);
1248         advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1249                             ADVERTISED_FIBRE);
1250         cmd->base.speed = SPEED_1000;
1251         cmd->base.duplex = DUPLEX_FULL;
1252         cmd->base.port = PORT_FIBRE;
1253         cmd->base.phy_address = 0;
1254         cmd->base.autoneg = AUTONEG_ENABLE;
1255
1256         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1257                                                 supported);
1258         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1259                                                 advertising);
1260
1261         return 0;
1262 }
1263
1264 static void ibmvnic_get_drvinfo(struct net_device *dev,
1265                                 struct ethtool_drvinfo *info)
1266 {
1267         strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1268         strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1269 }
1270
1271 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1272 {
1273         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1274
1275         return adapter->msg_enable;
1276 }
1277
1278 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1279 {
1280         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1281
1282         adapter->msg_enable = data;
1283 }
1284
1285 static u32 ibmvnic_get_link(struct net_device *netdev)
1286 {
1287         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1288
1289         /* Don't need to send a query because we request a logical link up at
1290          * init and then we wait for link state indications
1291          */
1292         return adapter->logical_link_state;
1293 }
1294
1295 static void ibmvnic_get_ringparam(struct net_device *netdev,
1296                                   struct ethtool_ringparam *ring)
1297 {
1298         ring->rx_max_pending = 0;
1299         ring->tx_max_pending = 0;
1300         ring->rx_mini_max_pending = 0;
1301         ring->rx_jumbo_max_pending = 0;
1302         ring->rx_pending = 0;
1303         ring->tx_pending = 0;
1304         ring->rx_mini_pending = 0;
1305         ring->rx_jumbo_pending = 0;
1306 }
1307
1308 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1309 {
1310         int i;
1311
1312         if (stringset != ETH_SS_STATS)
1313                 return;
1314
1315         for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1316                 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1317 }
1318
1319 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1320 {
1321         switch (sset) {
1322         case ETH_SS_STATS:
1323                 return ARRAY_SIZE(ibmvnic_stats);
1324         default:
1325                 return -EOPNOTSUPP;
1326         }
1327 }
1328
1329 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1330                                       struct ethtool_stats *stats, u64 *data)
1331 {
1332         struct ibmvnic_adapter *adapter = netdev_priv(dev);
1333         union ibmvnic_crq crq;
1334         int i;
1335
1336         memset(&crq, 0, sizeof(crq));
1337         crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1338         crq.request_statistics.cmd = REQUEST_STATISTICS;
1339         crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1340         crq.request_statistics.len =
1341             cpu_to_be32(sizeof(struct ibmvnic_statistics));
1342
1343         /* Wait for data to be written */
1344         init_completion(&adapter->stats_done);
1345         ibmvnic_send_crq(adapter, &crq);
1346         wait_for_completion(&adapter->stats_done);
1347
1348         for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1349                 data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1350 }
1351
1352 static const struct ethtool_ops ibmvnic_ethtool_ops = {
1353         .get_drvinfo            = ibmvnic_get_drvinfo,
1354         .get_msglevel           = ibmvnic_get_msglevel,
1355         .set_msglevel           = ibmvnic_set_msglevel,
1356         .get_link               = ibmvnic_get_link,
1357         .get_ringparam          = ibmvnic_get_ringparam,
1358         .get_strings            = ibmvnic_get_strings,
1359         .get_sset_count         = ibmvnic_get_sset_count,
1360         .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
1361         .get_link_ksettings     = ibmvnic_get_link_ksettings,
1362 };
1363
1364 /* Routines for managing CRQs/sCRQs */
1365
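/* Free a single sub-CRQ: ask the hypervisor to close it (H_FREE_SUB_CRQ,
 * retrying while busy), then unmap and free its message buffer.
 */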
1366 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1367                                   struct ibmvnic_sub_crq_queue *scrq)
1368 {
1369         struct device *dev = &adapter->vdev->dev;
1370         long rc;
1371
1372         netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1373
1374         /* Close the sub-crq */
1375         do {
1376                 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1377                                         adapter->vdev->unit_address,
1378                                         scrq->crq_num);
1379         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1380
1381         if (rc) {
1382                 netdev_err(adapter->netdev,
1383                            "Failed to release sub-CRQ %16lx, rc = %ld\n",
1384                            scrq->crq_num, rc);
1385         }
1386
1387         dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1388                          DMA_BIDIRECTIONAL);
1389         free_pages((unsigned long)scrq->msgs, 2);
1390         kfree(scrq);
1391 }
1392
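/* Allocate and register one sub-CRQ: allocate the queue structure and a
 * zeroed four-page message buffer, DMA-map it, and register it with the
 * hypervisor via h_reg_sub_crq.  Returns the new queue, or NULL on failure.
 */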
1393 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1394                                                         *adapter)
1395 {
1396         struct device *dev = &adapter->vdev->dev;
1397         struct ibmvnic_sub_crq_queue *scrq;
1398         int rc;
1399
1400         scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
1401         if (!scrq)
1402                 return NULL;
1403
1404         scrq->msgs =
1405                 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
1406         if (!scrq->msgs) {
1407                 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
1408                 goto zero_page_failed;
1409         }
1410
1411         scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1412                                          DMA_BIDIRECTIONAL);
1413         if (dma_mapping_error(dev, scrq->msg_token)) {
1414                 dev_warn(dev, "Couldn't map crq queue messages page\n");
1415                 goto map_failed;
1416         }
1417
1418         rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1419                            4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1420
1421         if (rc == H_RESOURCE)
1422                 rc = ibmvnic_reset_crq(adapter);
1423
1424         if (rc == H_CLOSED) {
1425                 dev_warn(dev, "Partner adapter not ready, waiting.\n");
1426         } else if (rc) {
1427                 dev_warn(dev, "Error %d registering sub-crq\n", rc);
1428                 goto reg_failed;
1429         }
1430
1431         scrq->adapter = adapter;
1432         scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1433         spin_lock_init(&scrq->lock);
1434
1435         netdev_dbg(adapter->netdev,
1436                    "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1437                    scrq->crq_num, scrq->hw_irq, scrq->irq);
1438
1439         return scrq;
1440
1441 reg_failed:
1442         dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1443                          DMA_BIDIRECTIONAL);
1444 map_failed:
1445         free_pages((unsigned long)scrq->msgs, 2);
1446 zero_page_failed:
1447         kfree(scrq);
1448
1449         return NULL;
1450 }
1451
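/* Tear down every tx and rx sub-CRQ: free any registered IRQs and their
 * mappings, release each queue with the hypervisor, and drop the
 * tx_scrq/rx_scrq arrays.
 */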
1452 static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1453 {
1454         int i;
1455
1456         if (adapter->tx_scrq) {
1457                 for (i = 0; i < adapter->req_tx_queues; i++) {
1458                         if (!adapter->tx_scrq[i])
1459                                 continue;
1460
1461                         if (adapter->tx_scrq[i]->irq) {
1462                                 free_irq(adapter->tx_scrq[i]->irq,
1463                                          adapter->tx_scrq[i]);
1464                                 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
1465                                 adapter->tx_scrq[i]->irq = 0;
1466                         }
1467
1468                         release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
1469                 }
1470
1471                 kfree(adapter->tx_scrq);
1472                 adapter->tx_scrq = NULL;
1473         }
1474
1475         if (adapter->rx_scrq) {
1476                 for (i = 0; i < adapter->req_rx_queues; i++) {
1477                         if (!adapter->rx_scrq[i])
1478                                 continue;
1479
1480                         if (adapter->rx_scrq[i]->irq) {
1481                                 free_irq(adapter->rx_scrq[i]->irq,
1482                                          adapter->rx_scrq[i]);
1483                                 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
1484                                 adapter->rx_scrq[i]->irq = 0;
1485                         }
1486
1487                         release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
1488                 }
1489
1490                 kfree(adapter->rx_scrq);
1491                 adapter->rx_scrq = NULL;
1492         }
1493 }
1494
1495 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1496                             struct ibmvnic_sub_crq_queue *scrq)
1497 {
1498         struct device *dev = &adapter->vdev->dev;
1499         unsigned long rc;
1500
1501         rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1502                                 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1503         if (rc)
1504                 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1505                         scrq->hw_irq, rc);
1506         return rc;
1507 }
1508
1509 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1510                            struct ibmvnic_sub_crq_queue *scrq)
1511 {
1512         struct device *dev = &adapter->vdev->dev;
1513         unsigned long rc;
1514
1515         if (scrq->hw_irq > 0x100000000ULL) {
1516                 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1517                 return 1;
1518         }
1519
1520         rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1521                                 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1522         if (rc)
1523                 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1524                         scrq->hw_irq, rc);
1525         return rc;
1526 }
1527
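/* Drain tx completions from a sub-CRQ: unmap any indirect descriptor
 * buffers, free completed skbs, return tx_buff slots to the pool's free
 * map, and wake the subqueue once it has drained to half full.  The IRQ is
 * re-enabled and a final pending check catches completions that raced in.
 */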
1528 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1529                                struct ibmvnic_sub_crq_queue *scrq)
1530 {
1531         struct device *dev = &adapter->vdev->dev;
1532         struct ibmvnic_tx_buff *txbuff;
1533         union sub_crq *next;
1534         int index;
1535         int i, j;
1536         u8 first;
1537
1538 restart_loop:
1539         while (pending_scrq(adapter, scrq)) {
1540                 unsigned int pool = scrq->pool_index;
1541
1542                 next = ibmvnic_next_scrq(adapter, scrq);
1543                 for (i = 0; i < next->tx_comp.num_comps; i++) {
1544                         if (next->tx_comp.rcs[i]) {
1545                                 dev_err(dev, "tx error %x\n",
1546                                         next->tx_comp.rcs[i]);
1547                                 continue;
1548                         }
1549                         index = be32_to_cpu(next->tx_comp.correlators[i]);
1550                         txbuff = &adapter->tx_pool[pool].tx_buff[index];
1551
1552                         for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1553                                 if (!txbuff->data_dma[j])
1554                                         continue;
1555
1556                                 txbuff->data_dma[j] = 0;
1557                         }
1558                         /* if sub_crq was sent indirectly */
1559                         first = txbuff->indir_arr[0].generic.first;
1560                         if (first == IBMVNIC_CRQ_CMD) {
1561                                 dma_unmap_single(dev, txbuff->indir_dma,
1562                                                  sizeof(txbuff->indir_arr),
1563                                                  DMA_TO_DEVICE);
1564                         }
1565
1566                         if (txbuff->last_frag) {
1567                                 if (atomic_sub_return(next->tx_comp.num_comps,
1568                                                       &scrq->used) <=
1569                                     (adapter->req_tx_entries_per_subcrq / 2) &&
1570                                     netif_subqueue_stopped(adapter->netdev,
1571                                                            txbuff->skb)) {
1572                                         netif_wake_subqueue(adapter->netdev,
1573                                                             scrq->pool_index);
1574                                         netdev_dbg(adapter->netdev,
1575                                                    "Started queue %d\n",
1576                                                    scrq->pool_index);
1577                                 }
1578
1579                                 dev_kfree_skb_any(txbuff->skb);
1580                         }
1581
1582                         adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1583                                                      producer_index] = index;
1584                         adapter->tx_pool[pool].producer_index =
1585                             (adapter->tx_pool[pool].producer_index + 1) %
1586                             adapter->req_tx_entries_per_subcrq;
1587                 }
1588                 /* mark the tx_comp entry as consumed */
1589                 next->tx_comp.first = 0;
1590         }
1591
1592         enable_scrq_irq(adapter, scrq);
1593
1594         if (pending_scrq(adapter, scrq)) {
1595                 disable_scrq_irq(adapter, scrq);
1596                 goto restart_loop;
1597         }
1598
1599         return 0;
1600 }
1601
1602 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1603 {
1604         struct ibmvnic_sub_crq_queue *scrq = instance;
1605         struct ibmvnic_adapter *adapter = scrq->adapter;
1606
1607         disable_scrq_irq(adapter, scrq);
1608         ibmvnic_complete_tx(adapter, scrq);
1609
1610         return IRQ_HANDLED;
1611 }
1612
1613 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1614 {
1615         struct ibmvnic_sub_crq_queue *scrq = instance;
1616         struct ibmvnic_adapter *adapter = scrq->adapter;
1617
1618         if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1619                 disable_scrq_irq(adapter, scrq);
1620                 __napi_schedule(&adapter->napi[scrq->scrq_num]);
1621         }
1622
1623         return IRQ_HANDLED;
1624 }
1625
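/* Create an IRQ mapping and request an interrupt handler for every tx and
 * rx sub-CRQ.  On failure, free any IRQs already requested and release all
 * sub-CRQs.
 */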
1626 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
1627 {
1628         struct device *dev = &adapter->vdev->dev;
1629         struct ibmvnic_sub_crq_queue *scrq;
1630         int i = 0, j = 0;
1631         int rc = 0;
1632
1633         for (i = 0; i < adapter->req_tx_queues; i++) {
1634                 scrq = adapter->tx_scrq[i];
1635                 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1636
1637                 if (!scrq->irq) {
1638                         rc = -EINVAL;
1639                         dev_err(dev, "Error mapping irq\n");
1640                         goto req_tx_irq_failed;
1641                 }
1642
1643                 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
1644                                  0, "ibmvnic_tx", scrq);
1645
1646                 if (rc) {
1647                         dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1648                                 scrq->irq, rc);
1649                         irq_dispose_mapping(scrq->irq);
1650                         goto req_tx_irq_failed;
1651                 }
1652         }
1653
1654         for (i = 0; i < adapter->req_rx_queues; i++) {
1655                 scrq = adapter->rx_scrq[i];
1656                 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1657                 if (!scrq->irq) {
1658                         rc = -EINVAL;
1659                         dev_err(dev, "Error mapping irq\n");
1660                         goto req_rx_irq_failed;
1661                 }
1662                 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
1663                                  0, "ibmvnic_rx", scrq);
1664                 if (rc) {
1665                         dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1666                                 scrq->irq, rc);
1667                         irq_dispose_mapping(scrq->irq);
1668                         goto req_rx_irq_failed;
1669                 }
1670         }
1671         return rc;
1672
1673 req_rx_irq_failed:
1674         for (j = 0; j < i; j++) {
1675                 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1676                 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
1677         }
1678         i = adapter->req_tx_queues;
1679 req_tx_irq_failed:
1680         for (j = 0; j < i; j++) {
1681                 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1682                 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
1683         }
1684         release_sub_crqs(adapter);
1685         return rc;
1686 }
1687
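/* Allocate the requested tx and rx sub-CRQs.  If fewer queues could be
 * registered than requested, reduce req_tx_queues/req_rx_queues to absorb
 * the shortfall (never below the server-advertised minimums), then split
 * the registered queues into the tx_scrq and rx_scrq arrays.
 */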
1688 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
1689 {
1690         struct device *dev = &adapter->vdev->dev;
1691         struct ibmvnic_sub_crq_queue **allqueues;
1692         int registered_queues = 0;
1693         int total_queues;
1694         int more = 0;
1695         int i;
1696
1697         total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1698
1699         allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
1700         if (!allqueues)
1701                 return -1;
1702
1703         for (i = 0; i < total_queues; i++) {
1704                 allqueues[i] = init_sub_crq_queue(adapter);
1705                 if (!allqueues[i]) {
1706                         dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1707                         break;
1708                 }
1709                 registered_queues++;
1710         }
1711
1712         /* Make sure we were able to register the minimum number of queues */
1713         if (registered_queues <
1714             adapter->min_tx_queues + adapter->min_rx_queues) {
1715                 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1716                 goto tx_failed;
1717         }
1718
1719         /* Distribute the shortfall of allocated queues between tx and rx */
1720         for (i = 0; i < total_queues - registered_queues + more; i++) {
1721                 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1722                 switch (i % 3) {
1723                 case 0:
1724                         if (adapter->req_rx_queues > adapter->min_rx_queues)
1725                                 adapter->req_rx_queues--;
1726                         else
1727                                 more++;
1728                         break;
1729                 case 1:
1730                         if (adapter->req_tx_queues > adapter->min_tx_queues)
1731                                 adapter->req_tx_queues--;
1732                         else
1733                                 more++;
1734                         break;
1735                 }
1736         }
1737
1738         adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1739                                    sizeof(*adapter->tx_scrq), GFP_KERNEL);
1740         if (!adapter->tx_scrq)
1741                 goto tx_failed;
1742
1743         for (i = 0; i < adapter->req_tx_queues; i++) {
1744                 adapter->tx_scrq[i] = allqueues[i];
1745                 adapter->tx_scrq[i]->pool_index = i;
1746         }
1747
1748         adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1749                                    sizeof(*adapter->rx_scrq), GFP_KERNEL);
1750         if (!adapter->rx_scrq)
1751                 goto rx_failed;
1752
1753         for (i = 0; i < adapter->req_rx_queues; i++) {
1754                 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1755                 adapter->rx_scrq[i]->scrq_num = i;
1756         }
1757
1758         kfree(allqueues);
1759         return 0;
1760
1761 rx_failed:
1762         kfree(adapter->tx_scrq);
1763         adapter->tx_scrq = NULL;
1764 tx_failed:
1765         for (i = 0; i < registered_queues; i++)
1766                 release_sub_crq_queue(adapter, allqueues[i]);
1767         kfree(allqueues);
1768         return -1;
1769 }
1770
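/* Send REQUEST_CAPABILITY CRQs for the negotiated queue counts, per-queue
 * entry counts, MTU and promiscuous mode.  On the first (non-retry) call,
 * the requested values are derived from the server's advertised
 * capabilities, with the entry counts clamped to what fits in a four-page
 * sub-CRQ buffer.
 */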
1771 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
1772 {
1773         struct device *dev = &adapter->vdev->dev;
1774         union ibmvnic_crq crq;
1775
1776         if (!retry) {
1777                 /* Sub-CRQ entries are 32 bytes long */
1778                 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
1779
1780                 if (adapter->min_tx_entries_per_subcrq > entries_page ||
1781                     adapter->min_rx_add_entries_per_subcrq > entries_page) {
1782                         dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1783                         return;
1784                 }
1785
1786                 /* Use the smaller of the queried maximum and the number of
1787                  * entries that fit in our 4 * PAGE_SIZE sub-CRQ buffer
1788                  */
1789                 adapter->req_tx_entries_per_subcrq =
1790                     adapter->max_tx_entries_per_subcrq > entries_page ?
1791                     entries_page : adapter->max_tx_entries_per_subcrq;
1792                 adapter->req_rx_add_entries_per_subcrq =
1793                     adapter->max_rx_add_entries_per_subcrq > entries_page ?
1794                     entries_page : adapter->max_rx_add_entries_per_subcrq;
1795
1796                 adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
1797                 adapter->req_rx_queues = adapter->opt_rx_comp_queues;
1798                 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1799
1800                 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
1801         }
1802
1803         memset(&crq, 0, sizeof(crq));
1804         crq.request_capability.first = IBMVNIC_CRQ_CMD;
1805         crq.request_capability.cmd = REQUEST_CAPABILITY;
1806
1807         crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1808         crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1809         atomic_inc(&adapter->running_cap_crqs);
1810         ibmvnic_send_crq(adapter, &crq);
1811
1812         crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1813         crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1814         atomic_inc(&adapter->running_cap_crqs);
1815         ibmvnic_send_crq(adapter, &crq);
1816
1817         crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1818         crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1819         atomic_inc(&adapter->running_cap_crqs);
1820         ibmvnic_send_crq(adapter, &crq);
1821
1822         crq.request_capability.capability =
1823             cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1824         crq.request_capability.number =
1825             cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1826         atomic_inc(&adapter->running_cap_crqs);
1827         ibmvnic_send_crq(adapter, &crq);
1828
1829         crq.request_capability.capability =
1830             cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1831         crq.request_capability.number =
1832             cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1833         atomic_inc(&adapter->running_cap_crqs);
1834         ibmvnic_send_crq(adapter, &crq);
1835
1836         crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1837         crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1838         atomic_inc(&adapter->running_cap_crqs);
1839         ibmvnic_send_crq(adapter, &crq);
1840
1841         if (adapter->netdev->flags & IFF_PROMISC) {
1842                 if (adapter->promisc_supported) {
1843                         crq.request_capability.capability =
1844                             cpu_to_be16(PROMISC_REQUESTED);
1845                         crq.request_capability.number = cpu_to_be64(1);
1846                         atomic_inc(&adapter->running_cap_crqs);
1847                         ibmvnic_send_crq(adapter, &crq);
1848                 }
1849         } else {
1850                 crq.request_capability.capability =
1851                     cpu_to_be16(PROMISC_REQUESTED);
1852                 crq.request_capability.number = cpu_to_be64(0);
1853                 atomic_inc(&adapter->running_cap_crqs);
1854                 ibmvnic_send_crq(adapter, &crq);
1855         }
1856 }
1857
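/* Return 1 if the current sub-CRQ entry holds a pending response (or the
 * adapter is closing), 0 otherwise.
 */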
1858 static int pending_scrq(struct ibmvnic_adapter *adapter,
1859                         struct ibmvnic_sub_crq_queue *scrq)
1860 {
1861         union sub_crq *entry = &scrq->msgs[scrq->cur];
1862
1863         if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1864                 return 1;
1865         else
1866                 return 0;
1867 }
1868
1869 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1870                                         struct ibmvnic_sub_crq_queue *scrq)
1871 {
1872         union sub_crq *entry;
1873         unsigned long flags;
1874
1875         spin_lock_irqsave(&scrq->lock, flags);
1876         entry = &scrq->msgs[scrq->cur];
1877         if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1878                 if (++scrq->cur == scrq->size)
1879                         scrq->cur = 0;
1880         } else {
1881                 entry = NULL;
1882         }
1883         spin_unlock_irqrestore(&scrq->lock, flags);
1884
1885         return entry;
1886 }
1887
1888 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1889 {
1890         struct ibmvnic_crq_queue *queue = &adapter->crq;
1891         union ibmvnic_crq *crq;
1892
1893         crq = &queue->msgs[queue->cur];
1894         if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1895                 if (++queue->cur == queue->size)
1896                         queue->cur = 0;
1897         } else {
1898                 crq = NULL;
1899         }
1900
1901         return crq;
1902 }
1903
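/* Hand one sub-CRQ descriptor to the hypervisor via the H_SEND_SUB_CRQ
 * hcall.
 */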
1904 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1905                        union sub_crq *sub_crq)
1906 {
1907         unsigned int ua = adapter->vdev->unit_address;
1908         struct device *dev = &adapter->vdev->dev;
1909         u64 *u64_crq = (u64 *)sub_crq;
1910         int rc;
1911
1912         netdev_dbg(adapter->netdev,
1913                    "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1914                    (unsigned long int)cpu_to_be64(remote_handle),
1915                    (unsigned long int)cpu_to_be64(u64_crq[0]),
1916                    (unsigned long int)cpu_to_be64(u64_crq[1]),
1917                    (unsigned long int)cpu_to_be64(u64_crq[2]),
1918                    (unsigned long int)cpu_to_be64(u64_crq[3]));
1919
1920         /* Make sure the hypervisor sees the complete request */
1921         mb();
1922
1923         rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1924                                 cpu_to_be64(remote_handle),
1925                                 cpu_to_be64(u64_crq[0]),
1926                                 cpu_to_be64(u64_crq[1]),
1927                                 cpu_to_be64(u64_crq[2]),
1928                                 cpu_to_be64(u64_crq[3]));
1929
1930         if (rc) {
1931                 if (rc == H_CLOSED)
1932                         dev_warn(dev, "CRQ Queue closed\n");
1933                 dev_err(dev, "Send error (rc=%d)\n", rc);
1934         }
1935
1936         return rc;
1937 }
1938
1939 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1940                                 u64 remote_handle, u64 ioba, u64 num_entries)
1941 {
1942         unsigned int ua = adapter->vdev->unit_address;
1943         struct device *dev = &adapter->vdev->dev;
1944         int rc;
1945
1946         /* Make sure the hypervisor sees the complete request */
1947         mb();
1948         rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1949                                 cpu_to_be64(remote_handle),
1950                                 ioba, num_entries);
1951
1952         if (rc) {
1953                 if (rc == H_CLOSED)
1954                         dev_warn(dev, "CRQ Queue closed\n");
1955                 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1956         }
1957
1958         return rc;
1959 }
1960
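/* Send one 16-byte command to the VNIC server's main CRQ via the
 * H_SEND_CRQ hcall.
 */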
1961 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1962                             union ibmvnic_crq *crq)
1963 {
1964         unsigned int ua = adapter->vdev->unit_address;
1965         struct device *dev = &adapter->vdev->dev;
1966         u64 *u64_crq = (u64 *)crq;
1967         int rc;
1968
1969         netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1970                    (unsigned long int)cpu_to_be64(u64_crq[0]),
1971                    (unsigned long int)cpu_to_be64(u64_crq[1]));
1972
1973         /* Make sure the hypervisor sees the complete request */
1974         mb();
1975
1976         rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1977                                 cpu_to_be64(u64_crq[0]),
1978                                 cpu_to_be64(u64_crq[1]));
1979
1980         if (rc) {
1981                 if (rc == H_CLOSED)
1982                         dev_warn(dev, "CRQ Queue closed\n");
1983                 dev_warn(dev, "Send error (rc=%d)\n", rc);
1984         }
1985
1986         return rc;
1987 }
1988
1989 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1990 {
1991         union ibmvnic_crq crq;
1992
1993         memset(&crq, 0, sizeof(crq));
1994         crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1995         crq.generic.cmd = IBMVNIC_CRQ_INIT;
1996         netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1997
1998         return ibmvnic_send_crq(adapter, &crq);
1999 }
2000
2001 static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
2002 {
2003         union ibmvnic_crq crq;
2004
2005         memset(&crq, 0, sizeof(crq));
2006         crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
2007         crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
2008         netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
2009
2010         return ibmvnic_send_crq(adapter, &crq);
2011 }
2012
2013 static int send_version_xchg(struct ibmvnic_adapter *adapter)
2014 {
2015         union ibmvnic_crq crq;
2016
2017         memset(&crq, 0, sizeof(crq));
2018         crq.version_exchange.first = IBMVNIC_CRQ_CMD;
2019         crq.version_exchange.cmd = VERSION_EXCHANGE;
2020         crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
2021
2022         return ibmvnic_send_crq(adapter, &crq);
2023 }
2024
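/* Build the login buffer (listing the tx and rx sub-CRQ numbers), DMA-map
 * it along with a response buffer, and send the LOGIN CRQ to the server.
 */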
2025 static void send_login(struct ibmvnic_adapter *adapter)
2026 {
2027         struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
2028         struct ibmvnic_login_buffer *login_buffer;
2029         struct device *dev = &adapter->vdev->dev;
2030         dma_addr_t rsp_buffer_token;
2031         dma_addr_t buffer_token;
2032         size_t rsp_buffer_size;
2033         union ibmvnic_crq crq;
2034         size_t buffer_size;
2035         __be64 *tx_list_p;
2036         __be64 *rx_list_p;
2037         int i;
2038
2039         buffer_size =
2040             sizeof(struct ibmvnic_login_buffer) +
2041             sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
2042
2043         login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
2044         if (!login_buffer)
2045                 goto buf_alloc_failed;
2046
2047         buffer_token = dma_map_single(dev, login_buffer, buffer_size,
2048                                       DMA_TO_DEVICE);
2049         if (dma_mapping_error(dev, buffer_token)) {
2050                 dev_err(dev, "Couldn't map login buffer\n");
2051                 goto buf_map_failed;
2052         }
2053
2054         rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
2055                           sizeof(u64) * adapter->req_tx_queues +
2056                           sizeof(u64) * adapter->req_rx_queues +
2057                           sizeof(u64) * adapter->req_rx_queues +
2058                           sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
2059
2060         login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
2061         if (!login_rsp_buffer)
2062                 goto buf_rsp_alloc_failed;
2063
2064         rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
2065                                           rsp_buffer_size, DMA_FROM_DEVICE);
2066         if (dma_mapping_error(dev, rsp_buffer_token)) {
2067                 dev_err(dev, "Couldn't map login rsp buffer\n");
2068                 goto buf_rsp_map_failed;
2069         }
2070
2071         adapter->login_buf = login_buffer;
2072         adapter->login_buf_token = buffer_token;
2073         adapter->login_buf_sz = buffer_size;
2074         adapter->login_rsp_buf = login_rsp_buffer;
2075         adapter->login_rsp_buf_token = rsp_buffer_token;
2076         adapter->login_rsp_buf_sz = rsp_buffer_size;
2077
2078         login_buffer->len = cpu_to_be32(buffer_size);
2079         login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
2080         login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
2081         login_buffer->off_txcomp_subcrqs =
2082             cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
2083         login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
2084         login_buffer->off_rxcomp_subcrqs =
2085             cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
2086                         sizeof(u64) * adapter->req_tx_queues);
2087         login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
2088         login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
2089
2090         tx_list_p = (__be64 *)((char *)login_buffer +
2091                                       sizeof(struct ibmvnic_login_buffer));
2092         rx_list_p = (__be64 *)((char *)login_buffer +
2093                                       sizeof(struct ibmvnic_login_buffer) +
2094                                       sizeof(u64) * adapter->req_tx_queues);
2095
2096         for (i = 0; i < adapter->req_tx_queues; i++) {
2097                 if (adapter->tx_scrq[i]) {
2098                         tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
2099                                                    crq_num);
2100                 }
2101         }
2102
2103         for (i = 0; i < adapter->req_rx_queues; i++) {
2104                 if (adapter->rx_scrq[i]) {
2105                         rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
2106                                                    crq_num);
2107                 }
2108         }
2109
2110         netdev_dbg(adapter->netdev, "Login Buffer:\n");
2111         for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
2112                 netdev_dbg(adapter->netdev, "%016lx\n",
2113                            ((unsigned long int *)(adapter->login_buf))[i]);
2114         }
2115
2116         memset(&crq, 0, sizeof(crq));
2117         crq.login.first = IBMVNIC_CRQ_CMD;
2118         crq.login.cmd = LOGIN;
2119         crq.login.ioba = cpu_to_be32(buffer_token);
2120         crq.login.len = cpu_to_be32(buffer_size);
2121         ibmvnic_send_crq(adapter, &crq);
2122
2123         return;
2124
2125 buf_rsp_map_failed:
2126         kfree(login_rsp_buffer);
2127 buf_rsp_alloc_failed:
2128         dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
2129 buf_map_failed:
2130         kfree(login_buffer);
2131 buf_alloc_failed:
2132         return;
2133 }
2134
2135 static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
2136                              u32 len, u8 map_id)
2137 {
2138         union ibmvnic_crq crq;
2139
2140         memset(&crq, 0, sizeof(crq));
2141         crq.request_map.first = IBMVNIC_CRQ_CMD;
2142         crq.request_map.cmd = REQUEST_MAP;
2143         crq.request_map.map_id = map_id;
2144         crq.request_map.ioba = cpu_to_be32(addr);
2145         crq.request_map.len = cpu_to_be32(len);
2146         ibmvnic_send_crq(adapter, &crq);
2147 }
2148
2149 static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
2150 {
2151         union ibmvnic_crq crq;
2152
2153         memset(&crq, 0, sizeof(crq));
2154         crq.request_unmap.first = IBMVNIC_CRQ_CMD;
2155         crq.request_unmap.cmd = REQUEST_UNMAP;
2156         crq.request_unmap.map_id = map_id;
2157         ibmvnic_send_crq(adapter, &crq);
2158 }
2159
2160 static void send_map_query(struct ibmvnic_adapter *adapter)
2161 {
2162         union ibmvnic_crq crq;
2163
2164         memset(&crq, 0, sizeof(crq));
2165         crq.query_map.first = IBMVNIC_CRQ_CMD;
2166         crq.query_map.cmd = QUERY_MAP;
2167         ibmvnic_send_crq(adapter, &crq);
2168 }
2169
2170 /* Send a series of CRQs requesting various capabilities of the VNIC server */
2171 static void send_cap_queries(struct ibmvnic_adapter *adapter)
2172 {
2173         union ibmvnic_crq crq;
2174
2175         atomic_set(&adapter->running_cap_crqs, 0);
2176         memset(&crq, 0, sizeof(crq));
2177         crq.query_capability.first = IBMVNIC_CRQ_CMD;
2178         crq.query_capability.cmd = QUERY_CAPABILITY;
2179
2180         crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
2181         atomic_inc(&adapter->running_cap_crqs);
2182         ibmvnic_send_crq(adapter, &crq);
2183
2184         crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
2185         atomic_inc(&adapter->running_cap_crqs);
2186         ibmvnic_send_crq(adapter, &crq);
2187
2188         crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
2189         atomic_inc(&adapter->running_cap_crqs);
2190         ibmvnic_send_crq(adapter, &crq);
2191
2192         crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
2193         atomic_inc(&adapter->running_cap_crqs);
2194         ibmvnic_send_crq(adapter, &crq);
2195
2196         crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
2197         atomic_inc(&adapter->running_cap_crqs);
2198         ibmvnic_send_crq(adapter, &crq);
2199
2200         crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
2201         atomic_inc(&adapter->running_cap_crqs);
2202         ibmvnic_send_crq(adapter, &crq);
2203
2204         crq.query_capability.capability =
2205             cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
2206         atomic_inc(&adapter->running_cap_crqs);
2207         ibmvnic_send_crq(adapter, &crq);
2208
2209         crq.query_capability.capability =
2210             cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
2211         atomic_inc(&adapter->running_cap_crqs);
2212         ibmvnic_send_crq(adapter, &crq);
2213
2214         crq.query_capability.capability =
2215             cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2216         atomic_inc(&adapter->running_cap_crqs);
2217         ibmvnic_send_crq(adapter, &crq);
2218
2219         crq.query_capability.capability =
2220             cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2221         atomic_inc(&adapter->running_cap_crqs);
2222         ibmvnic_send_crq(adapter, &crq);
2223
2224         crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2225         atomic_inc(&adapter->running_cap_crqs);
2226         ibmvnic_send_crq(adapter, &crq);
2227
2228         crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2229         atomic_inc(&adapter->running_cap_crqs);
2230         ibmvnic_send_crq(adapter, &crq);
2231
2232         crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2233         atomic_inc(&adapter->running_cap_crqs);
2234         ibmvnic_send_crq(adapter, &crq);
2235
2236         crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2237         atomic_inc(&adapter->running_cap_crqs);
2238         ibmvnic_send_crq(adapter, &crq);
2239
2240         crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2241         atomic_inc(&adapter->running_cap_crqs);
2242         ibmvnic_send_crq(adapter, &crq);
2243
2244         crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2245         atomic_inc(&adapter->running_cap_crqs);
2246         ibmvnic_send_crq(adapter, &crq);
2247
2248         crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
2249         atomic_inc(&adapter->running_cap_crqs);
2250         ibmvnic_send_crq(adapter, &crq);
2251
2252         crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2253         atomic_inc(&adapter->running_cap_crqs);
2254         ibmvnic_send_crq(adapter, &crq);
2255
2256         crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2257         atomic_inc(&adapter->running_cap_crqs);
2258         ibmvnic_send_crq(adapter, &crq);
2259
2260         crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2261         atomic_inc(&adapter->running_cap_crqs);
2262         ibmvnic_send_crq(adapter, &crq);
2263
2264         crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2265         atomic_inc(&adapter->running_cap_crqs);
2266         ibmvnic_send_crq(adapter, &crq);
2267
2268         crq.query_capability.capability =
2269                         cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2270         atomic_inc(&adapter->running_cap_crqs);
2271         ibmvnic_send_crq(adapter, &crq);
2272
2273         crq.query_capability.capability =
2274                         cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2275         atomic_inc(&adapter->running_cap_crqs);
2276         ibmvnic_send_crq(adapter, &crq);
2277
2278         crq.query_capability.capability =
2279                         cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2280         atomic_inc(&adapter->running_cap_crqs);
2281         ibmvnic_send_crq(adapter, &crq);
2282
2283         crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2284         atomic_inc(&adapter->running_cap_crqs);
2285         ibmvnic_send_crq(adapter, &crq);
2286 }
2287
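/* The server has filled in the IP offload query buffer.  Log the
 * advertised offloads, set the corresponding netdev checksum features, and
 * send a CONTROL_IP_OFFLOAD CRQ enabling the checksum offloads we intend
 * to use (large send/receive stays disabled for now).
 */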
2288 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2289 {
2290         struct device *dev = &adapter->vdev->dev;
2291         struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2292         union ibmvnic_crq crq;
2293         int i;
2294
2295         dma_unmap_single(dev, adapter->ip_offload_tok,
2296                          sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2297
2298         netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2299         for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2300                 netdev_dbg(adapter->netdev, "%016lx\n",
2301                            ((unsigned long int *)(buf))[i]);
2302
2303         netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2304         netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2305         netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2306                    buf->tcp_ipv4_chksum);
2307         netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2308                    buf->tcp_ipv6_chksum);
2309         netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2310                    buf->udp_ipv4_chksum);
2311         netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2312                    buf->udp_ipv6_chksum);
2313         netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2314                    buf->large_tx_ipv4);
2315         netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2316                    buf->large_tx_ipv6);
2317         netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2318                    buf->large_rx_ipv4);
2319         netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2320                    buf->large_rx_ipv6);
2321         netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2322                    buf->max_ipv4_header_size);
2323         netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2324                    buf->max_ipv6_header_size);
2325         netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2326                    buf->max_tcp_header_size);
2327         netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2328                    buf->max_udp_header_size);
2329         netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2330                    buf->max_large_tx_size);
2331         netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2332                    buf->max_large_rx_size);
2333         netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2334                    buf->ipv6_extension_header);
2335         netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2336                    buf->tcp_pseudosum_req);
2337         netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2338                    buf->num_ipv6_ext_headers);
2339         netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2340                    buf->off_ipv6_ext_headers);
2341
2342         adapter->ip_offload_ctrl_tok =
2343             dma_map_single(dev, &adapter->ip_offload_ctrl,
2344                            sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2345
2346         if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2347                 dev_err(dev, "Couldn't map ip offload control buffer\n");
2348                 return;
2349         }
2350
2351         adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2352         adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2353         adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2354         adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2355         adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2356
2357         /* large_tx/rx disabled for now, additional features needed */
2358         adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2359         adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2360         adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2361         adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2362
2363         adapter->netdev->features = NETIF_F_GSO;
2364
2365         if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2366                 adapter->netdev->features |= NETIF_F_IP_CSUM;
2367
2368         if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2369                 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2370
2371         if ((adapter->netdev->features &
2372             (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2373                 adapter->netdev->features |= NETIF_F_RXCSUM;
2374
2375         memset(&crq, 0, sizeof(crq));
2376         crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2377         crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2378         crq.control_ip_offload.len =
2379             cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2380         crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2381         ibmvnic_send_crq(adapter, &crq);
2382 }
2383
2384 static void handle_error_info_rsp(union ibmvnic_crq *crq,
2385                                   struct ibmvnic_adapter *adapter)
2386 {
2387         struct device *dev = &adapter->vdev->dev;
2388         struct ibmvnic_error_buff *error_buff, *tmp;
2389         unsigned long flags;
2390         bool found = false;
2391         int i;
2392
2393         if (!crq->request_error_rsp.rc.code) {
2394                 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2395                          crq->request_error_rsp.rc.code);
2396                 return;
2397         }
2398
2399         spin_lock_irqsave(&adapter->error_list_lock, flags);
2400         list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
2401                 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2402                         found = true;
2403                         list_del(&error_buff->list);
2404                         break;
2405                 }
2406         spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2407
2408         if (!found) {
2409                 dev_err(dev, "Couldn't find error id %x\n",
2410                         be32_to_cpu(crq->request_error_rsp.error_id));
2411                 return;
2412         }
2413
2414         dev_err(dev, "Detailed info for error id %x:",
2415                 be32_to_cpu(crq->request_error_rsp.error_id));
2416
2417         for (i = 0; i < error_buff->len; i++) {
2418                 pr_cont("%02x", (int)error_buff->buff[i]);
2419                 if (i % 8 == 7)
2420                         pr_cont(" ");
2421         }
2422         pr_cont("\n");
2423
2424         dma_unmap_single(dev, error_buff->dma, error_buff->len,
2425                          DMA_FROM_DEVICE);
2426         kfree(error_buff->buff);
2427         kfree(error_buff);
2428 }
2429
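/* Allocate and map a buffer for detailed error data, queue it on the
 * adapter's error list, send a REQUEST_ERROR_INFO CRQ for the error id
 * reported in err_crq, and wait (with a 30 second timeout) for the
 * response.
 */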
2430 static void request_error_information(struct ibmvnic_adapter *adapter,
2431                                       union ibmvnic_crq *err_crq)
2432 {
2433         struct device *dev = &adapter->vdev->dev;
2434         struct net_device *netdev = adapter->netdev;
2435         struct ibmvnic_error_buff *error_buff;
2436         unsigned long timeout = msecs_to_jiffies(30000);
2437         union ibmvnic_crq crq;
2438         unsigned long flags;
2439         int rc, detail_len;
2440
2441         error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2442         if (!error_buff)
2443                 return;
2444
2445         detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
2446         error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2447         if (!error_buff->buff) {
2448                 kfree(error_buff);
2449                 return;
2450         }
2451
2452         error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2453                                          DMA_FROM_DEVICE);
2454         if (dma_mapping_error(dev, error_buff->dma)) {
2455                 netdev_err(netdev, "Couldn't map error buffer\n");
2456                 kfree(error_buff->buff);
2457                 kfree(error_buff);
2458                 return;
2459         }
2460
2461         error_buff->len = detail_len;
2462         error_buff->error_id = err_crq->error_indication.error_id;
2463
2464         spin_lock_irqsave(&adapter->error_list_lock, flags);
2465         list_add_tail(&error_buff->list, &adapter->errors);
2466         spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2467
2468         memset(&crq, 0, sizeof(crq));
2469         crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2470         crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2471         crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2472         crq.request_error_info.len = cpu_to_be32(detail_len);
2473         crq.request_error_info.error_id = err_crq->error_indication.error_id;
2474
2475         rc = ibmvnic_send_crq(adapter, &crq);
2476         if (rc) {
2477                 netdev_err(netdev, "failed to request error information\n");
2478                 goto err_info_fail;
2479         }
2480
2481         if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2482                 netdev_err(netdev, "timeout waiting for error information\n");
2483                 goto err_info_fail;
2484         }
2485
2486         return;
2487
2488 err_info_fail:
2489         spin_lock_irqsave(&adapter->error_list_lock, flags);
2490         list_del(&error_buff->list);
2491         spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2492
2493         kfree(error_buff->buff);
2494         kfree(error_buff);
2495 }
2496
2497 static void handle_error_indication(union ibmvnic_crq *crq,
2498                                     struct ibmvnic_adapter *adapter)
2499 {
2500         struct device *dev = &adapter->vdev->dev;
2501
2502         dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2503                 crq->error_indication.flags
2504                         & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2505                 be32_to_cpu(crq->error_indication.error_id),
2506                 be16_to_cpu(crq->error_indication.error_cause));
2507
2508         if (be32_to_cpu(crq->error_indication.error_id))
2509                 request_error_information(adapter, crq);
2510 }
2511
2512 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2513                                   struct ibmvnic_adapter *adapter)
2514 {
2515         struct net_device *netdev = adapter->netdev;
2516         struct device *dev = &adapter->vdev->dev;
2517         long rc;
2518
2519         rc = crq->change_mac_addr_rsp.rc.code;
2520         if (rc) {
2521                 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2522                 return;
2523         }
2524         memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2525                ETH_ALEN);
2526 }
2527
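/* Record one REQUEST_CAPABILITY response.  On PARTIALSUCCESS, release the
 * sub-CRQs and retry the capability request with the value the server is
 * willing to grant.  Once all outstanding capability CRQs have completed,
 * kick off the IP offload query.
 */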
2528 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2529                                    struct ibmvnic_adapter *adapter)
2530 {
2531         struct device *dev = &adapter->vdev->dev;
2532         u64 *req_value;
2533         char *name;
2534
2535         atomic_dec(&adapter->running_cap_crqs);
2536         switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2537         case REQ_TX_QUEUES:
2538                 req_value = &adapter->req_tx_queues;
2539                 name = "tx";
2540                 break;
2541         case REQ_RX_QUEUES:
2542                 req_value = &adapter->req_rx_queues;
2543                 name = "rx";
2544                 break;
2545         case REQ_RX_ADD_QUEUES:
2546                 req_value = &adapter->req_rx_add_queues;
2547                 name = "rx_add";
2548                 break;
2549         case REQ_TX_ENTRIES_PER_SUBCRQ:
2550                 req_value = &adapter->req_tx_entries_per_subcrq;
2551                 name = "tx_entries_per_subcrq";
2552                 break;
2553         case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2554                 req_value = &adapter->req_rx_add_entries_per_subcrq;
2555                 name = "rx_add_entries_per_subcrq";
2556                 break;
2557         case REQ_MTU:
2558                 req_value = &adapter->req_mtu;
2559                 name = "mtu";
2560                 break;
2561         case PROMISC_REQUESTED:
2562                 req_value = &adapter->promisc;
2563                 name = "promisc";
2564                 break;
2565         default:
2566                 dev_err(dev, "Got invalid cap request rsp %d\n",
2567                         crq->request_capability.capability);
2568                 return;
2569         }
2570
2571         switch (crq->request_capability_rsp.rc.code) {
2572         case SUCCESS:
2573                 break;
2574         case PARTIALSUCCESS:
2575                 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2576                          *req_value,
2577                          (long int)be64_to_cpu(crq->request_capability_rsp.
2578                                                number), name);
2579                 release_sub_crqs(adapter);
2580                 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
2581                 ibmvnic_send_req_caps(adapter, 1);
2582                 return;
2583         default:
2584                 dev_err(dev, "Error %d in request cap rsp\n",
2585                         crq->request_capability_rsp.rc.code);
2586                 return;
2587         }
2588
2589         /* Done receiving requested capabilities, query IP offload support */
2590         if (atomic_read(&adapter->running_cap_crqs) == 0) {
2591                 union ibmvnic_crq newcrq;
2592                 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2593                 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2594                     &adapter->ip_offload_buf;
2595
2596                 adapter->wait_capability = false;
2597                 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2598                                                          buf_sz,
2599                                                          DMA_FROM_DEVICE);
2600
2601                 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2602                         if (!firmware_has_feature(FW_FEATURE_CMO))
2603                                 dev_err(dev, "Couldn't map offload buffer\n");
2604                         return;
2605                 }
2606
2607                 memset(&newcrq, 0, sizeof(newcrq));
2608                 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2609                 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2610                 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2611                 newcrq.query_ip_offload.ioba =
2612                     cpu_to_be32(adapter->ip_offload_tok);
2613
2614                 ibmvnic_send_crq(adapter, &newcrq);
2615         }
2616 }
2617
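/* Handle the LOGIN response: unmap the login buffers, flag a renegotiation
 * if the server rejected the requested queue counts, and sanity-check the
 * response against the request (removing the adapter on a mismatch) before
 * completing init_done.
 */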
2618 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2619                             struct ibmvnic_adapter *adapter)
2620 {
2621         struct device *dev = &adapter->vdev->dev;
2622         struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2623         struct ibmvnic_login_buffer *login = adapter->login_buf;
2624         int i;
2625
2626         dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2627                          DMA_BIDIRECTIONAL);
2628         dma_unmap_single(dev, adapter->login_rsp_buf_token,
2629                          adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2630
2631         /* If the number of queues requested can't be allocated by the
2632          * server, the login response will return with code 1. We will need
2633          * to resend the login buffer with fewer queues requested.
2634          */
2635         if (login_rsp_crq->generic.rc.code) {
2636                 adapter->renegotiate = true;
2637                 complete(&adapter->init_done);
2638                 return 0;
2639         }
2640
2641         netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2642         for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2643                 netdev_dbg(adapter->netdev, "%016lx\n",
2644                            ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2645         }
2646
2647         /* Sanity checks */
2648         if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2649             (be32_to_cpu(login->num_rxcomp_subcrqs) *
2650              adapter->req_rx_add_queues !=
2651              be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2652                 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2653                 ibmvnic_remove(adapter->vdev);
2654                 return -EIO;
2655         }
2656         complete(&adapter->init_done);
2657
2658         return 0;
2659 }
2660
2661 static void handle_request_map_rsp(union ibmvnic_crq *crq,
2662                                    struct ibmvnic_adapter *adapter)
2663 {
2664         struct device *dev = &adapter->vdev->dev;
2665         u8 map_id = crq->request_map_rsp.map_id;
2666         int tx_subcrqs;
2667         int rx_subcrqs;
2668         long rc;
2669         int i;
2670
2671         tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2672         rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2673
2674         rc = crq->request_map_rsp.rc.code;
2675         if (rc) {
2676                 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2677                 adapter->map_id--;
2678                 /* need to find and zero tx/rx_pool map_id */
2679                 for (i = 0; i < tx_subcrqs; i++) {
2680                         if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2681                                 adapter->tx_pool[i].long_term_buff.map_id = 0;
2682                 }
2683                 for (i = 0; i < rx_subcrqs; i++) {
2684                         if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2685                                 adapter->rx_pool[i].long_term_buff.map_id = 0;
2686                 }
2687         }
2688         complete(&adapter->fw_done);
2689 }
2690
2691 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2692                                      struct ibmvnic_adapter *adapter)
2693 {
2694         struct device *dev = &adapter->vdev->dev;
2695         long rc;
2696
2697         rc = crq->request_unmap_rsp.rc.code;
2698         if (rc)
2699                 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2700 }
2701
2702 static void handle_query_map_rsp(union ibmvnic_crq *crq,
2703                                  struct ibmvnic_adapter *adapter)
2704 {
2705         struct net_device *netdev = adapter->netdev;
2706         struct device *dev = &adapter->vdev->dev;
2707         long rc;
2708
2709         rc = crq->query_map_rsp.rc.code;
2710         if (rc) {
2711                 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2712                 return;
2713         }
2714         netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2715                    crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2716                    crq->query_map_rsp.free_pages);
2717 }
2718
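/* Handle a QUERY_CAPABILITY_RSP CRQ. Record the reported capability in
 * the adapter structure and, once all outstanding capability queries have
 * been answered, send the capability requests to the server.
 */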
2719 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2720                                  struct ibmvnic_adapter *adapter)
2721 {
2722         struct net_device *netdev = adapter->netdev;
2723         struct device *dev = &adapter->vdev->dev;
2724         long rc;
2725
2726         atomic_dec(&adapter->running_cap_crqs);
2727         netdev_dbg(netdev, "Outstanding queries: %d\n",
2728                    atomic_read(&adapter->running_cap_crqs));
2729         rc = crq->query_capability.rc.code;
2730         if (rc) {
2731                 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2732                 goto out;
2733         }
2734
2735         switch (be16_to_cpu(crq->query_capability.capability)) {
2736         case MIN_TX_QUEUES:
2737                 adapter->min_tx_queues =
2738                     be64_to_cpu(crq->query_capability.number);
2739                 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2740                            adapter->min_tx_queues);
2741                 break;
2742         case MIN_RX_QUEUES:
2743                 adapter->min_rx_queues =
2744                     be64_to_cpu(crq->query_capability.number);
2745                 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2746                            adapter->min_rx_queues);
2747                 break;
2748         case MIN_RX_ADD_QUEUES:
2749                 adapter->min_rx_add_queues =
2750                     be64_to_cpu(crq->query_capability.number);
2751                 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2752                            adapter->min_rx_add_queues);
2753                 break;
2754         case MAX_TX_QUEUES:
2755                 adapter->max_tx_queues =
2756                     be64_to_cpu(crq->query_capability.number);
2757                 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2758                            adapter->max_tx_queues);
2759                 break;
2760         case MAX_RX_QUEUES:
2761                 adapter->max_rx_queues =
2762                     be64_to_cpu(crq->query_capability.number);
2763                 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2764                            adapter->max_rx_queues);
2765                 break;
2766         case MAX_RX_ADD_QUEUES:
2767                 adapter->max_rx_add_queues =
2768                     be64_to_cpu(crq->query_capability.number);
2769                 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2770                            adapter->max_rx_add_queues);
2771                 break;
2772         case MIN_TX_ENTRIES_PER_SUBCRQ:
2773                 adapter->min_tx_entries_per_subcrq =
2774                     be64_to_cpu(crq->query_capability.number);
2775                 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2776                            adapter->min_tx_entries_per_subcrq);
2777                 break;
2778         case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2779                 adapter->min_rx_add_entries_per_subcrq =
2780                     be64_to_cpu(crq->query_capability.number);
2781                 netdev_dbg(netdev, "min_rx_add_entries_per_subcrq = %lld\n",
2782                            adapter->min_rx_add_entries_per_subcrq);
2783                 break;
2784         case MAX_TX_ENTRIES_PER_SUBCRQ:
2785                 adapter->max_tx_entries_per_subcrq =
2786                     be64_to_cpu(crq->query_capability.number);
2787                 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2788                            adapter->max_tx_entries_per_subcrq);
2789                 break;
2790         case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2791                 adapter->max_rx_add_entries_per_subcrq =
2792                     be64_to_cpu(crq->query_capability.number);
2793                 netdev_dbg(netdev, "max_rx_add_entries_per_subcrq = %lld\n",
2794                            adapter->max_rx_add_entries_per_subcrq);
2795                 break;
2796         case TCP_IP_OFFLOAD:
2797                 adapter->tcp_ip_offload =
2798                     be64_to_cpu(crq->query_capability.number);
2799                 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2800                            adapter->tcp_ip_offload);
2801                 break;
2802         case PROMISC_SUPPORTED:
2803                 adapter->promisc_supported =
2804                     be64_to_cpu(crq->query_capability.number);
2805                 netdev_dbg(netdev, "promisc_supported = %lld\n",
2806                            adapter->promisc_supported);
2807                 break;
2808         case MIN_MTU:
2809                 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2810                 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2811                 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2812                 break;
2813         case MAX_MTU:
2814                 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2815                 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2816                 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2817                 break;
2818         case MAX_MULTICAST_FILTERS:
2819                 adapter->max_multicast_filters =
2820                     be64_to_cpu(crq->query_capability.number);
2821                 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2822                            adapter->max_multicast_filters);
2823                 break;
2824         case VLAN_HEADER_INSERTION:
2825                 adapter->vlan_header_insertion =
2826                     be64_to_cpu(crq->query_capability.number);
2827                 if (adapter->vlan_header_insertion)
2828                         netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2829                 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2830                            adapter->vlan_header_insertion);
2831                 break;
2832         case RX_VLAN_HEADER_INSERTION:
2833                 adapter->rx_vlan_header_insertion =
2834                     be64_to_cpu(crq->query_capability.number);
2835                 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
2836                            adapter->rx_vlan_header_insertion);
2837                 break;
2838         case MAX_TX_SG_ENTRIES:
2839                 adapter->max_tx_sg_entries =
2840                     be64_to_cpu(crq->query_capability.number);
2841                 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2842                            adapter->max_tx_sg_entries);
2843                 break;
2844         case RX_SG_SUPPORTED:
2845                 adapter->rx_sg_supported =
2846                     be64_to_cpu(crq->query_capability.number);
2847                 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2848                            adapter->rx_sg_supported);
2849                 break;
2850         case OPT_TX_COMP_SUB_QUEUES:
2851                 adapter->opt_tx_comp_sub_queues =
2852                     be64_to_cpu(crq->query_capability.number);
2853                 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2854                            adapter->opt_tx_comp_sub_queues);
2855                 break;
2856         case OPT_RX_COMP_QUEUES:
2857                 adapter->opt_rx_comp_queues =
2858                     be64_to_cpu(crq->query_capability.number);
2859                 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2860                            adapter->opt_rx_comp_queues);
2861                 break;
2862         case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2863                 adapter->opt_rx_bufadd_q_per_rx_comp_q =
2864                     be64_to_cpu(crq->query_capability.number);
2865                 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2866                            adapter->opt_rx_bufadd_q_per_rx_comp_q);
2867                 break;
2868         case OPT_TX_ENTRIES_PER_SUBCRQ:
2869                 adapter->opt_tx_entries_per_subcrq =
2870                     be64_to_cpu(crq->query_capability.number);
2871                 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2872                            adapter->opt_tx_entries_per_subcrq);
2873                 break;
2874         case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2875                 adapter->opt_rxba_entries_per_subcrq =
2876                     be64_to_cpu(crq->query_capability.number);
2877                 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2878                            adapter->opt_rxba_entries_per_subcrq);
2879                 break;
2880         case TX_RX_DESC_REQ:
2881                 adapter->tx_rx_desc_req = crq->query_capability.number;
2882                 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2883                            adapter->tx_rx_desc_req);
2884                 break;
2885
2886         default:
2887                 netdev_err(netdev, "Got invalid cap rsp %d\n",
2888                            be16_to_cpu(crq->query_capability.capability));
2889         }
2890
2891 out:
2892         if (atomic_read(&adapter->running_cap_crqs) == 0) {
2893                 adapter->wait_capability = false;
2894                 ibmvnic_send_req_caps(adapter, 0);
2895         }
2896 }
2897
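/* Work handler for transport events. Release the sub-CRQs and, if the
 * partition was migrated, re-enable the main CRQ and resend the CRQ
 * initialization request to the VNIC server.
 */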
2898 static void ibmvnic_xport_event(struct work_struct *work)
2899 {
2900         struct ibmvnic_adapter *adapter = container_of(work,
2901                                                        struct ibmvnic_adapter,
2902                                                        ibmvnic_xport);
2903         struct device *dev = &adapter->vdev->dev;
2904         long rc;
2905
2906         release_sub_crqs(adapter);
2907         if (adapter->migrated) {
2908                 rc = ibmvnic_reenable_crq_queue(adapter);
2909                 if (rc)
2910                         dev_err(dev, "Error after enable rc=%ld\n", rc);
2911                 adapter->migrated = false;
2912                 rc = ibmvnic_send_crq_init(adapter);
2913                 if (rc)
2914                         dev_err(dev, "Error sending init rc=%ld\n", rc);
2915         }
2916 }
2917
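/* Dispatch a single CRQ message: initialization messages drive the CRQ
 * handshake, transport events signal partition migration or failover,
 * and command responses are routed to their handlers by command code.
 */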
2918 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
2919                                struct ibmvnic_adapter *adapter)
2920 {
2921         struct ibmvnic_generic_crq *gen_crq = &crq->generic;
2922         struct net_device *netdev = adapter->netdev;
2923         struct device *dev = &adapter->vdev->dev;
2924         u64 *u64_crq = (u64 *)crq;
2925         long rc;
2926
2927         netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
2928                    (unsigned long int)cpu_to_be64(u64_crq[0]),
2929                    (unsigned long int)cpu_to_be64(u64_crq[1]));
2930         switch (gen_crq->first) {
2931         case IBMVNIC_CRQ_INIT_RSP:
2932                 switch (gen_crq->cmd) {
2933                 case IBMVNIC_CRQ_INIT:
2934                         dev_info(dev, "Partner initialized\n");
2935                         /* Send back a response */
2936                         rc = ibmvnic_send_crq_init_complete(adapter);
2937                         if (!rc)
2938                                 schedule_work(&adapter->vnic_crq_init);
2939                         else
2940                                 dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
2941                         break;
2942                 case IBMVNIC_CRQ_INIT_COMPLETE:
2943                         dev_info(dev, "Partner initialization complete\n");
2944                         send_version_xchg(adapter);
2945                         break;
2946                 default:
2947                         dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
2948                 }
2949                 return;
2950         case IBMVNIC_CRQ_XPORT_EVENT:
2951                 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
2952                         dev_info(dev, "Re-enabling adapter\n");
2953                         adapter->migrated = true;
2954                         schedule_work(&adapter->ibmvnic_xport);
2955                 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
2956                         dev_info(dev, "Backing device failover detected\n");
2957                         netif_carrier_off(netdev);
2958                         adapter->failover = true;
2959                 } else {
2960                         /* The adapter lost the connection */
2961                         dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
2962                                 gen_crq->cmd);
2963                         schedule_work(&adapter->ibmvnic_xport);
2964                 }
2965                 return;
2966         case IBMVNIC_CRQ_CMD_RSP:
2967                 break;
2968         default:
2969                 dev_err(dev, "Got an invalid msg type 0x%02x\n",
2970                         gen_crq->first);
2971                 return;
2972         }
2973
2974         switch (gen_crq->cmd) {
2975         case VERSION_EXCHANGE_RSP:
2976                 rc = crq->version_exchange_rsp.rc.code;
2977                 if (rc) {
2978                         dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
2979                         break;
2980                 }
2981                 dev_info(dev, "Partner protocol version is %d\n",
2982                          be16_to_cpu(crq->version_exchange_rsp.version));
2983                 if (be16_to_cpu(crq->version_exchange_rsp.version) <
2984                     ibmvnic_version)
2985                         ibmvnic_version =
2986                             be16_to_cpu(crq->version_exchange_rsp.version);
2987                 send_cap_queries(adapter);
2988                 break;
2989         case QUERY_CAPABILITY_RSP:
2990                 handle_query_cap_rsp(crq, adapter);
2991                 break;
2992         case QUERY_MAP_RSP:
2993                 handle_query_map_rsp(crq, adapter);
2994                 break;
2995         case REQUEST_MAP_RSP:
2996                 handle_request_map_rsp(crq, adapter);
2997                 break;
2998         case REQUEST_UNMAP_RSP:
2999                 handle_request_unmap_rsp(crq, adapter);
3000                 break;
3001         case REQUEST_CAPABILITY_RSP:
3002                 handle_request_cap_rsp(crq, adapter);
3003                 break;
3004         case LOGIN_RSP:
3005                 netdev_dbg(netdev, "Got Login Response\n");
3006                 handle_login_rsp(crq, adapter);
3007                 break;
3008         case LOGICAL_LINK_STATE_RSP:
3009                 netdev_dbg(netdev,
3010                            "Got Logical Link State Response, state: %d rc: %d\n",
3011                            crq->logical_link_state_rsp.link_state,
3012                            crq->logical_link_state_rsp.rc.code);
3013                 adapter->logical_link_state =
3014                     crq->logical_link_state_rsp.link_state;
3015                 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
3016                 complete(&adapter->init_done);
3017                 break;
3018         case LINK_STATE_INDICATION:
3019                 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3020                 adapter->phys_link_state =
3021                     crq->link_state_indication.phys_link_state;
3022                 adapter->logical_link_state =
3023                     crq->link_state_indication.logical_link_state;
3024                 break;
3025         case CHANGE_MAC_ADDR_RSP:
3026                 netdev_dbg(netdev, "Got MAC address change Response\n");
3027                 handle_change_mac_rsp(crq, adapter);
3028                 break;
3029         case ERROR_INDICATION:
3030                 netdev_dbg(netdev, "Got Error Indication\n");
3031                 handle_error_indication(crq, adapter);
3032                 break;
3033         case REQUEST_ERROR_RSP:
3034                 netdev_dbg(netdev, "Got Error Detail Response\n");
3035                 handle_error_info_rsp(crq, adapter);
3036                 break;
3037         case REQUEST_STATISTICS_RSP:
3038                 netdev_dbg(netdev, "Got Statistics Response\n");
3039                 complete(&adapter->stats_done);
3040                 break;
3041         case QUERY_IP_OFFLOAD_RSP:
3042                 netdev_dbg(netdev, "Got Query IP offload Response\n");
3043                 handle_query_ip_offload_rsp(adapter);
3044                 break;
3045         case MULTICAST_CTRL_RSP:
3046                 netdev_dbg(netdev, "Got multicast control Response\n");
3047                 break;
3048         case CONTROL_IP_OFFLOAD_RSP:
3049                 netdev_dbg(netdev, "Got Control IP offload Response\n");
3050                 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3051                                  sizeof(adapter->ip_offload_ctrl),
3052                                  DMA_TO_DEVICE);
3053                 complete(&adapter->init_done);
3054                 break;
3055         case COLLECT_FW_TRACE_RSP:
3056                 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3057                 complete(&adapter->fw_done);
3058                 break;
3059         default:
3060                 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3061                            gen_crq->cmd);
3062         }
3063 }
3064
3065 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3066 {
3067         struct ibmvnic_adapter *adapter = instance;
3068
3069         tasklet_schedule(&adapter->tasklet);
3070         return IRQ_HANDLED;
3071 }
3072
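/* Tasklet scheduled from the CRQ interrupt handler. Drain all valid
 * messages from the CRQ under the queue lock, and keep polling while
 * capability query responses are still outstanding.
 */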
3073 static void ibmvnic_tasklet(void *data)
3074 {
3075         struct ibmvnic_adapter *adapter = data;
3076         struct ibmvnic_crq_queue *queue = &adapter->crq;
3077         union ibmvnic_crq *crq;
3078         unsigned long flags;
3079         bool done = false;
3080
3081         spin_lock_irqsave(&queue->lock, flags);
3082         while (!done) {
3083                 /* Pull all the valid messages off the CRQ */
3084                 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3085                         ibmvnic_handle_crq(crq, adapter);
3086                         crq->generic.first = 0;
3087                 }
3088
3089                 /* remain in tasklet until all
3090                  * capability responses are received
3091                  */
3092                 if (!adapter->wait_capability)
3093                         done = true;
3094         }
3095         /* if capability CRQs were sent in this tasklet, the following
3096          * tasklet must wait until all responses are received
3097          */
3098         if (atomic_read(&adapter->running_cap_crqs) != 0)
3099                 adapter->wait_capability = true;
3100         spin_unlock_irqrestore(&queue->lock, flags);
3101 }
3102
3103 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3104 {
3105         struct vio_dev *vdev = adapter->vdev;
3106         int rc;
3107
3108         do {
3109                 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3110         } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3111
3112         if (rc)
3113                 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3114
3115         return rc;
3116 }
3117
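/* Reset the main CRQ: free it with the hypervisor, zero the message
 * page, and register it again with H_REG_CRQ.
 */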
3118 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3119 {
3120         struct ibmvnic_crq_queue *crq = &adapter->crq;
3121         struct device *dev = &adapter->vdev->dev;
3122         struct vio_dev *vdev = adapter->vdev;
3123         int rc;
3124
3125         /* Close the CRQ */
3126         do {
3127                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3128         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3129
3130         /* Clean out the queue */
3131         memset(crq->msgs, 0, PAGE_SIZE);
3132         crq->cur = 0;
3133
3134         /* And re-open it again */
3135         rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3136                                 crq->msg_token, PAGE_SIZE);
3137
3138         if (rc == H_CLOSED)
3139                 /* Adapter is good, but other end is not ready */
3140                 dev_warn(dev, "Partner adapter not ready\n");
3141         else if (rc != 0)
3142                 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3143
3144         return rc;
3145 }
3146
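/* Tear down the main CRQ: free the IRQ, kill the tasklet, free the
 * queue with the hypervisor, and unmap and free the message page.
 */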
3147 static void release_crq_queue(struct ibmvnic_adapter *adapter)
3148 {
3149         struct ibmvnic_crq_queue *crq = &adapter->crq;
3150         struct vio_dev *vdev = adapter->vdev;
3151         long rc;
3152
3153         if (!crq->msgs)
3154                 return;
3155
3156         netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3157         free_irq(vdev->irq, adapter);
3158         tasklet_kill(&adapter->tasklet);
3159         do {
3160                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3161         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3162
3163         dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3164                          DMA_BIDIRECTIONAL);
3165         free_page((unsigned long)crq->msgs);
3166         crq->msgs = NULL;
3167 }
3168
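/* Allocate and register the main CRQ: get a zeroed page for the message
 * queue, DMA map it, register it with the hypervisor (falling back to a
 * CRQ reset if the resource is still busy, e.g. after a kexec), then set
 * up the tasklet and the CRQ interrupt.
 */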
3169 static int init_crq_queue(struct ibmvnic_adapter *adapter)
3170 {
3171         struct ibmvnic_crq_queue *crq = &adapter->crq;
3172         struct device *dev = &adapter->vdev->dev;
3173         struct vio_dev *vdev = adapter->vdev;
3174         int rc, retrc = -ENOMEM;
3175
3176         if (crq->msgs)
3177                 return 0;
3178
3179         crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3180         /* Should we allocate more than one page? */
3181
3182         if (!crq->msgs)
3183                 return -ENOMEM;
3184
3185         crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3186         crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3187                                         DMA_BIDIRECTIONAL);
3188         if (dma_mapping_error(dev, crq->msg_token))
3189                 goto map_failed;
3190
3191         rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3192                                 crq->msg_token, PAGE_SIZE);
3193
3194         if (rc == H_RESOURCE)
3195                 /* maybe kexecing and resource is busy. try a reset */
3196                 rc = ibmvnic_reset_crq(adapter);
3197         retrc = rc;
3198
3199         if (rc == H_CLOSED) {
3200                 dev_warn(dev, "Partner adapter not ready\n");
3201         } else if (rc) {
3202                 dev_warn(dev, "Error %d opening adapter\n", rc);
3203                 goto reg_crq_failed;
3204         }
3205
3206         retrc = 0;
3207
3208         tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
3209                      (unsigned long)adapter);
3210
3211         netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3212         rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3213                          adapter);
3214         if (rc) {
3215                 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3216                         vdev->irq, rc);
3217                 goto req_irq_failed;
3218         }
3219
3220         rc = vio_enable_interrupts(vdev);
3221         if (rc) {
3222                 dev_err(dev, "Error %d enabling interrupts\n", rc);
3223                 goto req_irq_failed;
3224         }
3225
3226         crq->cur = 0;
3227         spin_lock_init(&crq->lock);
3228
3229         return retrc;
3230
3231 req_irq_failed:
3232         tasklet_kill(&adapter->tasklet);
3233         do {
3234                 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3235         } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3236 reg_crq_failed:
3237         dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3238 map_failed:
3239         free_page((unsigned long)crq->msgs);
3240         crq->msgs = NULL;
3241         return retrc;
3242 }
3243
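/* Work item scheduled when the VNIC server sends a CRQ init message.
 * Re-run the version exchange and wait for passive initialization to
 * finish, then either re-open the device after a failover or register
 * the net device on the initial passive initialization.
 */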
3244 static void handle_crq_init_rsp(struct work_struct *work)
3245 {
3246         struct ibmvnic_adapter *adapter = container_of(work,
3247                                                        struct ibmvnic_adapter,
3248                                                        vnic_crq_init);
3249         struct device *dev = &adapter->vdev->dev;
3250         struct net_device *netdev = adapter->netdev;
3251         unsigned long timeout = msecs_to_jiffies(30000);
3252         bool restart = false;
3253         int rc;
3254
3255         if (adapter->failover) {
3256                 release_sub_crqs(adapter);
3257                 if (netif_running(netdev)) {
3258                         netif_tx_disable(netdev);
3259                         ibmvnic_close(netdev);
3260                         restart = true;
3261                 }
3262         }
3263
3264         reinit_completion(&adapter->init_done);
3265         send_version_xchg(adapter);
3266         if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3267                 dev_err(dev, "Passive init timeout\n");
3268                 goto task_failed;
3269         }
3270
3271         netdev->mtu = adapter->req_mtu - ETH_HLEN;
3272
3273         if (adapter->failover) {
3274                 adapter->failover = false;
3275                 if (restart) {
3276                         rc = ibmvnic_open(netdev);
3277                         if (rc)
3278                                 goto restart_failed;
3279                 }
3280                 netif_carrier_on(netdev);
3281                 return;
3282         }
3283
3284         rc = register_netdev(netdev);
3285         if (rc) {
3286                 dev_err(dev,
3287                         "failed to register netdev rc=%d\n", rc);
3288                 goto register_failed;
3289         }
3290         dev_info(dev, "ibmvnic registered\n");
3291
3292         return;
3293
3294 restart_failed:
3295         dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
3296 register_failed:
3297         release_sub_crqs(adapter);
3298 task_failed:
3299         dev_err(dev, "Passive initialization was not successful\n");
3300 }
3301
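/* Initialize communication with the VNIC server: set up the main CRQ,
 * start the CRQ initialization handshake and wait up to 30 seconds for
 * it to complete, then allocate the sub-CRQs.
 */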
3302 static int ibmvnic_init(struct ibmvnic_adapter *adapter)
3303 {
3304         struct device *dev = &adapter->vdev->dev;
3305         unsigned long timeout = msecs_to_jiffies(30000);
3306         int rc;
3307
3308         rc = init_crq_queue(adapter);
3309         if (rc) {
3310                 dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
3311                 return rc;
3312         }
3313
3314         init_completion(&adapter->init_done);
3315         ibmvnic_send_crq_init(adapter);
3316         if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3317                 dev_err(dev, "Initialization sequence timed out\n");
3318                 release_crq_queue(adapter);
3319                 return -ETIMEDOUT;
3320         }
3321
3322         rc = init_sub_crqs(adapter);
3323         if (rc) {
3324                 dev_err(dev, "Initialization of sub crqs failed\n");
3325                 release_crq_queue(adapter);
3326         }
3327
3328         return rc;
3329 }
3330
3331 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3332 {
3333         struct ibmvnic_adapter *adapter;
3334         struct net_device *netdev;
3335         unsigned char *mac_addr_p;
3336         int rc;
3337
3338         dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3339                 dev->unit_address);
3340
3341         mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3342                                                         VETH_MAC_ADDR, NULL);
3343         if (!mac_addr_p) {
3344                 dev_err(&dev->dev,
3345                         "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3346                         __FILE__, __LINE__);
3347                 return -EINVAL;
3348         }
3349
3350         netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3351                                    IBMVNIC_MAX_TX_QUEUES);
3352         if (!netdev)
3353                 return -ENOMEM;
3354
3355         adapter = netdev_priv(netdev);
3356         dev_set_drvdata(&dev->dev, netdev);
3357         adapter->vdev = dev;
3358         adapter->netdev = netdev;
3359         adapter->failover = false;
3360
3361         ether_addr_copy(adapter->mac_addr, mac_addr_p);
3362         ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3363         netdev->irq = dev->irq;
3364         netdev->netdev_ops = &ibmvnic_netdev_ops;
3365         netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3366         SET_NETDEV_DEV(netdev, &dev->dev);
3367
3368         INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
3369         INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
3370
3371         spin_lock_init(&adapter->stats_lock);
3372
3373         INIT_LIST_HEAD(&adapter->errors);
3374         spin_lock_init(&adapter->error_list_lock);
3375
3376         rc = ibmvnic_init(adapter);
3377         if (rc) {
3378                 free_netdev(netdev);
3379                 return rc;
3380         }
3381
3382         netdev->mtu = adapter->req_mtu - ETH_HLEN;
3383         adapter->is_closed = false;
3384
3385         rc = register_netdev(netdev);
3386         if (rc) {
3387                 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3388                 free_netdev(netdev);
3389                 return rc;
3390         }
3391         dev_info(&dev->dev, "ibmvnic registered\n");
3392
3393         return 0;
3394 }
3395
3396 static int ibmvnic_remove(struct vio_dev *dev)
3397 {
3398         struct net_device *netdev = dev_get_drvdata(&dev->dev);
3399         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3400
3401         unregister_netdev(netdev);
3402
3403         release_resources(adapter);
3404         release_sub_crqs(adapter);
3405         release_crq_queue(adapter);
3406
3407         free_netdev(netdev);
3408         dev_set_drvdata(&dev->dev, NULL);
3409
3410         return 0;
3411 }
3412
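/* Estimate the IO entitlement (DMA window space) the device needs: one
 * page for the main CRQ, the statistics buffer, four pages per sub-CRQ,
 * and the long term buffers of every rx pool. Before probe has run,
 * return the default IO entitlement.
 */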
3413 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3414 {
3415         struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3416         struct ibmvnic_adapter *adapter;
3417         struct iommu_table *tbl;
3418         unsigned long ret = 0;
3419         int i;
3420
3421         tbl = get_iommu_table_base(&vdev->dev);
3422
3423         /* netdev inits at probe time along with the structures we need below */
3424         if (!netdev)
3425                 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3426
3427         adapter = netdev_priv(netdev);
3428
3429         ret += PAGE_SIZE; /* the crq message queue */
3430         ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3431
3432         for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3433                 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3434
3435         for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3436              i++)
3437                 ret += adapter->rx_pool[i].size *
3438                     IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3439
3440         return ret;
3441 }
3442
3443 static int ibmvnic_resume(struct device *dev)
3444 {
3445         struct net_device *netdev = dev_get_drvdata(dev);
3446         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3447         int i;
3448
3449         /* kick the interrupt handlers just in case we lost an interrupt */
3450         for (i = 0; i < adapter->req_rx_queues; i++)
3451                 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3452                                      adapter->rx_scrq[i]);
3453
3454         return 0;
3455 }
3456
3457 static struct vio_device_id ibmvnic_device_table[] = {
3458         {"network", "IBM,vnic"},
3459         {"", "" }
3460 };
3461 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3462
3463 static const struct dev_pm_ops ibmvnic_pm_ops = {
3464         .resume = ibmvnic_resume
3465 };
3466
3467 static struct vio_driver ibmvnic_driver = {
3468         .id_table       = ibmvnic_device_table,
3469         .probe          = ibmvnic_probe,
3470         .remove         = ibmvnic_remove,
3471         .get_desired_dma = ibmvnic_get_desired_dma,
3472         .name           = ibmvnic_driver_name,
3473         .pm             = &ibmvnic_pm_ops,
3474 };
3475
3476 /* module functions */
3477 static int __init ibmvnic_module_init(void)
3478 {
3479         pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3480                 IBMVNIC_DRIVER_VERSION);
3481
3482         return vio_register_driver(&ibmvnic_driver);
3483 }
3484
3485 static void __exit ibmvnic_module_exit(void)
3486 {
3487         vio_unregister_driver(&ibmvnic_driver);
3488 }
3489
3490 module_init(ibmvnic_module_init);
3491 module_exit(ibmvnic_module_exit);