1 /* sunvnet.c: Sun LDOM Virtual Network Driver.
2  *
3  * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/module.h>
9 #include <linux/kernel.h>
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/delay.h>
13 #include <linux/init.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/etherdevice.h>
17 #include <linux/mutex.h>
18 #include <linux/highmem.h>
19 #include <linux/if_vlan.h>
20 #define CREATE_TRACE_POINTS
21 #include <trace/events/sunvnet.h>
22
23 #if IS_ENABLED(CONFIG_IPV6)
24 #include <linux/icmpv6.h>
25 #endif
26
27 #include <net/ip.h>
28 #include <net/icmp.h>
29 #include <net/route.h>
30
31 #include <asm/vio.h>
32 #include <asm/ldc.h>
33
34 #include "sunvnet.h"
35
36 #define DRV_MODULE_NAME         "sunvnet"
37 #define DRV_MODULE_VERSION      "1.0"
38 #define DRV_MODULE_RELDATE      "June 25, 2007"
39
40 static char version[] =
41         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
42 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
43 MODULE_DESCRIPTION("Sun LDOM virtual network driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRV_MODULE_VERSION);
46
47 #define VNET_MAX_TXQS           16
48
49 /* Heuristic for the number of times to exponentially back off and
50  * retry sending an LDC trigger when EAGAIN is encountered
51  */
52 #define VNET_MAX_RETRIES        10
53
54 static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
55 static void vnet_port_reset(struct vnet_port *port);
56
57 /* Ordered from largest major to lowest */
58 static struct vio_version vnet_versions[] = {
59         { .major = 1, .minor = 8 },
60         { .major = 1, .minor = 7 },
61         { .major = 1, .minor = 6 },
62         { .major = 1, .minor = 0 },
63 };
64
65 static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
66 {
67         return vio_dring_avail(dr, VNET_TX_RING_SIZE);
68 }
69
70 static int vnet_handle_unknown(struct vnet_port *port, void *arg)
71 {
72         struct vio_msg_tag *pkt = arg;
73
74         pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
75                pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
76         pr_err("Resetting connection\n");
77
78         ldc_disconnect(port->vio.lp);
79
80         return -ECONNRESET;
81 }
82
83 static int vnet_port_alloc_tx_ring(struct vnet_port *port);
84
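/* Build and send the VIO ATTR INFO packet for the handshake: advertise
 * dring transfer mode, our MAC address, the MTU (capped at
 * VNET_MAXPACKET for v1.4+) and, for v1.7+, IPv4 LSO capability and its
 * maximum size.  The TX descriptor ring is allocated first so the
 * attributes only go out once transmit resources exist.
 */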
85 static int vnet_send_attr(struct vio_driver_state *vio)
86 {
87         struct vnet_port *port = to_vnet_port(vio);
88         struct net_device *dev = port->vp->dev;
89         struct vio_net_attr_info pkt;
90         int framelen = ETH_FRAME_LEN;
91         int i, err;
92
93         err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
94         if (err)
95                 return err;
96
97         memset(&pkt, 0, sizeof(pkt));
98         pkt.tag.type = VIO_TYPE_CTRL;
99         pkt.tag.stype = VIO_SUBTYPE_INFO;
100         pkt.tag.stype_env = VIO_ATTR_INFO;
101         pkt.tag.sid = vio_send_sid(vio);
102         if (vio_version_before(vio, 1, 2))
103                 pkt.xfer_mode = VIO_DRING_MODE;
104         else
105                 pkt.xfer_mode = VIO_NEW_DRING_MODE;
106         pkt.addr_type = VNET_ADDR_ETHERMAC;
107         pkt.ack_freq = 0;
108         for (i = 0; i < 6; i++)
109                 pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
110         if (vio_version_after(vio, 1, 3)) {
111                 if (port->rmtu) {
112                         port->rmtu = min(VNET_MAXPACKET, port->rmtu);
113                         pkt.mtu = port->rmtu;
114                 } else {
115                         port->rmtu = VNET_MAXPACKET;
116                         pkt.mtu = port->rmtu;
117                 }
118                 if (vio_version_after_eq(vio, 1, 6))
119                         pkt.options = VIO_TX_DRING;
120         } else if (vio_version_before(vio, 1, 3)) {
121                 pkt.mtu = framelen;
122         } else { /* v1.3 */
123                 pkt.mtu = framelen + VLAN_HLEN;
124         }
125
126         pkt.cflags = 0;
127         if (vio_version_after_eq(vio, 1, 7) && port->tso) {
128                 pkt.cflags |= VNET_LSO_IPV4_CAPAB;
129                 if (!port->tsolen)
130                         port->tsolen = VNET_MAXTSO;
131                 pkt.ipv4_lso_maxlen = port->tsolen;
132         }
133
134         pkt.plnk_updt = PHYSLINK_UPDATE_NONE;
135
136         viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
137                "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
138                "cflags[0x%04x] lso_max[%u]\n",
139                pkt.xfer_mode, pkt.addr_type,
140                (unsigned long long)pkt.addr,
141                pkt.ack_freq, pkt.plnk_updt, pkt.options,
142                (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);
143
145         return vio_ldc_send(vio, &pkt, sizeof(pkt));
146 }
147
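/* Process the peer's ATTR INFO: normalize the transfer mode, negotiate
 * the MTU according to the per-version rules described below, clamp the
 * TSO parameters, and reply with an ACK carrying the adjusted values or
 * a NACK that forces a connection reset.
 */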
148 static int handle_attr_info(struct vio_driver_state *vio,
149                             struct vio_net_attr_info *pkt)
150 {
151         struct vnet_port *port = to_vnet_port(vio);
152         u64     localmtu;
153         u8      xfer_mode;
154
155         viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
156                "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
157                " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
158                pkt->xfer_mode, pkt->addr_type,
159                (unsigned long long)pkt->addr,
160                pkt->ack_freq, pkt->plnk_updt, pkt->options,
161                (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
162                pkt->ipv4_lso_maxlen);
163
164         pkt->tag.sid = vio_send_sid(vio);
165
166         xfer_mode = pkt->xfer_mode;
167         /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
168         if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
169                 xfer_mode = VIO_NEW_DRING_MODE;
170
171         /* MTU negotiation:
172          *      < v1.3 - ETH_FRAME_LEN exactly
173          *      > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
174          *                      pkt->mtu for ACK
175          *      = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
176          */
177         if (vio_version_before(vio, 1, 3)) {
178                 localmtu = ETH_FRAME_LEN;
179         } else if (vio_version_after(vio, 1, 3)) {
180                 localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
181                 localmtu = min(pkt->mtu, localmtu);
182                 pkt->mtu = localmtu;
183         } else { /* v1.3 */
184                 localmtu = ETH_FRAME_LEN + VLAN_HLEN;
185         }
186         port->rmtu = localmtu;
187
188         /* LSO negotiation */
189         if (vio_version_after_eq(vio, 1, 7))
190                 port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
191         else
192                 port->tso = false;
193         if (port->tso) {
194                 if (!port->tsolen)
195                         port->tsolen = VNET_MAXTSO;
196                 port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
197                 if (port->tsolen < VNET_MINTSO) {
198                         port->tso = false;
199                         port->tsolen = 0;
200                         pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
201                 }
202                 pkt->ipv4_lso_maxlen = port->tsolen;
203         } else {
204                 pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
205                 pkt->ipv4_lso_maxlen = 0;
206         }
207
208         /* for versions >= 1.6, ACK with the packet mode we support */
209         if (vio_version_after_eq(vio, 1, 6)) {
210                 pkt->xfer_mode = VIO_NEW_DRING_MODE;
211                 pkt->options = VIO_TX_DRING;
212         }
213
214         if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
215             pkt->addr_type != VNET_ADDR_ETHERMAC ||
216             pkt->mtu != localmtu) {
217                 viodbg(HS, "SEND NET ATTR NACK\n");
218
219                 pkt->tag.stype = VIO_SUBTYPE_NACK;
220
221                 (void) vio_ldc_send(vio, pkt, sizeof(*pkt));
222
223                 return -ECONNRESET;
224         } else {
225                 viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
226                        "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
227                        "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
228                        pkt->xfer_mode, pkt->addr_type,
229                        (unsigned long long)pkt->addr,
230                        pkt->ack_freq, pkt->plnk_updt, pkt->options,
231                        (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
232                        pkt->ipv4_lso_maxlen);
233
234                 pkt->tag.stype = VIO_SUBTYPE_ACK;
235
236                 return vio_ldc_send(vio, pkt, sizeof(*pkt));
237         }
238
239 }
240
241 static int handle_attr_ack(struct vio_driver_state *vio,
242                            struct vio_net_attr_info *pkt)
243 {
244         viodbg(HS, "GOT NET ATTR ACK\n");
245
246         return 0;
247 }
248
249 static int handle_attr_nack(struct vio_driver_state *vio,
250                             struct vio_net_attr_info *pkt)
251 {
252         viodbg(HS, "GOT NET ATTR NACK\n");
253
254         return -ECONNRESET;
255 }
256
257 static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
258 {
259         struct vio_net_attr_info *pkt = arg;
260
261         switch (pkt->tag.stype) {
262         case VIO_SUBTYPE_INFO:
263                 return handle_attr_info(vio, pkt);
264
265         case VIO_SUBTYPE_ACK:
266                 return handle_attr_ack(vio, pkt);
267
268         case VIO_SUBTYPE_NACK:
269                 return handle_attr_nack(vio, pkt);
270
271         default:
272                 return -ECONNRESET;
273         }
274 }
275
276 static void vnet_handshake_complete(struct vio_driver_state *vio)
277 {
278         struct vio_dring_state *dr;
279
280         dr = &vio->drings[VIO_DRIVER_RX_RING];
281         dr->snd_nxt = dr->rcv_nxt = 1;
282
283         dr = &vio->drings[VIO_DRIVER_TX_RING];
284         dr->snd_nxt = dr->rcv_nxt = 1;
285 }
286
287 /* The hypervisor interface that implements copying to/from imported
288  * memory from another domain requires that copies are done to 8-byte
289  * aligned buffers, and that the lengths of such copies are also 8-byte
290  * multiples.
291  *
292  * So we align skb->data to an 8-byte multiple and pad-out the data
293  * area so we can round the copy length up to the next multiple of
294  * 8 for the copy.
295  *
296  * The transmitter puts the actual start of the packet 6 bytes into
297  * the buffer it sends over, so that the IP headers after the ethernet
298  * header are aligned properly.  These 6 bytes are not in the descriptor
299  * length, they are simply implied.  This offset is represented using
300  * the VNET_PACKET_SKIP macro.
301  */
302 static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
303                                            unsigned int len)
304 {
305         struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
306         unsigned long addr, off;
307
308         if (unlikely(!skb))
309                 return NULL;
310
311         addr = (unsigned long) skb->data;
312         off = ((addr + 7UL) & ~7UL) - addr;
313         if (off)
314                 skb_reserve(skb, off);
315
316         return skb;
317 }
318
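/* Compute a full TCP or UDP checksum (including the pseudo-header) in
 * software for an IPv4 skb and store it in the transport header.  Both
 * the transmit and receive paths fall back to this when the checksum
 * has not already been completed.
 */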
319 static inline void vnet_fullcsum(struct sk_buff *skb)
320 {
321         struct iphdr *iph = ip_hdr(skb);
322         int offset = skb_transport_offset(skb);
323
324         if (skb->protocol != htons(ETH_P_IP))
325                 return;
326         if (iph->protocol != IPPROTO_TCP &&
327             iph->protocol != IPPROTO_UDP)
328                 return;
329         skb->ip_summed = CHECKSUM_NONE;
330         skb->csum_level = 1;
331         skb->csum = 0;
332         if (iph->protocol == IPPROTO_TCP) {
333                 struct tcphdr *ptcp = tcp_hdr(skb);
334
335                 ptcp->check = 0;
336                 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
337                 ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
338                                                 skb->len - offset, IPPROTO_TCP,
339                                                 skb->csum);
340         } else if (iph->protocol == IPPROTO_UDP) {
341                 struct udphdr *pudp = udp_hdr(skb);
342
343                 pudp->check = 0;
344                 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
345                 pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
346                                                 skb->len - offset, IPPROTO_UDP,
347                                                 skb->csum);
348         }
349 }
350
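/* Receive one descriptor: validate its length against the negotiated
 * MTU/TSO limits, allocate an aligned skb, copy the data in over the
 * LDC channel, apply any v1.8 checksum hints carried in the descriptor
 * extension, and hand the packet to NAPI GRO.
 */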
351 static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
352 {
353         struct net_device *dev = port->vp->dev;
354         unsigned int len = desc->size;
355         unsigned int copy_len;
356         struct sk_buff *skb;
357         int maxlen;
358         int err;
359
360         err = -EMSGSIZE;
361         if (port->tso && port->tsolen > port->rmtu)
362                 maxlen = port->tsolen;
363         else
364                 maxlen = port->rmtu;
365         if (unlikely(len < ETH_ZLEN || len > maxlen)) {
366                 dev->stats.rx_length_errors++;
367                 goto out_dropped;
368         }
369
370         skb = alloc_and_align_skb(dev, len);
371         err = -ENOMEM;
372         if (unlikely(!skb)) {
373                 dev->stats.rx_missed_errors++;
374                 goto out_dropped;
375         }
376
377         copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
378         skb_put(skb, copy_len);
379         err = ldc_copy(port->vio.lp, LDC_COPY_IN,
380                        skb->data, copy_len, 0,
381                        desc->cookies, desc->ncookies);
382         if (unlikely(err < 0)) {
383                 dev->stats.rx_frame_errors++;
384                 goto out_free_skb;
385         }
386
387         skb_pull(skb, VNET_PACKET_SKIP);
388         skb_trim(skb, len);
389         skb->protocol = eth_type_trans(skb, dev);
390
391         if (vio_version_after_eq(&port->vio, 1, 8)) {
392                 struct vio_net_dext *dext = vio_net_ext(desc);
393
394                 skb_reset_network_header(skb);
395
396                 if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
397                         if (skb->protocol == htons(ETH_P_IP)) {
398                                 struct iphdr *iph = ip_hdr(skb);
399
400                                 iph->check = 0;
401                                 ip_send_check(iph);
402                         }
403                 }
404                 if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
405                     skb->ip_summed == CHECKSUM_NONE) {
406                         if (skb->protocol == htons(ETH_P_IP)) {
407                                 struct iphdr *iph = ip_hdr(skb);
408                                 int ihl = iph->ihl * 4;
409
410                                 skb_reset_transport_header(skb);
411                                 skb_set_transport_header(skb, ihl);
412                                 vnet_fullcsum(skb);
413                         }
414                 }
415                 if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
416                         skb->ip_summed = CHECKSUM_PARTIAL;
417                         skb->csum_level = 0;
418                         if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
419                                 skb->csum_level = 1;
420                 }
421         }
422
423         skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;
424
425         dev->stats.rx_packets++;
426         dev->stats.rx_bytes += len;
427         napi_gro_receive(&port->napi, skb);
428         return 0;
429
430 out_free_skb:
431         kfree_skb(skb);
432
433 out_dropped:
434         dev->stats.rx_dropped++;
435         return err;
436 }
437
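/* Send a DRING ACK (ACTIVE or STOPPED) covering [start, end].  The
 * vio_ldc_send() is retried with exponential backoff, up to
 * VNET_MAX_RETRIES, while it returns -EAGAIN.  If a STOPPED ack cannot
 * be delivered, it is remembered in port->stop_rx so it can be flushed
 * from the next TX trigger.
 */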
438 static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
439                          u32 start, u32 end, u8 vio_dring_state)
440 {
441         struct vio_dring_data hdr = {
442                 .tag = {
443                         .type           = VIO_TYPE_DATA,
444                         .stype          = VIO_SUBTYPE_ACK,
445                         .stype_env      = VIO_DRING_DATA,
446                         .sid            = vio_send_sid(&port->vio),
447                 },
448                 .dring_ident            = dr->ident,
449                 .start_idx              = start,
450                 .end_idx                = end,
451                 .state                  = vio_dring_state,
452         };
453         int err, delay;
454         int retries = 0;
455
456         hdr.seq = dr->snd_nxt;
457         delay = 1;
458         do {
459                 err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
460                 if (err > 0) {
461                         dr->snd_nxt++;
462                         break;
463                 }
464                 udelay(delay);
465                 if ((delay <<= 1) > 128)
466                         delay = 128;
467                 if (retries++ > VNET_MAX_RETRIES) {
468                         pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
469                                 port->raddr[0], port->raddr[1],
470                                 port->raddr[2], port->raddr[3],
471                                 port->raddr[4], port->raddr[5]);
472                         break;
473                 }
474         } while (err == -EAGAIN);
475
476         if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
477                 port->stop_rx_idx = end;
478                 port->stop_rx = true;
479         } else {
480                 port->stop_rx_idx = 0;
481                 port->stop_rx = false;
482         }
483
484         return err;
485 }
486
487 static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
488                                         struct vio_dring_state *dr,
489                                         u32 index)
490 {
491         struct vio_net_desc *desc = port->vio.desc_buf;
492         int err;
493
494         err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
495                                   (index * dr->entry_size),
496                                   dr->cookies, dr->ncookies);
497         if (err < 0)
498                 return ERR_PTR(err);
499
500         return desc;
501 }
502
503 static int put_rx_desc(struct vnet_port *port,
504                        struct vio_dring_state *dr,
505                        struct vio_net_desc *desc,
506                        u32 index)
507 {
508         int err;
509
510         err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
511                                   (index * dr->entry_size),
512                                   dr->cookies, dr->ncookies);
513         if (err < 0)
514                 return err;
515
516         return 0;
517 }
518
519 static int vnet_walk_rx_one(struct vnet_port *port,
520                             struct vio_dring_state *dr,
521                             u32 index, int *needs_ack)
522 {
523         struct vio_net_desc *desc = get_rx_desc(port, dr, index);
524         struct vio_driver_state *vio = &port->vio;
525         int err;
526
527         BUG_ON(desc == NULL);
528         if (IS_ERR(desc))
529                 return PTR_ERR(desc);
530
531         if (desc->hdr.state != VIO_DESC_READY)
532                 return 1;
533
534         dma_rmb();
535
536         viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
537                desc->hdr.state, desc->hdr.ack,
538                desc->size, desc->ncookies,
539                desc->cookies[0].cookie_addr,
540                desc->cookies[0].cookie_size);
541
542         err = vnet_rx_one(port, desc);
543         if (err == -ECONNRESET)
544                 return err;
545         trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
546                           index, desc->hdr.ack);
547         desc->hdr.state = VIO_DESC_DONE;
548         err = put_rx_desc(port, dr, desc, index);
549         if (err < 0)
550                 return err;
551         *needs_ack = desc->hdr.ack;
552         return 0;
553 }
554
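/* Walk the RX ring from start to end (an end index of -1 means "until
 * the ring wraps back to start"), receiving one packet per READY
 * descriptor up to the NAPI budget.  Intermediate ACTIVE acks are sent
 * when a descriptor requests one; a final STOPPED ack is sent unless
 * the budget ran out, in which case the position is saved so the next
 * NAPI poll can resume from it.
 */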
555 static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
556                         u32 start, u32 end, int *npkts, int budget)
557 {
558         struct vio_driver_state *vio = &port->vio;
559         int ack_start = -1, ack_end = -1;
560         bool send_ack = true;
561
562         end = (end == (u32) -1) ? vio_dring_prev(dr, start)
563                                 : vio_dring_next(dr, end);
564
565         viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);
566
567         while (start != end) {
568                 int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
569                 if (err == -ECONNRESET)
570                         return err;
571                 if (err != 0)
572                         break;
573                 (*npkts)++;
574                 if (ack_start == -1)
575                         ack_start = start;
576                 ack_end = start;
577                 start = vio_dring_next(dr, start);
578                 if (ack && start != end) {
579                         err = vnet_send_ack(port, dr, ack_start, ack_end,
580                                             VIO_DRING_ACTIVE);
581                         if (err == -ECONNRESET)
582                                 return err;
583                         ack_start = -1;
584                 }
585                 if ((*npkts) >= budget) {
586                         send_ack = false;
587                         break;
588                 }
589         }
590         if (unlikely(ack_start == -1))
591                 ack_start = ack_end = vio_dring_prev(dr, start);
592         if (send_ack) {
593                 port->napi_resume = false;
594                 trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
595                                                port->vio._peer_sid,
596                                                ack_end, *npkts);
597                 return vnet_send_ack(port, dr, ack_start, ack_end,
598                                      VIO_DRING_STOPPED);
599         } else  {
600                 trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
601                                                 port->vio._peer_sid,
602                                                 ack_end, *npkts);
603                 port->napi_resume = true;
604                 port->napi_stop_idx = ack_end;
605                 return 1;
606         }
607 }
608
609 static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
610                    int budget)
611 {
612         struct vio_dring_data *pkt = msgbuf;
613         struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
614         struct vio_driver_state *vio = &port->vio;
615
616         viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
617                pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);
618
619         if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
620                 return 0;
621         if (unlikely(pkt->seq != dr->rcv_nxt)) {
622                 pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
623                        pkt->seq, dr->rcv_nxt);
624                 return 0;
625         }
626
627         if (!port->napi_resume)
628                 dr->rcv_nxt++;
629
630         /* XXX Validate pkt->start_idx and pkt->end_idx XXX */
631
632         return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
633                             npkts, budget);
634 }
635
636 static int idx_is_pending(struct vio_dring_state *dr, u32 end)
637 {
638         u32 idx = dr->cons;
639         int found = 0;
640
641         while (idx != dr->prod) {
642                 if (idx == end) {
643                         found = 1;
644                         break;
645                 }
646                 idx = vio_dring_next(dr, idx);
647         }
648         return found;
649 }
650
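/* Handle a DRING ACK from the peer for transmitted descriptors: advance
 * the TX consumer index past the acked entry and, if vnet_start_xmit()
 * filled a descriptor but missed sending its "start" trigger, send that
 * trigger on its behalf.  Returns 1 if the TX queue was stopped and
 * enough ring space is now available to wake it.
 */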
651 static int vnet_ack(struct vnet_port *port, void *msgbuf)
652 {
653         struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
654         struct vio_dring_data *pkt = msgbuf;
655         struct net_device *dev;
656         struct vnet *vp;
657         u32 end;
658         struct vio_net_desc *desc;
659         struct netdev_queue *txq;
660
661         if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
662                 return 0;
663
664         end = pkt->end_idx;
665         vp = port->vp;
666         dev = vp->dev;
667         netif_tx_lock(dev);
668         if (unlikely(!idx_is_pending(dr, end))) {
669                 netif_tx_unlock(dev);
670                 return 0;
671         }
672
673         /* sync for race conditions with vnet_start_xmit() and tell xmit it
674          * is time to send a trigger.
675          */
676         trace_vnet_rx_stopped_ack(port->vio._local_sid,
677                                   port->vio._peer_sid, end);
678         dr->cons = vio_dring_next(dr, end);
679         desc = vio_dring_entry(dr, dr->cons);
680         if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
681                 /* vnet_start_xmit() just populated this dring but missed
682                  * sending the "start" LDC message to the consumer.
683                  * Send a "start" trigger on its behalf.
684                  */
685                 if (__vnet_tx_trigger(port, dr->cons) > 0)
686                         port->start_cons = false;
687                 else
688                         port->start_cons = true;
689         } else {
690                 port->start_cons = true;
691         }
692         netif_tx_unlock(dev);
693
694         txq = netdev_get_tx_queue(dev, port->q_index);
695         if (unlikely(netif_tx_queue_stopped(txq) &&
696                      vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
697                 return 1;
698
699         return 0;
700 }
701
702 static int vnet_nack(struct vnet_port *port, void *msgbuf)
703 {
704         /* XXX just reset or similar XXX */
705         return 0;
706 }
707
708 static int handle_mcast(struct vnet_port *port, void *msgbuf)
709 {
710         struct vio_net_mcast_info *pkt = msgbuf;
711
712         if (pkt->tag.stype != VIO_SUBTYPE_ACK)
713                 pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
714                        port->vp->dev->name,
715                        pkt->tag.type,
716                        pkt->tag.stype,
717                        pkt->tag.stype_env,
718                        pkt->tag.sid);
719
720         return 0;
721 }
722
723 /* Got back a STOPPED LDC message on port. If the queue is stopped,
724  * wake it up so that we'll send out another START message at the
725  * next TX.
726  */
727 static void maybe_tx_wakeup(struct vnet_port *port)
728 {
729         struct netdev_queue *txq;
730
731         txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
732         __netif_tx_lock(txq, smp_processor_id());
733         if (likely(netif_tx_queue_stopped(txq))) {
734                 struct vio_dring_state *dr;
735
736                 dr = &port->vio.drings[VIO_DRIVER_TX_RING];
737                 netif_tx_wake_queue(txq);
738         }
739         __netif_tx_unlock(txq);
740 }
741
742 static inline bool port_is_up(struct vnet_port *vnet)
743 {
744         struct vio_driver_state *vio = &vnet->vio;
745
746         return !!(vio->hs_state & VIO_HS_COMPLETE);
747 }
748
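/* Core of the NAPI poll loop.  LDC control events (RESET/UP) are
 * handled first and trigger a port reset or handshake restart.
 * Otherwise DATA_READY is processed by reading LDC messages until the
 * budget is consumed, the channel is drained, or an error forces a
 * reset; if a previous poll ran out of budget, processing resumes from
 * the saved ring index via the napi_resume path.
 */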
749 static int vnet_event_napi(struct vnet_port *port, int budget)
750 {
751         struct vio_driver_state *vio = &port->vio;
752         int tx_wakeup, err;
753         int npkts = 0;
754         int event = (port->rx_event & LDC_EVENT_RESET);
755
756 ldc_ctrl:
757         if (unlikely(event == LDC_EVENT_RESET ||
758                      event == LDC_EVENT_UP)) {
759                 vio_link_state_change(vio, event);
760
761                 if (event == LDC_EVENT_RESET) {
762                         vnet_port_reset(port);
763                         vio_port_up(vio);
764                 }
765                 port->rx_event = 0;
766                 return 0;
767         }
768         /* We may have multiple LDC events in rx_event. Unroll send_events() */
769         event = (port->rx_event & LDC_EVENT_UP);
770         port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
771         if (event == LDC_EVENT_UP)
772                 goto ldc_ctrl;
773         event = port->rx_event;
774         if (!(event & LDC_EVENT_DATA_READY))
775                 return 0;
776
777         /* we don't expect any other bits than RESET, UP, DATA_READY */
778         BUG_ON(event != LDC_EVENT_DATA_READY);
779
780         tx_wakeup = err = 0;
781         while (1) {
782                 union {
783                         struct vio_msg_tag tag;
784                         u64 raw[8];
785                 } msgbuf;
786
787                 if (port->napi_resume) {
788                         struct vio_dring_data *pkt =
789                                 (struct vio_dring_data *)&msgbuf;
790                         struct vio_dring_state *dr =
791                                 &port->vio.drings[VIO_DRIVER_RX_RING];
792
793                         pkt->tag.type = VIO_TYPE_DATA;
794                         pkt->tag.stype = VIO_SUBTYPE_INFO;
795                         pkt->tag.stype_env = VIO_DRING_DATA;
796                         pkt->seq = dr->rcv_nxt;
797                         pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
798                         pkt->end_idx = -1;
799                         goto napi_resume;
800                 }
801                 err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
802                 if (unlikely(err < 0)) {
803                         if (err == -ECONNRESET)
804                                 vio_conn_reset(vio);
805                         break;
806                 }
807                 if (err == 0)
808                         break;
809                 viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
810                        msgbuf.tag.type,
811                        msgbuf.tag.stype,
812                        msgbuf.tag.stype_env,
813                        msgbuf.tag.sid);
814                 err = vio_validate_sid(vio, &msgbuf.tag);
815                 if (err < 0)
816                         break;
817 napi_resume:
818                 if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
819                         if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
820                                 if (!port_is_up(port)) {
821                                         /* failures like handshake_failure()
822                                          * may have cleaned up dring, but
823                                          * NAPI polling may bring us here.
824                                          */
825                                         err = -ECONNRESET;
826                                         break;
827                                 }
828                                 err = vnet_rx(port, &msgbuf, &npkts, budget);
829                                 if (npkts >= budget)
830                                         break;
831                                 if (npkts == 0)
832                                         break;
833                         } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
834                                 err = vnet_ack(port, &msgbuf);
835                                 if (err > 0)
836                                         tx_wakeup |= err;
837                         } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
838                                 err = vnet_nack(port, &msgbuf);
839                         }
840                 } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
841                         if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
842                                 err = handle_mcast(port, &msgbuf);
843                         else
844                                 err = vio_control_pkt_engine(vio, &msgbuf);
845                         if (err)
846                                 break;
847                 } else {
848                         err = vnet_handle_unknown(port, &msgbuf);
849                 }
850                 if (err == -ECONNRESET)
851                         break;
852         }
853         if (unlikely(tx_wakeup && err != -ECONNRESET))
854                 maybe_tx_wakeup(port);
855         return npkts;
856 }
857
858 static int vnet_poll(struct napi_struct *napi, int budget)
859 {
860         struct vnet_port *port = container_of(napi, struct vnet_port, napi);
861         struct vio_driver_state *vio = &port->vio;
862         int processed = vnet_event_napi(port, budget);
863
864         if (processed < budget) {
865                 napi_complete(napi);
866                 port->rx_event &= ~LDC_EVENT_DATA_READY;
867                 vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
868         }
869         return processed;
870 }
871
872 static void vnet_event(void *arg, int event)
873 {
874         struct vnet_port *port = arg;
875         struct vio_driver_state *vio = &port->vio;
876
877         port->rx_event |= event;
878         vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
879         napi_schedule(&port->napi);
880
881 }
882
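/* Send a DRING_DATA "start" trigger for the TX ring beginning at
 * descriptor index 'start'.  Any STOPPED ack that could not be sent
 * earlier (port->stop_rx) is flushed first.  As in vnet_send_ack(), the
 * send is retried with exponential backoff while the LDC returns
 * -EAGAIN.
 */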
883 static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
884 {
885         struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
886         struct vio_dring_data hdr = {
887                 .tag = {
888                         .type           = VIO_TYPE_DATA,
889                         .stype          = VIO_SUBTYPE_INFO,
890                         .stype_env      = VIO_DRING_DATA,
891                         .sid            = vio_send_sid(&port->vio),
892                 },
893                 .dring_ident            = dr->ident,
894                 .start_idx              = start,
895                 .end_idx                = (u32) -1,
896         };
897         int err, delay;
898         int retries = 0;
899
900         if (port->stop_rx) {
901                 trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
902                                                   port->vio._peer_sid,
903                                                   port->stop_rx_idx, -1);
904                 err = vnet_send_ack(port,
905                                     &port->vio.drings[VIO_DRIVER_RX_RING],
906                                     port->stop_rx_idx, -1,
907                                     VIO_DRING_STOPPED);
908                 if (err <= 0)
909                         return err;
910         }
911
912         hdr.seq = dr->snd_nxt;
913         delay = 1;
914         do {
915                 err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
916                 if (err > 0) {
917                         dr->snd_nxt++;
918                         break;
919                 }
920                 udelay(delay);
921                 if ((delay <<= 1) > 128)
922                         delay = 128;
923                 if (retries++ > VNET_MAX_RETRIES)
924                         break;
925         } while (err == -EAGAIN);
926         trace_vnet_tx_trigger(port->vio._local_sid,
927                               port->vio._peer_sid, start, err);
928
929         return err;
930 }
931
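/* Find the destination port for a packet: first by hashing the
 * destination MAC address into the port hash, then by falling back to
 * the first live switch port.  The caller must hold the RCU read lock.
 */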
932 struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
933 {
934         unsigned int hash = vnet_hashfn(skb->data);
935         struct hlist_head *hp = &vp->port_hash[hash];
936         struct vnet_port *port;
937
938         hlist_for_each_entry_rcu(port, hp, hash) {
939                 if (!port_is_up(port))
940                         continue;
941                 if (ether_addr_equal(port->raddr, skb->data))
942                         return port;
943         }
944         list_for_each_entry_rcu(port, &vp->port_list, list) {
945                 if (!port->switch_port)
946                         continue;
947                 if (!port_is_up(port))
948                         continue;
949                 return port;
950         }
951         return NULL;
952 }
953
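/* Reclaim completed TX descriptors: walk backwards from the producer
 * index, unmap the LDC cookies of completed entries and collect their
 * skbs into a chain for the caller to free outside the tx lock.
 * Descriptors still marked READY are counted in *pending so the clean
 * timer can be re-armed.
 */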
954 static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
955                                           unsigned *pending)
956 {
957         struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
958         struct sk_buff *skb = NULL;
959         int i, txi;
960
961         *pending = 0;
962
963         txi = dr->prod;
964         for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
965                 struct vio_net_desc *d;
966
967                 --txi;
968                 if (txi < 0)
969                         txi = VNET_TX_RING_SIZE-1;
970
971                 d = vio_dring_entry(dr, txi);
972
973                 if (d->hdr.state == VIO_DESC_READY) {
974                         (*pending)++;
975                         continue;
976                 }
977                 if (port->tx_bufs[txi].skb) {
978                         if (d->hdr.state != VIO_DESC_DONE)
979                                 pr_notice("invalid ring buffer state %d\n",
980                                           d->hdr.state);
981                         BUG_ON(port->tx_bufs[txi].skb->next);
982
983                         port->tx_bufs[txi].skb->next = skb;
984                         skb = port->tx_bufs[txi].skb;
985                         port->tx_bufs[txi].skb = NULL;
986
987                         ldc_unmap(port->vio.lp,
988                                   port->tx_bufs[txi].cookies,
989                                   port->tx_bufs[txi].ncookies);
990                 } else if (d->hdr.state == VIO_DESC_FREE)
991                         break;
992                 d->hdr.state = VIO_DESC_FREE;
993         }
994         return skb;
995 }
996
997 static inline void vnet_free_skbs(struct sk_buff *skb)
998 {
999         struct sk_buff *next;
1000
1001         while (skb) {
1002                 next = skb->next;
1003                 skb->next = NULL;
1004                 dev_kfree_skb(skb);
1005                 skb = next;
1006         }
1007 }
1008
1009 static void vnet_clean_timer_expire(unsigned long port0)
1010 {
1011         struct vnet_port *port = (struct vnet_port *)port0;
1012         struct sk_buff *freeskbs;
1013         unsigned pending;
1014
1015         netif_tx_lock(port->vp->dev);
1016         freeskbs = vnet_clean_tx_ring(port, &pending);
1017         netif_tx_unlock(port->vp->dev);
1018
1019         vnet_free_skbs(freeskbs);
1020
1021         if (pending)
1022                 (void)mod_timer(&port->clean_timer,
1023                                 jiffies + VNET_CLEAN_TIMEOUT);
1024         else
1025                 del_timer(&port->clean_timer);
1026 }
1027
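/* Map the skb head (padded to at least ETH_ZLEN, including the
 * VNET_PACKET_SKIP bytes in front, and rounded up to an 8-byte
 * multiple) and each page fragment into LDC transfer cookies.  Returns
 * the number of cookies used, or a negative error after unmapping
 * anything already mapped.
 */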
1028 static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
1029                                struct ldc_trans_cookie *cookies, int ncookies,
1030                                unsigned int map_perm)
1031 {
1032         int i, nc, err, blen;
1033
1034         /* header */
1035         blen = skb_headlen(skb);
1036         if (blen < ETH_ZLEN)
1037                 blen = ETH_ZLEN;
1038         blen += VNET_PACKET_SKIP;
1039         blen += 8 - (blen & 7);
1040
1041         err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies,
1042                              ncookies, map_perm);
1043         if (err < 0)
1044                 return err;
1045         nc = err;
1046
1047         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1048                 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1049                 u8 *vaddr;
1050
1051                 if (nc < ncookies) {
1052                         vaddr = kmap_atomic(skb_frag_page(f));
1053                         blen = skb_frag_size(f);
1054                         blen += 8 - (blen & 7);
1055                         err = ldc_map_single(lp, vaddr + f->page_offset,
1056                                              blen, cookies + nc, ncookies - nc,
1057                                              map_perm);
1058                         kunmap_atomic(vaddr);
1059                 } else {
1060                         err = -EMSGSIZE;
1061                 }
1062
1063                 if (err < 0) {
1064                         ldc_unmap(lp, cookies, nc);
1065                         return err;
1066                 }
1067                 nc += err;
1068         }
1069         return nc;
1070 }
1071
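/* Make sure the skb satisfies the LDC copy constraints: data offset of
 * VNET_PACKET_SKIP within an 8-byte aligned buffer, enough tailroom for
 * padding, and no more fragments than available cookies.  If not, copy
 * the packet into a freshly aligned skb, completing a pending partial
 * checksum along the way.  May free the original skb; returns NULL on
 * allocation or copy failure.
 */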
1072 static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
1073 {
1074         struct sk_buff *nskb;
1075         int i, len, pad, docopy;
1076
1077         len = skb->len;
1078         pad = 0;
1079         if (len < ETH_ZLEN) {
1080                 pad += ETH_ZLEN - skb->len;
1081                 len += pad;
1082         }
1083         len += VNET_PACKET_SKIP;
1084         pad += 8 - (len & 7);
1085
1086         /* make sure we have enough cookies and alignment in every frag */
1087         docopy = skb_shinfo(skb)->nr_frags >= ncookies;
1088         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1089                 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1090
1091                 docopy |= f->page_offset & 7;
1092         }
1093         if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
1094             skb_tailroom(skb) < pad ||
1095             skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
1096                 int start = 0, offset;
1097                 __wsum csum;
1098
1099                 len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
1100                 nskb = alloc_and_align_skb(skb->dev, len);
1101                 if (nskb == NULL) {
1102                         dev_kfree_skb(skb);
1103                         return NULL;
1104                 }
1105                 skb_reserve(nskb, VNET_PACKET_SKIP);
1106
1107                 nskb->protocol = skb->protocol;
1108                 offset = skb_mac_header(skb) - skb->data;
1109                 skb_set_mac_header(nskb, offset);
1110                 offset = skb_network_header(skb) - skb->data;
1111                 skb_set_network_header(nskb, offset);
1112                 offset = skb_transport_header(skb) - skb->data;
1113                 skb_set_transport_header(nskb, offset);
1114
1115                 offset = 0;
1116                 nskb->csum_offset = skb->csum_offset;
1117                 nskb->ip_summed = skb->ip_summed;
1118
1119                 if (skb->ip_summed == CHECKSUM_PARTIAL)
1120                         start = skb_checksum_start_offset(skb);
1121                 if (start) {
1122                         struct iphdr *iph = ip_hdr(nskb);
1123                         int offset = start + nskb->csum_offset;
1124
1125                         if (skb_copy_bits(skb, 0, nskb->data, start)) {
1126                                 dev_kfree_skb(nskb);
1127                                 dev_kfree_skb(skb);
1128                                 return NULL;
1129                         }
1130                         *(__sum16 *)(skb->data + offset) = 0;
1131                         csum = skb_copy_and_csum_bits(skb, start,
1132                                                       nskb->data + start,
1133                                                       skb->len - start, 0);
1134                         if (iph->protocol == IPPROTO_TCP ||
1135                             iph->protocol == IPPROTO_UDP) {
1136                                 csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
1137                                                          skb->len - start,
1138                                                          iph->protocol, csum);
1139                         }
1140                         *(__sum16 *)(nskb->data + offset) = csum;
1141
1142                         nskb->ip_summed = CHECKSUM_NONE;
1143                 } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
1144                         dev_kfree_skb(nskb);
1145                         dev_kfree_skb(skb);
1146                         return NULL;
1147                 }
1148                 (void)skb_put(nskb, skb->len);
1149                 if (skb_is_gso(skb)) {
1150                         skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
1151                         skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
1152                 }
1153                 nskb->queue_mapping = skb->queue_mapping;
1154                 dev_kfree_skb(skb);
1155                 skb = nskb;
1156         }
1157         return skb;
1158 }
1159
1160 static u16
1161 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
1162                   void *accel_priv, select_queue_fallback_t fallback)
1163 {
1164         struct vnet *vp = netdev_priv(dev);
1165         struct vnet_port *port = __tx_port_find(vp, skb);
1166
1167         if (port == NULL)
1168                 return 0;
1169         return port->q_index;
1170 }
1171
1172 static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev);
1173
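/* TX path for GSO packets larger than the peer's advertised LSO limit:
 * re-segment the skb in software (using the negotiated TSO size where
 * possible) and feed each resulting segment back through
 * vnet_start_xmit(), stopping the queue first if the ring cannot hold
 * all of the segments.
 */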
1174 static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
1175 {
1176         struct net_device *dev = port->vp->dev;
1177         struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1178         struct sk_buff *segs;
1179         int maclen, datalen;
1180         int status;
1181         int gso_size, gso_type, gso_segs;
1182         int hlen = skb_transport_header(skb) - skb_mac_header(skb);
1183         int proto = IPPROTO_IP;
1184
1185         if (skb->protocol == htons(ETH_P_IP))
1186                 proto = ip_hdr(skb)->protocol;
1187         else if (skb->protocol == htons(ETH_P_IPV6))
1188                 proto = ipv6_hdr(skb)->nexthdr;
1189
1190         if (proto == IPPROTO_TCP)
1191                 hlen += tcp_hdr(skb)->doff * 4;
1192         else if (proto == IPPROTO_UDP)
1193                 hlen += sizeof(struct udphdr);
1194         else {
1195                 pr_err("vnet_handle_offloads GSO with unknown transport "
1196                        "protocol %d tproto %d\n", skb->protocol, proto);
1197                 hlen = 128; /* XXX */
1198         }
1199         datalen = port->tsolen - hlen;
1200
1201         gso_size = skb_shinfo(skb)->gso_size;
1202         gso_type = skb_shinfo(skb)->gso_type;
1203         gso_segs = skb_shinfo(skb)->gso_segs;
1204
1205         if (port->tso && gso_size < datalen)
1206                 gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);
1207
1208         if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
1209                 struct netdev_queue *txq;
1210
1211                 txq  = netdev_get_tx_queue(dev, port->q_index);
1212                 netif_tx_stop_queue(txq);
1213                 if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
1214                         return NETDEV_TX_BUSY;
1215                 netif_tx_wake_queue(txq);
1216         }
1217
1218         maclen = skb_network_header(skb) - skb_mac_header(skb);
1219         skb_pull(skb, maclen);
1220
1221         if (port->tso && gso_size < datalen) {
1222                 if (skb_unclone(skb, GFP_ATOMIC))
1223                         goto out_dropped;
1224
1225                 /* segment to TSO size */
1226                 skb_shinfo(skb)->gso_size = datalen;
1227                 skb_shinfo(skb)->gso_segs = gso_segs;
1228         }
1229         segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
1230         if (IS_ERR(segs))
1231                 goto out_dropped;
1232
1233         skb_push(skb, maclen);
1234         skb_reset_mac_header(skb);
1235
1236         status = 0;
1237         while (segs) {
1238                 struct sk_buff *curr = segs;
1239
1240                 segs = segs->next;
1241                 curr->next = NULL;
1242                 if (port->tso && curr->len > dev->mtu) {
1243                         skb_shinfo(curr)->gso_size = gso_size;
1244                         skb_shinfo(curr)->gso_type = gso_type;
1245                         skb_shinfo(curr)->gso_segs =
1246                                 DIV_ROUND_UP(curr->len - hlen, gso_size);
1247                 } else
1248                         skb_shinfo(curr)->gso_size = 0;
1249
1250                 skb_push(curr, maclen);
1251                 skb_reset_mac_header(curr);
1252                 memcpy(skb_mac_header(curr), skb_mac_header(skb),
1253                        maclen);
1254                 curr->csum_start = skb_transport_header(curr) - curr->head;
1255                 if (ip_hdr(curr)->protocol == IPPROTO_TCP)
1256                         curr->csum_offset = offsetof(struct tcphdr, check);
1257                 else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
1258                         curr->csum_offset = offsetof(struct udphdr, check);
1259
1260                 if (!(status & NETDEV_TX_MASK))
1261                         status = vnet_start_xmit(curr, dev);
1262                 if (status & NETDEV_TX_MASK)
1263                         dev_kfree_skb_any(curr);
1264         }
1265
1266         if (!(status & NETDEV_TX_MASK))
1267                 dev_kfree_skb_any(skb);
1268         return status;
1269 out_dropped:
1270         dev->stats.tx_dropped++;
1271         dev_kfree_skb_any(skb);
1272         return NETDEV_TX_OK;
1273 }
1274
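/* Main transmit path.  Look up the destination port under RCU, divert
 * oversized GSO packets to vnet_handle_offloads(), send ICMP
 * FRAG_NEEDED/PKT_TOOBIG for frames that exceed the peer MTU, then map
 * the skb into the next TX descriptor, mark it READY and, when needed,
 * send the LDC "start" trigger.  Completed descriptors are reclaimed
 * opportunistically and the clean timer is re-armed.
 */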
1275 static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1276 {
1277         struct vnet *vp = netdev_priv(dev);
1278         struct vnet_port *port = NULL;
1279         struct vio_dring_state *dr;
1280         struct vio_net_desc *d;
1281         unsigned int len;
1282         struct sk_buff *freeskbs = NULL;
1283         int i, err, txi;
1284         unsigned pending = 0;
1285         struct netdev_queue *txq;
1286
1287         rcu_read_lock();
1288         port = __tx_port_find(vp, skb);
1289         if (unlikely(!port)) {
1290                 rcu_read_unlock();
1291                 goto out_dropped;
1292         }
1293
1294         if (skb_is_gso(skb) && skb->len > port->tsolen) {
1295                 err = vnet_handle_offloads(port, skb);
1296                 rcu_read_unlock();
1297                 return err;
1298         }
1299
1300         if (!skb_is_gso(skb) && skb->len > port->rmtu) {
1301                 unsigned long localmtu = port->rmtu - ETH_HLEN;
1302
1303                 if (vio_version_after_eq(&port->vio, 1, 3))
1304                         localmtu -= VLAN_HLEN;
1305
1306                 if (skb->protocol == htons(ETH_P_IP)) {
1307                         struct flowi4 fl4;
1308                         struct rtable *rt = NULL;
1309
1310                         memset(&fl4, 0, sizeof(fl4));
1311                         fl4.flowi4_oif = dev->ifindex;
1312                         fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
1313                         fl4.daddr = ip_hdr(skb)->daddr;
1314                         fl4.saddr = ip_hdr(skb)->saddr;
1315
1316                         rt = ip_route_output_key(dev_net(dev), &fl4);
1317                         /* the RCU read lock is dropped on the out_dropped path below */
1318                         if (!IS_ERR(rt)) {
1319                                 skb_dst_set(skb, &rt->dst);
1320                                 icmp_send(skb, ICMP_DEST_UNREACH,
1321                                           ICMP_FRAG_NEEDED,
1322                                           htonl(localmtu));
1323                         }
1324                 }
1325 #if IS_ENABLED(CONFIG_IPV6)
1326                 else if (skb->protocol == htons(ETH_P_IPV6))
1327                         icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
1328 #endif
1329                 goto out_dropped;
1330         }
1331
1332         skb = vnet_skb_shape(skb, 2);
1333
1334         if (unlikely(!skb))
1335                 goto out_dropped;
1336
1337         if (skb->ip_summed == CHECKSUM_PARTIAL)
1338                 vnet_fullcsum(skb);
1339
1340         dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1341         i = skb_get_queue_mapping(skb);
1342         txq = netdev_get_tx_queue(dev, i);
1343         if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
1344                 if (!netif_tx_queue_stopped(txq)) {
1345                         netif_tx_stop_queue(txq);
1346
1347                         /* This is a hard error, log it. */
1348                         netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
1349                         dev->stats.tx_errors++;
1350                 }
1351                 rcu_read_unlock();
1352                 return NETDEV_TX_BUSY;
1353         }
1354
1355         d = vio_dring_cur(dr);
1356
1357         txi = dr->prod;
1358
1359         freeskbs = vnet_clean_tx_ring(port, &pending);
1360
1361         BUG_ON(port->tx_bufs[txi].skb);
1362
1363         len = skb->len;
1364         if (len < ETH_ZLEN)
1365                 len = ETH_ZLEN;
1366
1367         err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
1368                            (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
1369         if (err < 0) {
1370                 netdev_info(dev, "tx buffer map error %d\n", err);
1371                 goto out_dropped;
1372         }
1373
1374         port->tx_bufs[txi].skb = skb;
1375         skb = NULL;
1376         port->tx_bufs[txi].ncookies = err;
1377
1378         /* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
1379          * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
1380          * the protocol itself does not require it as long as the peer
1381          * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
1382          *
1383          * An ACK for every packet in the ring is expensive as the
1384          * sending of LDC messages is slow and affects performance.
1385          */
1386         d->hdr.ack = VIO_ACK_DISABLE;
1387         d->size = len;
1388         d->ncookies = port->tx_bufs[txi].ncookies;
1389         for (i = 0; i < d->ncookies; i++)
1390                 d->cookies[i] = port->tx_bufs[txi].cookies[i];
1391         if (vio_version_after_eq(&port->vio, 1, 7)) {
1392                 struct vio_net_dext *dext = vio_net_ext(d);
1393
1394                 memset(dext, 0, sizeof(*dext));
1395                 if (skb_is_gso(port->tx_bufs[txi].skb)) {
1396                         dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
1397                                              ->gso_size;
1398                         dext->flags |= VNET_PKT_IPV4_LSO;
1399                 }
1400                 if (vio_version_after_eq(&port->vio, 1, 8) &&
1401                     !port->switch_port) {
1402                         dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
1403                         dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
1404                 }
1405         }
1406
1407         /* This has to be a non-SMP write barrier because we are writing
1408          * to memory which is shared with the peer LDOM.
1409          */
1410         dma_wmb();
1411
1412         d->hdr.state = VIO_DESC_READY;
1413
1414         /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
1415          * to notify the consumer that some descriptors are READY.
1416          * After that "start" trigger, no additional triggers are needed until
1417          * a DRING_STOPPED is received from the consumer. The dr->cons field
1418          * (set up by vnet_ack()) has the value of the next dring index
1419          * that has not yet been ack-ed. We send a "start" trigger here
1420          * if, and only if, start_cons is true (reset it afterward). Conversely,
1421          * vnet_ack() should check if the dring corresponding to cons
1422          * is marked READY, but start_cons was false.
1423          * If so, vnet_ack() should send out the missed "start" trigger.
1424          *
1425          * Note that the dma_wmb() above makes sure the cookies et al. are
1426          * not globally visible before the VIO_DESC_READY, and that the
1427          * stores are ordered correctly by the compiler. The consumer will
1428          * not proceed until the VIO_DESC_READY is visible, ensuring that
1429          * it does not observe anything related to the descriptor out of
1430          * order. The HV trap raised by the LDC start trigger is the
1431          * producer-to-consumer announcement that work is available to the
1432          * consumer.
1433          */
1434         if (!port->start_cons) { /* previous trigger suffices */
1435                 trace_vnet_skip_tx_trigger(port->vio._local_sid,
1436                                            port->vio._peer_sid, dr->cons);
1437                 goto ldc_start_done;
1438         }
1439
1440         err = __vnet_tx_trigger(port, dr->cons);
1441         if (unlikely(err < 0)) {
1442                 netdev_info(dev, "TX trigger error %d\n", err);
1443                 d->hdr.state = VIO_DESC_FREE;
1444                 skb = port->tx_bufs[txi].skb;
1445                 port->tx_bufs[txi].skb = NULL;
1446                 dev->stats.tx_carrier_errors++;
1447                 goto out_dropped;
1448         }
1449
1450 ldc_start_done:
1451         port->start_cons = false;
1452
1453         dev->stats.tx_packets++;
1454         dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;
1455
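        /* Advance the producer index; VNET_TX_RING_SIZE is a power of two,
         * so the mask wraps it.  If the ring is now full, stop the queue,
         * then re-check availability so the queue is not left stopped if
         * descriptors were freed concurrently.
         */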
1456         dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
1457         if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
1458                 netif_tx_stop_queue(txq);
1459                 if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
1460                         netif_tx_wake_queue(txq);
1461         }
1462
1463         (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
1464         rcu_read_unlock();
1465
1466         vnet_free_skbs(freeskbs);
1467
1468         return NETDEV_TX_OK;
1469
1470 out_dropped:
1471         if (pending)
1472                 (void)mod_timer(&port->clean_timer,
1473                                 jiffies + VNET_CLEAN_TIMEOUT);
1474         else if (port)
1475                 del_timer(&port->clean_timer);
1476         if (port)
1477                 rcu_read_unlock();
1478         if (skb)
1479                 dev_kfree_skb(skb);
1480         vnet_free_skbs(freeskbs);
1481         dev->stats.tx_dropped++;
1482         return NETDEV_TX_OK;
1483 }
1484
1485 static void vnet_tx_timeout(struct net_device *dev)
1486 {
1487         /* XXX Implement me XXX */
1488 }
1489
1490 static int vnet_open(struct net_device *dev)
1491 {
1492         netif_carrier_on(dev);
1493         netif_tx_start_all_queues(dev);
1494
1495         return 0;
1496 }
1497
1498 static int vnet_close(struct net_device *dev)
1499 {
1500         netif_tx_stop_all_queues(dev);
1501         netif_carrier_off(dev);
1502
1503         return 0;
1504 }
1505
1506 static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
1507 {
1508         struct vnet_mcast_entry *m;
1509
1510         for (m = vp->mcast_list; m; m = m->next) {
1511                 if (ether_addr_equal(m->addr, addr))
1512                         return m;
1513         }
1514         return NULL;
1515 }
1516
1517 static void __update_mc_list(struct vnet *vp, struct net_device *dev)
1518 {
1519         struct netdev_hw_addr *ha;
1520
1521         netdev_for_each_mc_addr(ha, dev) {
1522                 struct vnet_mcast_entry *m;
1523
1524                 m = __vnet_mc_find(vp, ha->addr);
1525                 if (m) {
1526                         m->hit = 1;
1527                         continue;
1528                 }
1529
1530                 if (!m) {
1531                         m = kzalloc(sizeof(*m), GFP_ATOMIC);
1532                         if (!m)
1533                                 continue;
1534                         memcpy(m->addr, ha->addr, ETH_ALEN);
1535                         m->hit = 1;
1536
1537                         m->next = vp->mcast_list;
1538                         vp->mcast_list = m;
1539                 }
1540         }
1541 }
1542
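/* Push the multicast filter to the switch port: first send the addresses
 * newly added to mcast_list (set = 1) in batches of VNET_NUM_MCAST, then walk
 * the list again and send a removal request (set = 0) for every entry whose
 * hit flag was not refreshed by __update_mc_list(), freeing those entries.
 */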
1543 static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
1544 {
1545         struct vio_net_mcast_info info;
1546         struct vnet_mcast_entry *m, **pp;
1547         int n_addrs;
1548
1549         memset(&info, 0, sizeof(info));
1550
1551         info.tag.type = VIO_TYPE_CTRL;
1552         info.tag.stype = VIO_SUBTYPE_INFO;
1553         info.tag.stype_env = VNET_MCAST_INFO;
1554         info.tag.sid = vio_send_sid(&port->vio);
1555         info.set = 1;
1556
1557         n_addrs = 0;
1558         for (m = vp->mcast_list; m; m = m->next) {
1559                 if (m->sent)
1560                         continue;
1561                 m->sent = 1;
1562                 memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
1563                        m->addr, ETH_ALEN);
1564                 if (++n_addrs == VNET_NUM_MCAST) {
1565                         info.count = n_addrs;
1566
1567                         (void) vio_ldc_send(&port->vio, &info,
1568                                             sizeof(info));
1569                         n_addrs = 0;
1570                 }
1571         }
1572         if (n_addrs) {
1573                 info.count = n_addrs;
1574                 (void) vio_ldc_send(&port->vio, &info, sizeof(info));
1575         }
1576
1577         info.set = 0;
1578
1579         n_addrs = 0;
1580         pp = &vp->mcast_list;
1581         while ((m = *pp) != NULL) {
1582                 if (m->hit) {
1583                         m->hit = 0;
1584                         pp = &m->next;
1585                         continue;
1586                 }
1587
1588                 memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
1589                        m->addr, ETH_ALEN);
1590                 if (++n_addrs == VNET_NUM_MCAST) {
1591                         info.count = n_addrs;
1592                         (void) vio_ldc_send(&port->vio, &info,
1593                                             sizeof(info));
1594                         n_addrs = 0;
1595                 }
1596
1597                 *pp = m->next;
1598                 kfree(m);
1599         }
1600         if (n_addrs) {
1601                 info.count = n_addrs;
1602                 (void) vio_ldc_send(&port->vio, &info, sizeof(info));
1603         }
1604 }
1605
1606 static void vnet_set_rx_mode(struct net_device *dev)
1607 {
1608         struct vnet *vp = netdev_priv(dev);
1609         struct vnet_port *port;
1610
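        /* Multicast filtering is handled by the virtual switch, so only the
         * first switch port found on the port list gets the updated
         * address list.
         */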
1611         rcu_read_lock();
1612         list_for_each_entry_rcu(port, &vp->port_list, list) {
1613
1614                 if (port->switch_port) {
1615                         __update_mc_list(vp, dev);
1616                         __send_mc_list(vp, port);
1617                         break;
1618                 }
1619         }
1620         rcu_read_unlock();
1621 }
1622
1623 static int vnet_change_mtu(struct net_device *dev, int new_mtu)
1624 {
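        /* Accept any MTU from the IPv4 minimum (68) up to 64KB - 1. */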
1625         if (new_mtu < 68 || new_mtu > 65535)
1626                 return -EINVAL;
1627
1628         dev->mtu = new_mtu;
1629         return 0;
1630 }
1631
1632 static int vnet_set_mac_addr(struct net_device *dev, void *p)
1633 {
1634         return -EINVAL;
1635 }
1636
1637 static void vnet_get_drvinfo(struct net_device *dev,
1638                              struct ethtool_drvinfo *info)
1639 {
1640         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1641         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1642 }
1643
1644 static u32 vnet_get_msglevel(struct net_device *dev)
1645 {
1646         struct vnet *vp = netdev_priv(dev);
1647         return vp->msg_enable;
1648 }
1649
1650 static void vnet_set_msglevel(struct net_device *dev, u32 value)
1651 {
1652         struct vnet *vp = netdev_priv(dev);
1653         vp->msg_enable = value;
1654 }
1655
1656 static const struct ethtool_ops vnet_ethtool_ops = {
1657         .get_drvinfo            = vnet_get_drvinfo,
1658         .get_msglevel           = vnet_get_msglevel,
1659         .set_msglevel           = vnet_set_msglevel,
1660         .get_link               = ethtool_op_get_link,
1661 };
1662
1663 static void vnet_port_free_tx_bufs(struct vnet_port *port)
1664 {
1665         struct vio_dring_state *dr;
1666         int i;
1667
1668         dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1669
1670         if (dr->base == NULL)
1671                 return;
1672
1673         for (i = 0; i < VNET_TX_RING_SIZE; i++) {
1674                 struct vio_net_desc *d;
1675                 void *skb = port->tx_bufs[i].skb;
1676
1677                 if (!skb)
1678                         continue;
1679
1680                 d = vio_dring_entry(dr, i);
1681
1682                 ldc_unmap(port->vio.lp,
1683                           port->tx_bufs[i].cookies,
1684                           port->tx_bufs[i].ncookies);
1685                 dev_kfree_skb(skb);
1686                 port->tx_bufs[i].skb = NULL;
1687                 d->hdr.state = VIO_DESC_FREE;
1688         }
1689         ldc_free_exp_dring(port->vio.lp, dr->base,
1690                            (dr->entry_size * dr->num_entries),
1691                            dr->cookies, dr->ncookies);
1692         dr->base = NULL;
1693         dr->entry_size = 0;
1694         dr->num_entries = 0;
1695         dr->pending = 0;
1696         dr->ncookies = 0;
1697 }
1698
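/* Drop any queued tx buffers and reset the per-port negotiated parameters
 * (rmtu, tso, tsolen) so they are re-established on the next handshake.
 */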
1699 static void vnet_port_reset(struct vnet_port *port)
1700 {
1701         del_timer(&port->clean_timer);
1702         vnet_port_free_tx_bufs(port);
1703         port->rmtu = 0;
1704         port->tso = true;
1705         port->tsolen = 0;
1706 }
1707
1708 static int vnet_port_alloc_tx_ring(struct vnet_port *port)
1709 {
1710         struct vio_dring_state *dr;
1711         unsigned long len, elen;
1712         int i, err, ncookies;
1713         void *dring;
1714
1715         dr = &port->vio.drings[VIO_DRIVER_TX_RING];
1716
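        /* Each ring entry holds the base vio_net_desc, room for two transfer
         * cookies, and (for VIO >= 1.7) the vio_net_dext extension used for
         * the LSO and checksum-offload flags.
         */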
1717         elen = sizeof(struct vio_net_desc) +
1718                sizeof(struct ldc_trans_cookie) * 2;
1719         if (vio_version_after_eq(&port->vio, 1, 7))
1720                 elen += sizeof(struct vio_net_dext);
1721         len = VNET_TX_RING_SIZE * elen;
1722
1723         ncookies = VIO_MAX_RING_COOKIES;
1724         dring = ldc_alloc_exp_dring(port->vio.lp, len,
1725                                     dr->cookies, &ncookies,
1726                                     (LDC_MAP_SHADOW |
1727                                      LDC_MAP_DIRECT |
1728                                      LDC_MAP_RW));
1729         if (IS_ERR(dring)) {
1730                 err = PTR_ERR(dring);
1731                 goto err_out;
1732         }
1733
1734         dr->base = dring;
1735         dr->entry_size = elen;
1736         dr->num_entries = VNET_TX_RING_SIZE;
1737         dr->prod = dr->cons = 0;
1738         port->start_cons = true; /* need an initial trigger */
1739         dr->pending = VNET_TX_RING_SIZE;
1740         dr->ncookies = ncookies;
1741
1742         for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
1743                 struct vio_net_desc *d;
1744
1745                 d = vio_dring_entry(dr, i);
1746                 d->hdr.state = VIO_DESC_FREE;
1747         }
1748         return 0;
1749
1750 err_out:
1751         vnet_port_free_tx_bufs(port);
1752
1753         return err;
1754 }
1755
1756 #ifdef CONFIG_NET_POLL_CONTROLLER
1757 static void vnet_poll_controller(struct net_device *dev)
1758 {
1759         struct vnet *vp = netdev_priv(dev);
1760         struct vnet_port *port;
1761         unsigned long flags;
1762
1763         spin_lock_irqsave(&vp->lock, flags);
1764         if (!list_empty(&vp->port_list)) {
1765                 port = list_entry(vp->port_list.next, struct vnet_port, list);
1766                 napi_schedule(&port->napi);
1767         }
1768         spin_unlock_irqrestore(&vp->lock, flags);
1769 }
1770 #endif
1771 static LIST_HEAD(vnet_list);
1772 static DEFINE_MUTEX(vnet_list_mutex);
1773
1774 static const struct net_device_ops vnet_ops = {
1775         .ndo_open               = vnet_open,
1776         .ndo_stop               = vnet_close,
1777         .ndo_set_rx_mode        = vnet_set_rx_mode,
1778         .ndo_set_mac_address    = vnet_set_mac_addr,
1779         .ndo_validate_addr      = eth_validate_addr,
1780         .ndo_tx_timeout         = vnet_tx_timeout,
1781         .ndo_change_mtu         = vnet_change_mtu,
1782         .ndo_start_xmit         = vnet_start_xmit,
1783         .ndo_select_queue       = vnet_select_queue,
1784 #ifdef CONFIG_NET_POLL_CONTROLLER
1785         .ndo_poll_controller    = vnet_poll_controller,
1786 #endif
1787 };
1788
1789 static struct vnet *vnet_new(const u64 *local_mac,
1790                              struct vio_dev *vdev)
1791 {
1792         struct net_device *dev;
1793         struct vnet *vp;
1794         int err, i;
1795
1796         dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
1797         if (!dev)
1798                 return ERR_PTR(-ENOMEM);
1799         dev->needed_headroom = VNET_PACKET_SKIP + 8;
1800         dev->needed_tailroom = 8;
1801
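        /* The machine-description property stores the MAC address in the low
         * six bytes of a u64, most significant byte first; unpack it into
         * dev_addr one byte at a time.
         */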
1802         for (i = 0; i < ETH_ALEN; i++)
1803                 dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
1804
1805         vp = netdev_priv(dev);
1806
1807         spin_lock_init(&vp->lock);
1808         vp->dev = dev;
1809
1810         INIT_LIST_HEAD(&vp->port_list);
1811         for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
1812                 INIT_HLIST_HEAD(&vp->port_hash[i]);
1813         INIT_LIST_HEAD(&vp->list);
1814         vp->local_mac = *local_mac;
1815
1816         dev->netdev_ops = &vnet_ops;
1817         dev->ethtool_ops = &vnet_ethtool_ops;
1818         dev->watchdog_timeo = VNET_TX_TIMEOUT;
1819
1820         dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
1821                            NETIF_F_HW_CSUM | NETIF_F_SG;
1822         dev->features = dev->hw_features;
1823
1824         SET_NETDEV_DEV(dev, &vdev->dev);
1825
1826         err = register_netdev(dev);
1827         if (err) {
1828                 pr_err("Cannot register net device, aborting\n");
1829                 goto err_out_free_dev;
1830         }
1831
1832         netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);
1833
1834         list_add(&vp->list, &vnet_list);
1835
1836         return vp;
1837
1838 err_out_free_dev:
1839         free_netdev(dev);
1840
1841         return ERR_PTR(err);
1842 }
1843
1844 static struct vnet *vnet_find_or_create(const u64 *local_mac,
1845                                         struct vio_dev *vdev)
1846 {
1847         struct vnet *iter, *vp;
1848
1849         mutex_lock(&vnet_list_mutex);
1850         vp = NULL;
1851         list_for_each_entry(iter, &vnet_list, list) {
1852                 if (iter->local_mac == *local_mac) {
1853                         vp = iter;
1854                         break;
1855                 }
1856         }
1857         if (!vp)
1858                 vp = vnet_new(local_mac, vdev);
1859         mutex_unlock(&vnet_list_mutex);
1860
1861         return vp;
1862 }
1863
1864 static void vnet_cleanup(void)
1865 {
1866         struct vnet *vp;
1867         struct net_device *dev;
1868
1869         mutex_lock(&vnet_list_mutex);
1870         while (!list_empty(&vnet_list)) {
1871                 vp = list_first_entry(&vnet_list, struct vnet, list);
1872                 list_del(&vp->list);
1873                 dev = vp->dev;
1874                 /* vio_unregister_driver() should have cleaned up port_list */
1875                 BUG_ON(!list_empty(&vp->port_list));
1876                 unregister_netdev(dev);
1877                 free_netdev(dev);
1878         }
1879         mutex_unlock(&vnet_list_mutex);
1880 }
1881
1882 static const char *local_mac_prop = "local-mac-address";
1883
1884 static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
1885                                      u64 port_node,
1886                                      struct vio_dev *vdev)
1887 {
1888         const u64 *local_mac = NULL;
1889         u64 a;
1890
1891         mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
1892                 u64 target = mdesc_arc_target(hp, a);
1893                 const char *name;
1894
1895                 name = mdesc_get_property(hp, target, "name", NULL);
1896                 if (!name || strcmp(name, "network"))
1897                         continue;
1898
1899                 local_mac = mdesc_get_property(hp, target,
1900                                                local_mac_prop, NULL);
1901                 if (local_mac)
1902                         break;
1903         }
1904         if (!local_mac)
1905                 return ERR_PTR(-ENODEV);
1906
1907         return vnet_find_or_create(local_mac, vdev);
1908 }
1909
1910 static struct ldc_channel_config vnet_ldc_cfg = {
1911         .event          = vnet_event,
1912         .mtu            = 64,
1913         .mode           = LDC_MODE_UNRELIABLE,
1914 };
1915
1916 static struct vio_driver_ops vnet_vio_ops = {
1917         .send_attr              = vnet_send_attr,
1918         .handle_attr            = vnet_handle_attr,
1919         .handshake_complete     = vnet_handshake_complete,
1920 };
1921
1922 static void print_version(void)
1923 {
1924         printk_once(KERN_INFO "%s", version);
1925 }
1926
1927 const char *remote_macaddr_prop = "remote-mac-address";
1928
1929 static void
1930 vnet_port_add_txq(struct vnet_port *port)
1931 {
1932         struct vnet *vp = port->vp;
1933         int n;
1934
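        /* Assign tx queues to ports round-robin; once there are more than
         * VNET_MAX_TXQS ports the index wraps and queues are shared.
         */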
1935         n = vp->nports++;
1936         n = n & (VNET_MAX_TXQS - 1);
1937         port->q_index = n;
1938         netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
1939 }
1940
1941 static void
1942 vnet_port_rm_txq(struct vnet_port *port)
1943 {
1944         port->vp->nports--;
1945         netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
1946 }
1947
1948 static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1949 {
1950         struct mdesc_handle *hp;
1951         struct vnet_port *port;
1952         unsigned long flags;
1953         struct vnet *vp;
1954         const u64 *rmac;
1955         int len, i, err, switch_port;
1956
1957         print_version();
1958
1959         hp = mdesc_grab();
1960
1961         vp = vnet_find_parent(hp, vdev->mp, vdev);
1962         if (IS_ERR(vp)) {
1963                 pr_err("Cannot find port parent vnet\n");
1964                 err = PTR_ERR(vp);
1965                 goto err_out_put_mdesc;
1966         }
1967
1968         rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
1969         err = -ENODEV;
1970         if (!rmac) {
1971                 pr_err("Port lacks %s property\n", remote_macaddr_prop);
1972                 goto err_out_put_mdesc;
1973         }
1974
1975         port = kzalloc(sizeof(*port), GFP_KERNEL);
1976         err = -ENOMEM;
1977         if (!port)
1978                 goto err_out_put_mdesc;
1979
1980         for (i = 0; i < ETH_ALEN; i++)
1981                 port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;
1982
1983         port->vp = vp;
1984
1985         err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
1986                               vnet_versions, ARRAY_SIZE(vnet_versions),
1987                               &vnet_vio_ops, vp->dev->name);
1988         if (err)
1989                 goto err_out_free_port;
1990
1991         err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
1992         if (err)
1993                 goto err_out_free_port;
1994
1995         netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT);
1996
1997         INIT_HLIST_NODE(&port->hash);
1998         INIT_LIST_HEAD(&port->list);
1999
2000         switch_port = 0;
2001         if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
2002                 switch_port = 1;
2003         port->switch_port = switch_port;
2004         port->tso = true;
2005         port->tsolen = 0;
2006
2007         spin_lock_irqsave(&vp->lock, flags);
2008         if (switch_port)
2009                 list_add_rcu(&port->list, &vp->port_list);
2010         else
2011                 list_add_tail_rcu(&port->list, &vp->port_list);
2012         hlist_add_head_rcu(&port->hash,
2013                            &vp->port_hash[vnet_hashfn(port->raddr)]);
2014         vnet_port_add_txq(port);
2015         spin_unlock_irqrestore(&vp->lock, flags);
2016
2017         dev_set_drvdata(&vdev->dev, port);
2018
2019         pr_info("%s: PORT ( remote-mac %pM%s )\n",
2020                 vp->dev->name, port->raddr, switch_port ? " switch-port" : "");
2021
2022         setup_timer(&port->clean_timer, vnet_clean_timer_expire,
2023                     (unsigned long)port);
2024
2025         napi_enable(&port->napi);
2026         vio_port_up(&port->vio);
2027
2028         mdesc_release(hp);
2029
2030         return 0;
2031
2032 err_out_free_port:
2033         kfree(port);
2034
2035 err_out_put_mdesc:
2036         mdesc_release(hp);
2037         return err;
2038 }
2039
2040 static int vnet_port_remove(struct vio_dev *vdev)
2041 {
2042         struct vnet_port *port = dev_get_drvdata(&vdev->dev);
2043
2044         if (port) {
2045
2046                 del_timer_sync(&port->vio.timer);
2047
2048                 napi_disable(&port->napi);
2049
2050                 list_del_rcu(&port->list);
2051                 hlist_del_rcu(&port->hash);
2052
2053                 synchronize_rcu();
2054                 del_timer_sync(&port->clean_timer);
2055                 vnet_port_rm_txq(port);
2056                 netif_napi_del(&port->napi);
2057                 vnet_port_free_tx_bufs(port);
2058                 vio_ldc_free(&port->vio);
2059
2060                 dev_set_drvdata(&vdev->dev, NULL);
2061
2062                 kfree(port);
2063
2064         }
2065         return 0;
2066 }
2067
2068 static const struct vio_device_id vnet_port_match[] = {
2069         {
2070                 .type = "vnet-port",
2071         },
2072         {},
2073 };
2074 MODULE_DEVICE_TABLE(vio, vnet_port_match);
2075
2076 static struct vio_driver vnet_port_driver = {
2077         .id_table       = vnet_port_match,
2078         .probe          = vnet_port_probe,
2079         .remove         = vnet_port_remove,
2080         .name           = "vnet_port",
2081 };
2082
2083 static int __init vnet_init(void)
2084 {
2085         return vio_register_driver(&vnet_port_driver);
2086 }
2087
2088 static void __exit vnet_exit(void)
2089 {
2090         vio_unregister_driver(&vnet_port_driver);
2091         vnet_cleanup();
2092 }
2093
2094 module_init(vnet_init);
2095 module_exit(vnet_exit);