/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#define CREATE_TRACE_POINTS
#include <trace/events/sunvnet.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet.h"
#define DRV_MODULE_NAME		"sunvnet"
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VNET_MAX_TXQS		16

/* Heuristic for the number of times to exponentially backoff and
 * retry sending an LDC trigger when EAGAIN is encountered
 */
#define	VNET_MAX_RETRIES	10
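/* Both vnet_send_ack() and __vnet_tx_trigger() below use this bound:
 * their retry loops udelay() starting at 1us, doubling on each -EAGAIN
 * up to a 128us cap, and give up once retries exceed VNET_MAX_RETRIES,
 * so at most a few hundred microseconds are spent spinning per send.
 */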
static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
static void vnet_port_reset(struct vnet_port *port);
/* Ordered from largest major to lowest */
static struct vio_version vnet_versions[] = {
	{ .major = 1, .minor = 8 },
	{ .major = 1, .minor = 7 },
	{ .major = 1, .minor = 6 },
	{ .major = 1, .minor = 0 },
};

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}
static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	pr_err("Resetting connection\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port);
static int vnet_send_attr(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = port->vp->dev;
	struct vio_net_attr_info pkt;
	int framelen = ETH_FRAME_LEN;
	int i, err;

	err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
	if (err)
		return err;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	if (vio_version_before(vio, 1, 2))
		pkt.xfer_mode = VIO_DRING_MODE;
	else
		pkt.xfer_mode = VIO_NEW_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	if (vio_version_after(vio, 1, 3)) {
		if (port->rmtu) {
			port->rmtu = min(VNET_MAXPACKET, port->rmtu);
			pkt.mtu = port->rmtu;
		} else {
			port->rmtu = VNET_MAXPACKET;
			pkt.mtu = port->rmtu;
		}
		if (vio_version_after_eq(vio, 1, 6))
			pkt.options = VIO_TX_DRING;
	} else if (vio_version_before(vio, 1, 3)) {
		pkt.mtu = framelen;
	} else { /* v1.3 */
		pkt.mtu = framelen + VLAN_HLEN;
	}

	pkt.cflags = 0;
	if (vio_version_after_eq(vio, 1, 7) && port->tso) {
		pkt.cflags |= VNET_LSO_IPV4_CAPAB;
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		pkt.ipv4_lso_maxlen = port->tsolen;
	}

	pkt.plnk_updt = PHYSLINK_UPDATE_NONE;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       "cflags[0x%04x] lso_max[%u]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long)pkt.addr,
	       pkt.ack_freq, pkt.plnk_updt, pkt.options,
	       (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	struct vnet_port *port = to_vnet_port(vio);
	u64 localmtu;
	u8 xfer_mode;

	viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.sid = vio_send_sid(vio);

	xfer_mode = pkt->xfer_mode;
	/* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
	if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
		xfer_mode = VIO_NEW_DRING_MODE;

	/* MTU negotiation:
	 *	< v1.3 - ETH_FRAME_LEN exactly
	 *	> v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
	 *		 pkt->mtu for ACK
	 *	= v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
	 */
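	/* For example, on a > v1.3 session where the peer advertises
	 * mtu 9000 while port->rmtu is still 0, localmtu below becomes
	 * min(9000, VNET_MAXPACKET) and pkt->mtu is rewritten to that
	 * value before the ACK is sent back.
	 */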
	if (vio_version_before(vio, 1, 3)) {
		localmtu = ETH_FRAME_LEN;
	} else if (vio_version_after(vio, 1, 3)) {
		localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
		localmtu = min(pkt->mtu, localmtu);
		pkt->mtu = localmtu;
	} else { /* v1.3 */
		localmtu = ETH_FRAME_LEN + VLAN_HLEN;
	}
	port->rmtu = localmtu;

	/* LSO negotiation */
	if (vio_version_after_eq(vio, 1, 7))
		port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
	else
		port->tso = false;
	if (port->tso) {
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
		if (port->tsolen < VNET_MINTSO) {
			port->tso = false;
			port->tsolen = 0;
			pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		}
		pkt->ipv4_lso_maxlen = port->tsolen;
	} else {
		pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		pkt->ipv4_lso_maxlen = 0;
	}

	/* for version >= 1.6, ACK packet mode we support */
	if (vio_version_after_eq(vio, 1, 6)) {
		pkt->xfer_mode = VIO_NEW_DRING_MODE;
		pkt->options = VIO_TX_DRING;
	}

	if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != localmtu) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void) vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	}

	viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
	       "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
	       "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.stype = VIO_SUBTYPE_ACK;

	return vio_ldc_send(vio, pkt, sizeof(*pkt));
}
static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}
static int vnet_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}
static void vnet_handshake_complete(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->snd_nxt = dr->rcv_nxt = 1;
}
/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
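/* Worked example of the receive-side arithmetic: a 60-byte frame plus
 * the 6 implied VNET_PACKET_SKIP bytes needs 66 bytes copied, which
 * rounds up to the next 8-byte multiple as
 * copy_len = (60 + VNET_PACKET_SKIP + 7U) & ~7U = 72,
 * exactly the computation vnet_rx_one() performs below.
 */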
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8);
	unsigned long addr, off;

	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long) skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}
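/* Compute a complete TCP or UDP checksum in software for an IPv4 skb.
 * Used wherever CHECKSUM_PARTIAL cannot be handed to the peer: before
 * transmit in vnet_start_xmit(), and on receive when the peer requests
 * a full checksum via VNET_PKT_HCK_FULLCKSUM.
 */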
static inline void vnet_fullcsum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int offset = skb_transport_offset(skb);

	if (skb->protocol != htons(ETH_P_IP))
		return;
	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_level = 1;
	skb->csum = 0;
	if (iph->protocol == IPPROTO_TCP) {
		struct tcphdr *ptcp = tcp_hdr(skb);

		ptcp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_TCP,
						skb->csum);
	} else if (iph->protocol == IPPROTO_UDP) {
		struct udphdr *pudp = udp_hdr(skb);

		pudp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_UDP,
						skb->csum);
	}
}
static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
	struct net_device *dev = port->vp->dev;
	unsigned int len = desc->size;
	unsigned int copy_len;
	struct sk_buff *skb;
	int maxlen;
	int err;

	err = -EMSGSIZE;
	if (port->tso && port->tsolen > port->rmtu)
		maxlen = port->tsolen;
	else
		maxlen = port->rmtu;
	if (unlikely(len < ETH_ZLEN || len > maxlen)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       desc->cookies, desc->ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	if (vio_version_after_eq(&port->vio, 1, 8)) {
		struct vio_net_dext *dext = vio_net_ext(desc);

		skb_reset_network_header(skb);

		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);

				iph->check = 0;
				ip_send_check(iph);
			}
		}
		if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
		    skb->ip_summed == CHECKSUM_NONE) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);
				int ihl = iph->ihl * 4;

				skb_reset_transport_header(skb);
				skb_set_transport_header(skb, ihl);
				vnet_fullcsum(skb);
			}
		}
		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_level = 0;
			if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
				skb->csum_level = 1;
		}
	}

	skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	napi_gro_receive(&port->napi, skb);
	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}
static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_ACK,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= start,
		.end_idx		= end,
		.state			= vio_dring_state,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES) {
			pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
				port->raddr[0], port->raddr[1],
				port->raddr[2], port->raddr[3],
				port->raddr[4], port->raddr[5]);
			break;
		}
	} while (err == -EAGAIN);

	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
		port->stop_rx_idx = end;
		port->stop_rx = true;
	} else {
		port->stop_rx_idx = 0;
		port->stop_rx = false;
	}

	return err;
}
static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}
static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}
static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	BUG_ON(desc == NULL);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	dma_rmb();

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	err = vnet_rx_one(port, desc);
	if (err == -ECONNRESET)
		return err;
	trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
			  index, desc->hdr.ack);
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end, int *npkts, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;
	bool send_ack = true;

	end = (end == (u32) -1) ? vio_dring_prev(dr, start)
				: vio_dring_next(dr, end);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);
		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		(*npkts)++;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = vio_dring_next(dr, start);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
		if ((*npkts) >= budget) {
			send_ack = false;
			break;
		}
	}
	if (unlikely(ack_start == -1))
		ack_start = ack_end = vio_dring_prev(dr, start);
	if (send_ack) {
		port->napi_resume = false;
		trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
					       port->vio._peer_sid,
					       ack_end, *npkts);
		return vnet_send_ack(port, dr, ack_start, ack_end,
				     VIO_DRING_STOPPED);
	} else {
		trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
						port->vio._peer_sid,
						ack_end, *npkts);
		port->napi_resume = true;
		port->napi_stop_idx = ack_end;
		return 1;
	}
}
static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
		   int budget)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
		       pkt->seq, dr->rcv_nxt);
		return 0;
	}

	if (!port->napi_resume)
		dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
			    npkts, budget);
}
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = vio_dring_next(dr, idx);
	}
	return found;
}
static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	struct vnet *vp;
	u32 end;
	struct vio_net_desc *desc;
	struct netdev_queue *txq;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	vp = port->vp;
	dev = vp->dev;
	netif_tx_lock(dev);
	if (unlikely(!idx_is_pending(dr, end))) {
		netif_tx_unlock(dev);
		return 0;
	}

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	trace_vnet_rx_stopped_ack(port->vio._local_sid,
				  port->vio._peer_sid, end);
	dr->cons = vio_dring_next(dr, end);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}
static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       port->vp->dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}
/* Got back a STOPPED LDC message on port. If the queue is stopped,
 * wake it up so that we'll send out another START message at the
 * next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(port->vp->dev, port->q_index);
	__netif_tx_lock(txq, smp_processor_id());
	if (likely(netif_tx_queue_stopped(txq))) {
		struct vio_dring_state *dr;

		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
		netif_tx_wake_queue(txq);
	}
	__netif_tx_unlock(txq);
}
static inline bool port_is_up(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}
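/* NAPI poll engine for a port: fold any pending RESET/UP control events
 * into the VIO state machine first, then drain LDC data messages until
 * either the channel is empty or the budget is consumed.  When a dring
 * walk is cut short by the budget, port->napi_resume lets the next poll
 * resume from port->napi_stop_idx instead of re-reading LDC.
 */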
static int vnet_event_napi(struct vnet_port *port, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int tx_wakeup, err;
	int npkts = 0;
	int event = (port->rx_event & LDC_EVENT_RESET);

ldc_ctrl:
	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);

		if (event == LDC_EVENT_RESET) {
			vnet_port_reset(port);
			vio_port_up(vio);
		}
		port->rx_event = 0;
		return 0;
	}
	/* We may have multiple LDC events in rx_event. Unroll send_events() */
	event = (port->rx_event & LDC_EVENT_UP);
	port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP);
	if (event == LDC_EVENT_UP)
		goto ldc_ctrl;
	event = port->rx_event;
	if (!(event & LDC_EVENT_DATA_READY))
		return 0;

	/* we dont expect any other bits than RESET, UP, DATA_READY */
	BUG_ON(event != LDC_EVENT_DATA_READY);

	tx_wakeup = err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		if (port->napi_resume) {
			struct vio_dring_data *pkt =
				(struct vio_dring_data *)&msgbuf;
			struct vio_dring_state *dr =
				&port->vio.drings[VIO_DRIVER_RX_RING];

			pkt->tag.type = VIO_TYPE_DATA;
			pkt->tag.stype = VIO_SUBTYPE_INFO;
			pkt->tag.stype_env = VIO_DRING_DATA;
			pkt->seq = dr->rcv_nxt;
			pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
			pkt->end_idx = -1;
			goto napi_resume;
		}
		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;
napi_resume:
		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				if (!port_is_up(port)) {
					/* failures like handshake_failure()
					 * may have cleaned up dring, but
					 * NAPI polling may bring us here.
					 */
					err = -ECONNRESET;
					break;
				}
				err = vnet_rx(port, &msgbuf, &npkts, budget);
				if (npkts >= budget)
					break;
				if (npkts == 0)
					break;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port);
	return npkts;
}
static int vnet_poll(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	struct vio_driver_state *vio = &port->vio;
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {
		napi_complete(napi);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
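/* LDC interrupt handler: record the event bits, mask the RX interrupt
 * and defer all processing to NAPI; vnet_poll() above re-enables the
 * interrupt once it completes under budget.
 */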
static void vnet_event(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);
}
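/* Send the LDC "start" trigger for the TX dring.  Any STOPPED ack that
 * vnet_send_ack() failed to deliver earlier (recorded in port->stop_rx)
 * is flushed first, and -EAGAIN from vio_ldc_send() is retried with the
 * exponential backoff described at VNET_MAX_RETRIES.
 */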
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= start,
		.end_idx		= (u32) -1,
	};
	int err, delay;
	int retries = 0;

	if (port->stop_rx) {
		trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
						  port->vio._peer_sid,
						  port->stop_rx_idx, -1);
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);
	trace_vnet_tx_trigger(port->vio._local_sid,
			      port->vio._peer_sid, start, err);

	return err;
}
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
	unsigned int hash = vnet_hashfn(skb->data);
	struct hlist_head *hp = &vp->port_hash[hash];
	struct vnet_port *port;

	hlist_for_each_entry_rcu(port, hp, hash) {
		if (!port_is_up(port))
			continue;
		if (ether_addr_equal(port->raddr, skb->data))
			return port;
	}
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		if (!port->switch_port)
			continue;
		if (!port_is_up(port))
			continue;
		return port;
	}
	return NULL;
}
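/* Walk the TX dring backwards from the most recently produced entry,
 * chaining the skbs of completed (DONE) descriptors onto a list that
 * the caller frees outside the tx lock; *pending counts descriptors
 * still marked READY, i.e. not yet consumed by the peer.
 */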
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  unsigned *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

	txi = dr->prod;
	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE-1;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_READY) {
			(*pending)++;
			continue;
		}
		if (port->tx_bufs[txi].skb) {
			if (d->hdr.state != VIO_DESC_DONE)
				pr_notice("invalid ring buffer state %d\n",
					  d->hdr.state);
			BUG_ON(port->tx_bufs[txi].skb->next);

			port->tx_bufs[txi].skb->next = skb;
			skb = port->tx_bufs[txi].skb;
			port->tx_bufs[txi].skb = NULL;

			ldc_unmap(port->vio.lp,
				  port->tx_bufs[txi].cookies,
				  port->tx_bufs[txi].ncookies);
		} else if (d->hdr.state == VIO_DESC_FREE)
			break;
		d->hdr.state = VIO_DESC_FREE;
	}
	return skb;
}
static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}

static void vnet_clean_timer_expire(unsigned long port0)
{
	struct vnet_port *port = (struct vnet_port *)port0;
	struct sk_buff *freeskbs;
	unsigned pending;

	netif_tx_lock(port->vp->dev);
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(port->vp->dev);

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
			       struct ldc_trans_cookie *cookies, int ncookies,
			       unsigned int map_perm)
{
	int i, nc, err, blen;

	/* header */
	blen = skb_headlen(skb);
	if (blen < ETH_ZLEN)
		blen = ETH_ZLEN;
	blen += VNET_PACKET_SKIP;
	blen += 8 - (blen & 7);

	err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies,
			     ncookies, map_perm);
	if (err < 0)
		return err;
	nc = err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		if (nc < ncookies) {
			vaddr = kmap_atomic(skb_frag_page(f));
			blen = skb_frag_size(f);
			blen += 8 - (blen & 7);
			err = ldc_map_single(lp, vaddr + f->page_offset,
					     blen, cookies + nc, ncookies - nc,
					     map_perm);
			kunmap_atomic(vaddr);
		} else {
			err = -EMSGSIZE;
		}

		if (err < 0) {
			ldc_unmap(lp, cookies, nc);
			return err;
		}
		nc += err;
	}
	return nc;
}
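/* Make an skb acceptable to vnet_skb_map(): skb->data must sit
 * VNET_PACKET_SKIP bytes past an 8-byte boundary, there must be enough
 * head and tail room for the implied skip bytes and the 8-byte padding,
 * and the fragment count must fit the available cookies.  If any of
 * that fails, copy into a freshly aligned skb, folding in a software
 * checksum when the original carried CHECKSUM_PARTIAL.
 */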
static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
	struct sk_buff *nskb;
	int i, len, pad, docopy;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len += pad;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);

	/* make sure we have enough cookies and alignment in every frag */
	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		docopy |= f->page_offset & 7;
	}
	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
		int start = 0, offset;
		__wsum csum;

		len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
		nskb = alloc_and_align_skb(skb->dev, len);
		if (nskb == NULL) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);

		nskb->protocol = skb->protocol;
		offset = skb_mac_header(skb) - skb->data;
		skb_set_mac_header(nskb, offset);
		offset = skb_network_header(skb) - skb->data;
		skb_set_network_header(nskb, offset);
		offset = skb_transport_header(skb) - skb->data;
		skb_set_transport_header(nskb, offset);

		offset = 0;
		nskb->csum_offset = skb->csum_offset;
		nskb->ip_summed = skb->ip_summed;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			start = skb_checksum_start_offset(skb);
		if (start) {
			struct iphdr *iph = ip_hdr(nskb);
			int offset = start + nskb->csum_offset;

			if (skb_copy_bits(skb, 0, nskb->data, start)) {
				dev_kfree_skb(nskb);
				dev_kfree_skb(skb);
				return NULL;
			}
			*(__sum16 *)(skb->data + offset) = 0;
			csum = skb_copy_and_csum_bits(skb, start,
						      nskb->data + start,
						      skb->len - start, 0);
			if (iph->protocol == IPPROTO_TCP ||
			    iph->protocol == IPPROTO_UDP) {
				csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - start,
							 iph->protocol, csum);
			}
			*(__sum16 *)(nskb->data + offset) = csum;

			nskb->ip_summed = CHECKSUM_NONE;
		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		if (skb_is_gso(skb)) {
			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
		}
		nskb->queue_mapping = skb->queue_mapping;
		dev_kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
static u16
vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
		  void *accel_priv, select_queue_fallback_t fallback)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = __tx_port_find(vp, skb);

	if (port == NULL)
		return 0;

	return port->q_index;
}

static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev);
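/* Software GSO path for TSO frames larger than the negotiated limit:
 * re-segment the skb to the peer's TSO size (port->tsolen minus the
 * protocol headers) and feed each resulting segment back through
 * vnet_start_xmit().
 */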
static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
{
	struct net_device *dev = port->vp->dev;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *segs;
	int maclen, datalen;
	int status;
	int gso_size, gso_type, gso_segs;
	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
	int proto = IPPROTO_IP;

	if (skb->protocol == htons(ETH_P_IP))
		proto = ip_hdr(skb)->protocol;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP)
		hlen += tcp_hdr(skb)->doff * 4;
	else if (proto == IPPROTO_UDP)
		hlen += sizeof(struct udphdr);
	else {
		pr_err("vnet_handle_offloads GSO with unknown transport "
		       "protocol %d tproto %d\n", ntohs(skb->protocol), proto);
		hlen = 128; /* XXX */
	}
	datalen = port->tsolen - hlen;

	gso_size = skb_shinfo(skb)->gso_size;
	gso_type = skb_shinfo(skb)->gso_type;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (port->tso && gso_size < datalen)
		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

	if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, port->q_index);
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
			return NETDEV_TX_BUSY;
		netif_tx_wake_queue(txq);
	}

	maclen = skb_network_header(skb) - skb_mac_header(skb);
	skb_pull(skb, maclen);

	if (port->tso && gso_size < datalen) {
		if (skb_unclone(skb, GFP_ATOMIC))
			goto out_dropped;

		/* segment to TSO size */
		skb_shinfo(skb)->gso_size = datalen;
		skb_shinfo(skb)->gso_segs = gso_segs;
	}
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out_dropped;

	skb_push(skb, maclen);
	skb_reset_mac_header(skb);

	status = 0;
	while (segs) {
		struct sk_buff *curr = segs;

		segs = segs->next;
		curr->next = NULL;
		if (port->tso && curr->len > dev->mtu) {
			skb_shinfo(curr)->gso_size = gso_size;
			skb_shinfo(curr)->gso_type = gso_type;
			skb_shinfo(curr)->gso_segs =
				DIV_ROUND_UP(curr->len - hlen, gso_size);
		} else {
			skb_shinfo(curr)->gso_size = 0;
		}

		skb_push(curr, maclen);
		skb_reset_mac_header(curr);
		memcpy(skb_mac_header(curr), skb_mac_header(skb),
		       maclen);
		curr->csum_start = skb_transport_header(curr) - curr->head;
		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
			curr->csum_offset = offsetof(struct tcphdr, check);
		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
			curr->csum_offset = offsetof(struct udphdr, check);

		if (!(status & NETDEV_TX_MASK))
			status = vnet_start_xmit(curr, dev);
		if (status & NETDEV_TX_MASK)
			dev_kfree_skb_any(curr);
	}

	if (!(status & NETDEV_TX_MASK))
		dev_kfree_skb_any(skb);
	return status;
out_dropped:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port = NULL;
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned int len;
	struct sk_buff *freeskbs = NULL;
	int i, err, txi;
	unsigned pending = 0;
	struct netdev_queue *txq;

	rcu_read_lock();
	port = __tx_port_find(vp, skb);
	if (unlikely(!port)) {
		rcu_read_unlock();
		goto out_dropped;
	}

	if (skb_is_gso(skb) && skb->len > port->tsolen) {
		err = vnet_handle_offloads(port, skb);
		rcu_read_unlock();
		return err;
	}

	if (!skb_is_gso(skb) && skb->len > port->rmtu) {
		unsigned long localmtu = port->rmtu - ETH_HLEN;

		if (vio_version_after_eq(&port->vio, 1, 3))
			localmtu -= VLAN_HLEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct flowi4 fl4;
			struct rtable *rt = NULL;

			memset(&fl4, 0, sizeof(fl4));
			fl4.flowi4_oif = dev->ifindex;
			fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
			fl4.daddr = ip_hdr(skb)->daddr;
			fl4.saddr = ip_hdr(skb)->saddr;

			rt = ip_route_output_key(dev_net(dev), &fl4);
			if (!IS_ERR(rt)) {
				skb_dst_set(skb, &rt->dst);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED,
					  htonl(localmtu));
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
		goto out_dropped;
	}

	skb = vnet_skb_shape(skb, 2);

	if (unlikely(!skb))
		goto out_dropped;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vnet_fullcsum(skb);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	i = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, i);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		rcu_read_unlock();
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	txi = dr->prod;

	freeskbs = vnet_clean_tx_ring(port, &pending);

	BUG_ON(port->tx_bufs[txi].skb);

	len = skb->len;
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;

	err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
			   (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
	if (err < 0) {
		netdev_info(dev, "tx buffer map error %d\n", err);
		goto out_dropped;
	}

	port->tx_bufs[txi].skb = skb;
	skb = NULL;
	port->tx_bufs[txi].ncookies = err;

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[txi].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[txi].cookies[i];
	if (vio_version_after_eq(&port->vio, 1, 7)) {
		struct vio_net_dext *dext = vio_net_ext(d);

		memset(dext, 0, sizeof(*dext));
		if (skb_is_gso(port->tx_bufs[txi].skb)) {
			dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
					     ->gso_size;
			dext->flags |= VNET_PKT_IPV4_LSO;
		}
		if (vio_version_after_eq(&port->vio, 1, 8) &&
		    !port->switch_port) {
			dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
			dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
		}
	}

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	dma_wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the dma_wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer
	 */
	if (!port->start_cons) { /* previous trigger suffices */
		trace_vnet_skip_tx_trigger(port->vio._local_sid,
					   port->vio._peer_sid, dr->cons);
		goto ldc_start_done;
	}

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		d->hdr.state = VIO_DESC_FREE;
		skb = port->tx_bufs[txi].skb;
		port->tx_bufs[txi].skb = NULL;
		dev->stats.tx_carrier_errors++;
		goto out_dropped;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_tx_wake_queue(txq);
	}

	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
	rcu_read_unlock();

	vnet_free_skbs(freeskbs);

	return NETDEV_TX_OK;

out_dropped:
	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else if (port)
		del_timer(&port->clean_timer);
	if (port)
		rcu_read_unlock();
	if (skb)
		dev_kfree_skb(skb);
	vnet_free_skbs(freeskbs);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
static void vnet_tx_timeout(struct net_device *dev)
{
	/* XXX Implement me XXX */
}

static int vnet_open(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	return 0;
}

static int vnet_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	return 0;
}
static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (ether_addr_equal(m->addr, addr))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, ha->addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, ha->addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}
static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
		n_addrs = 0;
	}

	info.set = 0;

	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void) vio_ldc_send(&port->vio, &info,
					    sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void) vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}
static void vnet_set_rx_mode(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &vp->port_list, list) {

		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
			break;
		}
	}
	rcu_read_unlock();
}
static int vnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 68 || new_mtu > 65535)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static int vnet_set_mac_addr(struct net_device *dev, void *p)
{
	return -EINVAL;
}
static void vnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static u32 vnet_get_msglevel(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);

	return vp->msg_enable;
}

static void vnet_set_msglevel(struct net_device *dev, u32 value)
{
	struct vnet *vp = netdev_priv(dev);

	vp->msg_enable = value;
}

static const struct ethtool_ops vnet_ethtool_ops = {
	.get_drvinfo		= vnet_get_drvinfo,
	.get_msglevel		= vnet_get_msglevel,
	.set_msglevel		= vnet_set_msglevel,
	.get_link		= ethtool_op_get_link,
};
static void vnet_port_free_tx_bufs(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base == NULL)
		return;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		struct vio_net_desc *d;
		void *skb = port->tx_bufs[i].skb;

		if (!skb)
			continue;

		d = vio_dring_entry(dr, i);

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);
		dev_kfree_skb(skb);
		port->tx_bufs[i].skb = NULL;
		d->hdr.state = VIO_DESC_FREE;
	}
	ldc_free_exp_dring(port->vio.lp, dr->base,
			   (dr->entry_size * dr->num_entries),
			   dr->cookies, dr->ncookies);
	dr->base = NULL;
	dr->entry_size = 0;
	dr->num_entries = 0;
	dr->pending = 0;
	dr->ncookies = 0;
}
static void vnet_port_reset(struct vnet_port *port)
{
	del_timer(&port->clean_timer);
	vnet_port_free_tx_bufs(port);
	port->rmtu = 0;
	port->tso = true;
	port->tsolen = 0;
}
static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len, elen;
	int i, err, ncookies;
	void *dring;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	elen = sizeof(struct vio_net_desc) +
	       sizeof(struct ldc_trans_cookie) * 2;
	if (vio_version_after_eq(&port->vio, 1, 7))
		elen += sizeof(struct vio_net_dext);
	len = VNET_TX_RING_SIZE * elen;

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = elen;
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	port->start_cons = true; /* need an initial trigger */
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		d = vio_dring_entry(dr, i);
		d->hdr.state = VIO_DESC_FREE;
	}
	return 0;

err_out:
	vnet_port_free_tx_bufs(port);

	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void vnet_poll_controller(struct net_device *dev)
{
	struct vnet *vp = netdev_priv(dev);
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);
		napi_schedule(&port->napi);
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}
#endif
static LIST_HEAD(vnet_list);
static DEFINE_MUTEX(vnet_list_mutex);

static const struct net_device_ops vnet_ops = {
	.ndo_open		= vnet_open,
	.ndo_stop		= vnet_close,
	.ndo_set_rx_mode	= vnet_set_rx_mode,
	.ndo_set_mac_address	= vnet_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= vnet_tx_timeout,
	.ndo_change_mtu		= vnet_change_mtu,
	.ndo_start_xmit		= vnet_start_xmit,
	.ndo_select_queue	= vnet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= vnet_poll_controller,
#endif
};
static struct vnet *vnet_new(const u64 *local_mac,
			     struct vio_dev *vdev)
{
	struct net_device *dev;
	struct vnet *vp;
	int err, i;

	dev = alloc_etherdev_mqs(sizeof(*vp), VNET_MAX_TXQS, 1);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	dev->needed_headroom = VNET_PACKET_SKIP + 8;
	dev->needed_tailroom = 8;

	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;

	vp = netdev_priv(dev);

	spin_lock_init(&vp->lock);
	vp->dev = dev;

	INIT_LIST_HEAD(&vp->port_list);
	for (i = 0; i < VNET_PORT_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&vp->port_hash[i]);
	INIT_LIST_HEAD(&vp->list);
	vp->local_mac = *local_mac;

	dev->netdev_ops = &vnet_ops;
	dev->ethtool_ops = &vnet_ethtool_ops;
	dev->watchdog_timeo = VNET_TX_TIMEOUT;

	dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
			   NETIF_F_HW_CSUM | NETIF_F_SG;
	dev->features = dev->hw_features;

	SET_NETDEV_DEV(dev, &vdev->dev);

	err = register_netdev(dev);
	if (err) {
		pr_err("Cannot register net device, aborting\n");
		goto err_out_free_dev;
	}

	netdev_info(dev, "Sun LDOM vnet %pM\n", dev->dev_addr);

	list_add(&vp->list, &vnet_list);

	return vp;

err_out_free_dev:
	free_netdev(dev);

	return ERR_PTR(err);
}
static struct vnet *vnet_find_or_create(const u64 *local_mac,
					struct vio_dev *vdev)
{
	struct vnet *iter, *vp;

	mutex_lock(&vnet_list_mutex);
	vp = NULL;
	list_for_each_entry(iter, &vnet_list, list) {
		if (iter->local_mac == *local_mac) {
			vp = iter;
			break;
		}
	}
	if (!vp)
		vp = vnet_new(local_mac, vdev);
	mutex_unlock(&vnet_list_mutex);

	return vp;
}
static void vnet_cleanup(void)
{
	struct vnet *vp;
	struct net_device *dev;

	mutex_lock(&vnet_list_mutex);
	while (!list_empty(&vnet_list)) {
		vp = list_first_entry(&vnet_list, struct vnet, list);
		list_del(&vp->list);
		dev = vp->dev;
		/* vio_unregister_driver() should have cleaned up port_list */
		BUG_ON(!list_empty(&vp->port_list));
		unregister_netdev(dev);
		free_netdev(dev);
	}
	mutex_unlock(&vnet_list_mutex);
}
static const char *local_mac_prop = "local-mac-address";

static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
				     u64 port_node,
				     struct vio_dev *vdev)
{
	const u64 *local_mac = NULL;
	u64 a;

	mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) {
		u64 target = mdesc_arc_target(hp, a);
		const char *name;

		name = mdesc_get_property(hp, target, "name", NULL);
		if (!name || strcmp(name, "network"))
			continue;

		local_mac = mdesc_get_property(hp, target,
					       local_mac_prop, NULL);
		if (local_mac)
			break;
	}
	if (!local_mac)
		return ERR_PTR(-ENODEV);

	return vnet_find_or_create(local_mac, vdev);
}
static struct ldc_channel_config vnet_ldc_cfg = {
	.event		= vnet_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vnet_vio_ops = {
	.send_attr		= vnet_send_attr,
	.handle_attr		= vnet_handle_attr,
	.handshake_complete	= vnet_handshake_complete,
};

static void print_version(void)
{
	printk_once(KERN_INFO "%s", version);
}

const char *remote_macaddr_prop = "remote-mac-address";
static void
vnet_port_add_txq(struct vnet_port *port)
{
	struct vnet *vp = port->vp;
	int n;

	n = vp->nports++;
	n = n & (VNET_MAX_TXQS - 1);
	port->q_index = n;
	netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index));
}

static void
vnet_port_rm_txq(struct vnet_port *port)
{
	port->vp->nports--;
	netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index));
}
static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vnet_port *port;
	unsigned long flags;
	struct vnet *vp;
	const u64 *rmac;
	int len, i, err, switch_port;

	print_version();

	hp = mdesc_grab();

	vp = vnet_find_parent(hp, vdev->mp, vdev);
	if (IS_ERR(vp)) {
		pr_err("Cannot find port parent vnet\n");
		err = PTR_ERR(vp);
		goto err_out_put_mdesc;
	}

	rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
	err = -ENODEV;
	if (!rmac) {
		pr_err("Port lacks %s property\n", remote_macaddr_prop);
		goto err_out_put_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port)
		goto err_out_put_mdesc;

	for (i = 0; i < ETH_ALEN; i++)
		port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff;

	port->vp = vp;

	err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK,
			      vnet_versions, ARRAY_SIZE(vnet_versions),
			      &vnet_vio_ops, vp->dev->name);
	if (err)
		goto err_out_free_port;

	err = vio_ldc_alloc(&port->vio, &vnet_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT);

	INIT_HLIST_NODE(&port->hash);
	INIT_LIST_HEAD(&port->list);

	switch_port = 0;
	if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL)
		switch_port = 1;
	port->switch_port = switch_port;
	port->tso = true;
	port->tsolen = 0;

	spin_lock_irqsave(&vp->lock, flags);
	if (switch_port)
		list_add_rcu(&port->list, &vp->port_list);
	else
		list_add_tail_rcu(&port->list, &vp->port_list);
	hlist_add_head_rcu(&port->hash,
			   &vp->port_hash[vnet_hashfn(port->raddr)]);
	vnet_port_add_txq(port);
	spin_unlock_irqrestore(&vp->lock, flags);

	dev_set_drvdata(&vdev->dev, port);

	pr_info("%s: PORT ( remote-mac %pM%s )\n",
		vp->dev->name, port->raddr, switch_port ? " switch-port" : "");

	setup_timer(&port->clean_timer, vnet_clean_timer_expire,
		    (unsigned long)port);

	napi_enable(&port->napi);
	vio_port_up(&port->vio);

	mdesc_release(hp);

	return 0;

err_out_free_port:
	kfree(port);

err_out_put_mdesc:
	mdesc_release(hp);
	return err;
}
static int vnet_port_remove(struct vio_dev *vdev)
{
	struct vnet_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		del_timer_sync(&port->vio.timer);

		napi_disable(&port->napi);

		list_del_rcu(&port->list);
		hlist_del_rcu(&port->hash);

		synchronize_rcu();
		del_timer_sync(&port->clean_timer);
		vnet_port_rm_txq(port);
		netif_napi_del(&port->napi);
		vnet_port_free_tx_bufs(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}
static const struct vio_device_id vnet_port_match[] = {
	{
		.type = "vnet-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vnet_port_match);

static struct vio_driver vnet_port_driver = {
	.id_table	= vnet_port_match,
	.probe		= vnet_port_probe,
	.remove		= vnet_port_remove,
	.name		= "vnet_port",
};

static int __init vnet_init(void)
{
	return vio_register_driver(&vnet_port_driver);
}

static void __exit vnet_exit(void)
{
	vio_unregister_driver(&vnet_port_driver);
	vnet_cleanup();
}

module_init(vnet_init);
module_exit(vnet_exit);