/*
 * net/8021q/vlan_core.c
 * (blob from karo-tx-linux.git; commit subject:
 *  "vlan: kill __vlan_hwaccel_rx and vlan_hwaccel_rx")
 */
1 #include <linux/skbuff.h>
2 #include <linux/netdevice.h>
3 #include <linux/if_vlan.h>
4 #include <linux/netpoll.h>
5 #include "vlan.h"
6
/* Steer a VLAN-tagged skb from the real device to its vlan device.
 *
 * Returns true when the skb was retargeted at a vlan device (the caller
 * re-runs delivery with the updated *skbp); false when no matching vlan
 * device exists or the skb was lost on unshare/tag-reinsert (in which
 * case *skbp may have been set to NULL).
 */
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;	/* drop PCP/CFI bits */
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* Unknown vlan: flag non-zero-vid frames as not-for-us so
		 * the rest of the stack does not treat them as host traffic. */
		if (vlan_id)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	/* We are about to modify skb fields: get a private copy if shared. */
	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		/* REORDER_HDR off: the tag must stay visible in the packet
		 * data, so re-insert it from vlan_tci. */
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expect skb->data pointing to mac header.
		 * So change skb->data before calling it and change back to
		 * original position later
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;	/* tag fully consumed */

	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
65
66 /* Must be invoked with rcu_read_lock or with RTNL. */
67 struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
68                                         u16 vlan_id)
69 {
70         struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
71
72         if (grp) {
73                 return vlan_group_get_device(grp, vlan_id);
74         } else {
75                 /*
76                  * Bonding slaves do not have grp assigned to themselves.
77                  * Grp is assigned to bonding master instead.
78                  */
79                 if (netif_is_bond_slave(real_dev))
80                         return __vlan_find_dev_deep(real_dev->master, vlan_id);
81         }
82
83         return NULL;
84 }
85 EXPORT_SYMBOL(__vlan_find_dev_deep);
86
87 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
88 {
89         return vlan_dev_info(dev)->real_dev;
90 }
91 EXPORT_SYMBOL(vlan_dev_real_dev);
92
93 u16 vlan_dev_vlan_id(const struct net_device *dev)
94 {
95         return vlan_dev_info(dev)->vlan_id;
96 }
97 EXPORT_SYMBOL(vlan_dev_vlan_id);
98
99 gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
100                               unsigned int vlan_tci, struct sk_buff *skb)
101 {
102         __vlan_hwaccel_put_tag(skb, vlan_tci);
103         return napi_gro_receive(napi, skb);
104 }
105 EXPORT_SYMBOL(vlan_gro_receive);
106
107 gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
108                             unsigned int vlan_tci)
109 {
110         __vlan_hwaccel_put_tag(napi->skb, vlan_tci);
111         return napi_gro_frags(napi);
112 }
113 EXPORT_SYMBOL(vlan_gro_frags);
114
/* Close the 4-byte gap left by a stripped VLAN tag: slide the
 * destination + source MAC addresses (2 * ETH_ALEN bytes) forward over
 * it and move mac_header accordingly.  Returns NULL if the skb head
 * could not be made writable (skb not freed here; caller cleans up).
 */
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	/* skb_cow with the current headroom requests no extra space; it
	 * only guarantees the header is private and writable. */
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);
	return skb;
}
124
125 static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
126 {
127         __be16 proto;
128         unsigned char *rawp;
129
130         /*
131          * Was a VLAN packet, grab the encapsulated protocol, which the layer
132          * three protocols care about.
133          */
134
135         proto = vhdr->h_vlan_encapsulated_proto;
136         if (ntohs(proto) >= 1536) {
137                 skb->protocol = proto;
138                 return;
139         }
140
141         rawp = skb->data;
142         if (*(unsigned short *) rawp == 0xFFFF)
143                 /*
144                  * This is a magic hack to spot IPX packets. Older Novell
145                  * breaks the protocol design and runs IPX over 802.3 without
146                  * an 802.2 LLC layer. We look for FFFF which isn't a used
147                  * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
148                  * but does for the rest.
149                  */
150                 skb->protocol = htons(ETH_P_802_3);
151         else
152                 /*
153                  * Real 802.2 LLC
154                  */
155                 skb->protocol = htons(ETH_P_802_2);
156 }
157
/* Move an in-band VLAN tag out of the packet data and into
 * skb->vlan_tci, mimicking hardware tag extraction.  Returns the
 * (possibly reallocated) skb; returns it untouched if a hwaccel tag is
 * already present; returns NULL on failure, with the skb freed.
 */
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	/* The header is about to be rewritten: unshare first. */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;	/* kfree_skb(NULL) is a no-op */

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	/* Read vhdr only after pskb_may_pull, which may relocate the head. */
	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	/* Strip the 4-byte tag while keeping the checksum consistent. */
	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	/* Slide the MAC addresses over the gap the tag left behind. */
	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}