#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/jhash.h>
#include <net/flow_keys.h>
/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
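
/* skb_flow_dissect - extract the minimal flow keys from a packet.
 *
 * Fills @flow with source/destination addresses, the L4 protocol, the
 * transport header offset and, when readable, the port pair, following
 * VLAN tags, PPPoE sessions and GRE/IPIP encapsulation along the way.
 * Returns false when the headers cannot be dissected.
 */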
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int poff, nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		if (ip_is_fragment(iph))
			ip_proto = 0;
		else
			ip_proto = iph->protocol;
		iph_to_flow_copy_addrs(flow, iph);
		nhoff += iph->ihl * 4;
		break;
	}
	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
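
	/* An outer VLAN tag only shifts the real ethertype; pick up the
	 * encapsulated protocol and restart the parse past the tag.
	 */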
	case __constant_htons(ETH_P_8021AD):
	case __constant_htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
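
	/* A PPPoE session header is immediately followed by a 2-byte PPP
	 * protocol number; only IPv4 and IPv6 payloads are dissected.
	 */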
	case __constant_htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case __constant_htons(PPP_IP):
			goto ip;
		case __constant_htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			/* Skip the optional checksum, key and sequence
			 * fields to reach the inner protocol. */
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}
	flow->ip_proto = ip_proto;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		__be32 *ports, _ports;

		nhoff += poff;
		/* Both 16-bit ports are fetched with one 32-bit load. */
		ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
		if (ports)
			flow->ports = *ports;
	}

	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
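
/* Random seed for all flow hashes, picked once at late boot by
 * initialize_hashrnd() below so hash values differ across reboots.
 */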
static u32 hashrnd __read_mostly;
/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_rxhash = 1;
	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}
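
	/* Fold the canonicalized tuple together with the random seed;
	 * jhash_3words() mixes dst, src and the 32-bit port pair into a
	 * single 32-bit value.
	 */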
	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to use as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}
	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);
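
	/* Scale the 32-bit hash into [0, qcount) with a multiply and a
	 * shift instead of a modulo, then add the traffic-class offset.
	 */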
	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff = keys.thoff;
	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		/* Account for TCP options via the data offset field. */
		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
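
/* dev_cap_txqueue - clamp a selected Tx queue index to the device's real
 * queue range, with a ratelimited warning when a driver picks an
 * out-of-range queue; queue 0 is used as the safe fallback.
 */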
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}
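
/* get_xps_queue - look up a Tx queue for this packet in the XPS map of
 * the current CPU. Returns -1 when XPS is not configured, so the caller
 * can fall back to hashing.
 */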
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
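
/* __netdev_pick_tx - core Tx queue selection: reuse the queue cached on
 * the socket when it is still valid, otherwise consult XPS and fall back
 * to skb_tx_hash(), caching the new choice when a destination is set.
 */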
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);
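
/* netdev_pick_tx - let the driver's ndo_select_queue() pick the queue if
 * it provides one, otherwise use __netdev_pick_tx(); the result is
 * validated by dev_cap_txqueue() and recorded in the skb's queue mapping.
 */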
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
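
/* Pick the hash seed as late as possible in boot; by late_initcall_sync()
 * time get_random_bytes() should have gathered more entropy than during
 * early init.
 */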
static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);