#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>
/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int poff, nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		if (ip_is_fragment(iph))
			ip_proto = 0;
		else
			ip_proto = iph->protocol;
		iph_to_flow_copy_addrs(flow, iph);
		nhoff += iph->ihl * 4;
		break;
	}
	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
	case __constant_htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case __constant_htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case __constant_htons(PPP_IP):
			goto ip;
		case __constant_htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}

	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION | GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			goto again;
		}
		break;
	}
	default:
		break;
	}

	flow->ip_proto = ip_proto;
	poff = proto_ports_offset(ip_proto);
	if (poff >= 0) {
		__be32 *ports, _ports;

		nhoff += poff;
		ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
		if (ports)
			flow->ports = *ports;
	}

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
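
/*
 * Usage sketch (illustrative only): a caller dissects onto the stack and
 * then reads the extracted addresses, ports and protocol.  The pr_debug()
 * consumer below is just an example, not code from this file:
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		pr_debug("proto %u ports %#x\n",
 *			 keys.ip_proto, ntohl(keys.ports));
 */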
static u32 hashrnd __read_mostly;

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
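
/*
 * Note: because of the canonical ordering above, both directions of a
 * flow collapse onto the same (src, dst, ports) tuple before
 * jhash_3words(), so A->B and B->A packets get the same rxhash and are
 * steered to the same RPS/RFS CPU.
 */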
/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to use as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
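
/*
 * Note: the final "((u64) hash * qcount) >> 32" maps a 32-bit hash onto
 * [0, qcount) using a multiply and shift rather than a modulo; e.g. with
 * qcount = 8, a hash of 0x80000000 selects queue
 * (0x80000000ULL * 8) >> 32 = 4.
 */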
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;

				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk) {
			struct dst_entry *dst =
				rcu_dereference_check(sk->sk_dst_cache, 1);

			/* cache the newly selected queue only if the
			 * socket's route is settled
			 */
			if (dst && skb_dst(skb) == dst)
				sk_tx_queue_set(sk, new_index);
		}

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
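
/*
 * Note: queue selection order is ndo_select_queue() if the driver
 * provides one, otherwise __netdev_pick_tx() (socket cache, then XPS,
 * then skb_tx_hash()); on multiqueue devices the result is clamped by
 * dev_cap_txqueue() before being written into the skb's queue mapping.
 */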
static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);