1 /*
2  * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License as published by the
6  * Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful, but
10  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * for more details.
13  *
14  * You should have received a copy of the GNU General Public License along
15  * with this program; if not, see <http://www.gnu.org/licenses/>.
16  *
17  * The full GNU General Public License is included in this distribution in the
18  * file called LICENSE.
19  *
20  */
21
22 #include <linux/skbuff.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/pkt_sched.h>
26 #include <linux/spinlock.h>
27 #include <linux/slab.h>
28 #include <linux/timer.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/if_arp.h>
32 #include <linux/if_ether.h>
33 #include <linux/if_bonding.h>
34 #include <linux/if_vlan.h>
35 #include <linux/in.h>
36 #include <net/ipx.h>
37 #include <net/arp.h>
38 #include <net/ipv6.h>
39 #include <asm/byteorder.h>
40 #include <net/bonding.h>
41 #include <net/bond_alb.h>
42
43
44
45 static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
46         0xff, 0xff, 0xff, 0xff, 0xff, 0xff
47 };
48 static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
49         0x33, 0x33, 0x00, 0x00, 0x00, 0x01
50 };
51 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
52
53 #pragma pack(1)
54 struct learning_pkt {
55         u8 mac_dst[ETH_ALEN];
56         u8 mac_src[ETH_ALEN];
57         __be16 type;
58         u8 padding[ETH_ZLEN - ETH_HLEN];
59 };
60
61 struct arp_pkt {
62         __be16  hw_addr_space;
63         __be16  prot_addr_space;
64         u8      hw_addr_len;
65         u8      prot_addr_len;
66         __be16  op_code;
67         u8      mac_src[ETH_ALEN];      /* sender hardware address */
68         __be32  ip_src;                 /* sender IP address */
69         u8      mac_dst[ETH_ALEN];      /* target hardware address */
70         __be32  ip_dst;                 /* target IP address */
71 };
72 #pragma pack()
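
/* Both structures above describe on-the-wire layouts, hence the
 * #pragma pack(1): learning_pkt is a minimal Ethernet frame (dst/src MAC
 * plus EtherType, padded out to the ETH_ZLEN minimum) that
 * alb_send_lp_vid() sends with type ETH_P_LOOPBACK, and arp_pkt mirrors
 * the ARP payload for IPv4-over-Ethernet so individual fields can be
 * read and rewritten in place.
 */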
73
74 static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
75 {
76         return (struct arp_pkt *)skb_network_header(skb);
77 }
78
79 /* Forward declarations */
80 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
81                                       bool strict_match);
82 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
83 static void rlb_src_unlink(struct bonding *bond, u32 index);
84 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
85                          u32 ip_dst_hash);
86
87 static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
88 {
89         int i;
90         u8 hash = 0;
91
92         for (i = 0; i < hash_size; i++)
93                 hash ^= hash_start[i];
94
95         return hash;
96 }
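
/* Example: hashing the IPv4 address 10.0.0.5 gives
 * 0x0a ^ 0x00 ^ 0x00 ^ 0x05 = 0x0f, and that u8 value is what indexes
 * the tx/rx hash tables below.
 */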
97
98 /*********************** tlb specific functions ***************************/
99
100 static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
101 {
102         if (save_load) {
103                 entry->load_history = 1 + entry->tx_bytes /
104                                       BOND_TLB_REBALANCE_INTERVAL;
105                 entry->tx_bytes = 0;
106         }
107
108         entry->tx_slave = NULL;
109         entry->next = TLB_NULL_INDEX;
110         entry->prev = TLB_NULL_INDEX;
111 }
112
113 static inline void tlb_init_slave(struct slave *slave)
114 {
115         SLAVE_TLB_INFO(slave).load = 0;
116         SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
117 }
118
119 static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
120                          int save_load)
121 {
122         struct tlb_client_info *tx_hash_table;
123         u32 index;
124
125         /* clear slave from tx_hashtbl */
126         tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
127
128         /* skip this if we've already freed the tx hash table */
129         if (tx_hash_table) {
130                 index = SLAVE_TLB_INFO(slave).head;
131                 while (index != TLB_NULL_INDEX) {
132                         u32 next_index = tx_hash_table[index].next;
133                         tlb_init_table_entry(&tx_hash_table[index], save_load);
134                         index = next_index;
135                 }
136         }
137
138         tlb_init_slave(slave);
139 }
140
141 static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
142                          int save_load)
143 {
144         spin_lock_bh(&bond->mode_lock);
145         __tlb_clear_slave(bond, slave, save_load);
146         spin_unlock_bh(&bond->mode_lock);
147 }
148
149 /* Must be called before starting the monitor timer */
150 static int tlb_initialize(struct bonding *bond)
151 {
152         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
153         int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
154         struct tlb_client_info *new_hashtbl;
155         int i;
156
157         new_hashtbl = kzalloc(size, GFP_KERNEL);
158         if (!new_hashtbl)
159                 return -ENOMEM;
160
161         spin_lock_bh(&bond->mode_lock);
162
163         bond_info->tx_hashtbl = new_hashtbl;
164
165         for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
166                 tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
167
168         spin_unlock_bh(&bond->mode_lock);
169
170         return 0;
171 }
172
173 /* Must be called only after all slaves have been released */
174 static void tlb_deinitialize(struct bonding *bond)
175 {
176         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
177
178         spin_lock_bh(&bond->mode_lock);
179
180         kfree(bond_info->tx_hashtbl);
181         bond_info->tx_hashtbl = NULL;
182
183         spin_unlock_bh(&bond->mode_lock);
184 }
185
186 static long long compute_gap(struct slave *slave)
187 {
188         return (s64) (slave->speed << 20) - /* Mbps -> bits/sec (approx.) */
189                (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
190 }
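
/* Rough arithmetic behind the gap: slave->speed is in Mbps, so
 * (speed << 20) approximates the link capacity in bits/sec (1 Mbit is
 * treated as 2^20 bits), while (load << 3) converts the byte-based load
 * estimate to bits.  tlb_get_least_loaded_slave() below simply picks the
 * slave with the largest remaining gap between capacity and load.
 */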
191
192 static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
193 {
194         struct slave *slave, *least_loaded;
195         struct list_head *iter;
196         long long max_gap;
197
198         least_loaded = NULL;
199         max_gap = LLONG_MIN;
200
201         /* Find the slave with the largest gap */
202         bond_for_each_slave_rcu(bond, slave, iter) {
203                 if (bond_slave_can_tx(slave)) {
204                         long long gap = compute_gap(slave);
205
206                         if (max_gap < gap) {
207                                 least_loaded = slave;
208                                 max_gap = gap;
209                         }
210                 }
211         }
212
213         return least_loaded;
214 }
215
216 static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
217                                                 u32 skb_len)
218 {
219         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
220         struct tlb_client_info *hash_table;
221         struct slave *assigned_slave;
222
223         hash_table = bond_info->tx_hashtbl;
224         assigned_slave = hash_table[hash_index].tx_slave;
225         if (!assigned_slave) {
226                 assigned_slave = tlb_get_least_loaded_slave(bond);
227
228                 if (assigned_slave) {
229                         struct tlb_slave_info *slave_info =
230                                 &(SLAVE_TLB_INFO(assigned_slave));
231                         u32 next_index = slave_info->head;
232
233                         hash_table[hash_index].tx_slave = assigned_slave;
234                         hash_table[hash_index].next = next_index;
235                         hash_table[hash_index].prev = TLB_NULL_INDEX;
236
237                         if (next_index != TLB_NULL_INDEX)
238                                 hash_table[next_index].prev = hash_index;
239
240                         slave_info->head = hash_index;
241                         slave_info->load +=
242                                 hash_table[hash_index].load_history;
243                 }
244         }
245
246         if (assigned_slave)
247                 hash_table[hash_index].tx_bytes += skb_len;
248
249         return assigned_slave;
250 }
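
/* Note on the data structure used above: tx_hashtbl maps a hash of the
 * destination to a tx slave.  Every entry assigned to a slave is also
 * threaded onto that slave's doubly linked list (head kept in
 * SLAVE_TLB_INFO(slave).head, links in the entries' next/prev indices),
 * which is what lets tlb_clear_slave() reset only the buckets owned by
 * one slave without scanning the whole table.
 */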
251
252 static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
253                                         u32 skb_len)
254 {
255         struct slave *tx_slave;
256
257         /* We don't need to disable softirq here, because
258          * tlb_choose_channel() is only called by bond_alb_xmit()
259          * which already has softirq disabled.
260          */
261         spin_lock(&bond->mode_lock);
262         tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
263         spin_unlock(&bond->mode_lock);
264
265         return tx_slave;
266 }
267
268 /*********************** rlb specific functions ***************************/
269
270 /* when an ARP REPLY is received from a client, update its info
271  * in the rx_hashtbl
272  */
273 static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
274 {
275         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
276         struct rlb_client_info *client_info;
277         u32 hash_index;
278
279         spin_lock_bh(&bond->mode_lock);
280
281         hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
282         client_info = &(bond_info->rx_hashtbl[hash_index]);
283
284         if ((client_info->assigned) &&
285             (client_info->ip_src == arp->ip_dst) &&
286             (client_info->ip_dst == arp->ip_src) &&
287             (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
288                 /* update the client's MAC address */
289                 ether_addr_copy(client_info->mac_dst, arp->mac_src);
290                 client_info->ntt = 1;
291                 bond_info->rx_ntt = 1;
292         }
293
294         spin_unlock_bh(&bond->mode_lock);
295 }
296
297 static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
298                         struct slave *slave)
299 {
300         struct arp_pkt *arp, _arp;
301
302         if (skb->protocol != cpu_to_be16(ETH_P_ARP))
303                 goto out;
304
305         arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
306         if (!arp)
307                 goto out;
308
309         /* We received an ARP from arp->ip_src.
310          * We might have used this IP address previously (on the bonding host
311          * itself or on a system that is bridged together with the bond).
312          * However, if arp->mac_src is different than what is stored in
313          * rx_hashtbl, some other host is now using the IP and we must prevent
314          * sending out client updates with this IP address and the old MAC
315          * address.
316          * Clean up all hash table entries that have this address as ip_src but
317          * have a different mac_src.
318          */
319         rlb_purge_src_ip(bond, arp);
320
321         if (arp->op_code == htons(ARPOP_REPLY)) {
322                 /* update rx hash table for this ARP */
323                 rlb_update_entry_from_arp(bond, arp);
324                 netdev_dbg(bond->dev, "Server received an ARP Reply from client\n");
325         }
326 out:
327         return RX_HANDLER_ANOTHER;
328 }
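
/* rlb_arp_recv() is installed as bond->recv_probe by rlb_initialize(),
 * so it sees ARP traffic arriving on any slave.  It first purges stale
 * entries that reuse the sender's IP with a different MAC, and for ARP
 * replies it records the client's real unicast MAC in rx_hashtbl and
 * sets ntt ("need to transmit") so the client gets refreshed by
 * rlb_update_rx_clients().
 */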
329
330 /* Caller must hold rcu_read_lock() */
331 static struct slave *__rlb_next_rx_slave(struct bonding *bond)
332 {
333         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
334         struct slave *before = NULL, *rx_slave = NULL, *slave;
335         struct list_head *iter;
336         bool found = false;
337
338         bond_for_each_slave_rcu(bond, slave, iter) {
339                 if (!bond_slave_can_tx(slave))
340                         continue;
341                 if (!found) {
342                         if (!before || before->speed < slave->speed)
343                                 before = slave;
344                 } else {
345                         if (!rx_slave || rx_slave->speed < slave->speed)
346                                 rx_slave = slave;
347                 }
348                 if (slave == bond_info->rx_slave)
349                         found = true;
350         }
351         /* we didn't find anything after the current or we have something
352          * better before and up to the current slave
353          */
354         if (!rx_slave || (before && rx_slave->speed < before->speed))
355                 rx_slave = before;
356
357         if (rx_slave)
358                 bond_info->rx_slave = rx_slave;
359
360         return rx_slave;
361 }
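
/* Selection policy above: among tx-capable slaves, prefer the fastest
 * slave that comes after the currently cached rx_slave in the slave
 * list; fall back to the fastest slave at or before it when nothing
 * follows or that earlier slave is strictly faster.  The result is an
 * approximately round-robin assignment weighted towards faster links.
 */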
362
363 /* Caller must hold RTNL, rcu_read_lock is obtained only to silence checkers */
364 static struct slave *rlb_next_rx_slave(struct bonding *bond)
365 {
366         struct slave *rx_slave;
367
368         ASSERT_RTNL();
369
370         rcu_read_lock();
371         rx_slave = __rlb_next_rx_slave(bond);
372         rcu_read_unlock();
373
374         return rx_slave;
375 }
376
377 /* teach the switch the mac of a disabled slave
378  * on the primary for fault tolerance
379  *
380  * Caller must hold RTNL
381  */
382 static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
383 {
384         struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
385
386         if (!curr_active)
387                 return;
388
389         if (!bond->alb_info.primary_is_promisc) {
390                 if (!dev_set_promiscuity(curr_active->dev, 1))
391                         bond->alb_info.primary_is_promisc = 1;
392                 else
393                         bond->alb_info.primary_is_promisc = 0;
394         }
395
396         bond->alb_info.rlb_promisc_timeout_counter = 0;
397
398         alb_send_learning_packets(curr_active, addr, true);
399 }
400
401 /* slave being removed should not be active at this point
402  *
403  * Caller must hold rtnl.
404  */
405 static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
406 {
407         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
408         struct rlb_client_info *rx_hash_table;
409         u32 index, next_index;
410
411         /* clear slave from rx_hashtbl */
412         spin_lock_bh(&bond->mode_lock);
413
414         rx_hash_table = bond_info->rx_hashtbl;
415         index = bond_info->rx_hashtbl_used_head;
416         for (; index != RLB_NULL_INDEX; index = next_index) {
417                 next_index = rx_hash_table[index].used_next;
418                 if (rx_hash_table[index].slave == slave) {
419                         struct slave *assigned_slave = rlb_next_rx_slave(bond);
420
421                         if (assigned_slave) {
422                                 rx_hash_table[index].slave = assigned_slave;
423                                 if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst,
424                                                              mac_bcast)) {
425                                         bond_info->rx_hashtbl[index].ntt = 1;
426                                         bond_info->rx_ntt = 1;
427                                         /* A slave has been removed from the
428                                          * table because it is either disabled
429                                          * or being released. We must retry the
430                                          * update to avoid leaving clients with
431                                          * a stale mapping and losing
432                                          * connectivity under load.
433                                          */
434                                         bond_info->rlb_update_retry_counter =
435                                                 RLB_UPDATE_RETRY;
436                                 }
437                         } else {  /* there is no active slave */
438                                 rx_hash_table[index].slave = NULL;
439                         }
440                 }
441         }
442
443         spin_unlock_bh(&bond->mode_lock);
444
445         if (slave != rtnl_dereference(bond->curr_active_slave))
446                 rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
447 }
448
449 static void rlb_update_client(struct rlb_client_info *client_info)
450 {
451         int i;
452
453         if (!client_info->slave)
454                 return;
455
456         for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
457                 struct sk_buff *skb;
458
459                 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
460                                  client_info->ip_dst,
461                                  client_info->slave->dev,
462                                  client_info->ip_src,
463                                  client_info->mac_dst,
464                                  client_info->slave->dev->dev_addr,
465                                  client_info->mac_dst);
466                 if (!skb) {
467                         netdev_err(client_info->slave->bond->dev,
468                                    "failed to create an ARP packet\n");
469                         continue;
470                 }
471
472                 skb->dev = client_info->slave->dev;
473
474                 if (client_info->vlan_id) {
475                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
476                                                client_info->vlan_id);
477                 }
478
479                 arp_xmit(skb);
480         }
481 }
482
483 /* sends ARP REPLIES that update the clients that need updating */
484 static void rlb_update_rx_clients(struct bonding *bond)
485 {
486         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
487         struct rlb_client_info *client_info;
488         u32 hash_index;
489
490         spin_lock_bh(&bond->mode_lock);
491
492         hash_index = bond_info->rx_hashtbl_used_head;
493         for (; hash_index != RLB_NULL_INDEX;
494              hash_index = client_info->used_next) {
495                 client_info = &(bond_info->rx_hashtbl[hash_index]);
496                 if (client_info->ntt) {
497                         rlb_update_client(client_info);
498                         if (bond_info->rlb_update_retry_counter == 0)
499                                 client_info->ntt = 0;
500                 }
501         }
502
503         /* do not update the entries again until this counter is zero, so as
504          * not to confuse the clients.
505          */
506         bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
507
508         spin_unlock_bh(&bond->mode_lock);
509 }
510
511 /* The slave was assigned a new mac address - update the clients */
512 static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
513 {
514         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
515         struct rlb_client_info *client_info;
516         int ntt = 0;
517         u32 hash_index;
518
519         spin_lock_bh(&bond->mode_lock);
520
521         hash_index = bond_info->rx_hashtbl_used_head;
522         for (; hash_index != RLB_NULL_INDEX;
523              hash_index = client_info->used_next) {
524                 client_info = &(bond_info->rx_hashtbl[hash_index]);
525
526                 if ((client_info->slave == slave) &&
527                     !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
528                         client_info->ntt = 1;
529                         ntt = 1;
530                 }
531         }
532
533         /* update the team's flag only after the whole iteration */
534         if (ntt) {
535                 bond_info->rx_ntt = 1;
536                 /* fasten the change */
537                 bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
538         }
539
540         spin_unlock_bh(&bond->mode_lock);
541 }
542
543 /* mark all clients using src_ip to be updated */
544 static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
545 {
546         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
547         struct rlb_client_info *client_info;
548         u32 hash_index;
549
550         spin_lock(&bond->mode_lock);
551
552         hash_index = bond_info->rx_hashtbl_used_head;
553         for (; hash_index != RLB_NULL_INDEX;
554              hash_index = client_info->used_next) {
555                 client_info = &(bond_info->rx_hashtbl[hash_index]);
556
557                 if (!client_info->slave) {
558                         netdev_err(bond->dev, "found a client with no channel in the client's hash table\n");
559                         continue;
560                 }
561                 /* update all clients using this src_ip, that are not assigned
562                  * to the team's address (curr_active_slave) and have a known
563                  * unicast mac address.
564                  */
565                 if ((client_info->ip_src == src_ip) &&
566                     !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
567                                              bond->dev->dev_addr) &&
568                     !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
569                         client_info->ntt = 1;
570                         bond_info->rx_ntt = 1;
571                 }
572         }
573
574         spin_unlock(&bond->mode_lock);
575 }
576
577 static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
578 {
579         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
580         struct arp_pkt *arp = arp_pkt(skb);
581         struct slave *assigned_slave, *curr_active_slave;
582         struct rlb_client_info *client_info;
583         u32 hash_index = 0;
584
585         spin_lock(&bond->mode_lock);
586
587         curr_active_slave = rcu_dereference(bond->curr_active_slave);
588
589         hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
590         client_info = &(bond_info->rx_hashtbl[hash_index]);
591
592         if (client_info->assigned) {
593                 if ((client_info->ip_src == arp->ip_src) &&
594                     (client_info->ip_dst == arp->ip_dst)) {
595                         /* the entry is already assigned to this client */
596                         if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
597                                 /* update mac address from arp */
598                                 ether_addr_copy(client_info->mac_dst, arp->mac_dst);
599                         }
600                         ether_addr_copy(client_info->mac_src, arp->mac_src);
601
602                         assigned_slave = client_info->slave;
603                         if (assigned_slave) {
604                                 spin_unlock(&bond->mode_lock);
605                                 return assigned_slave;
606                         }
607                 } else {
608                         /* the entry is already assigned to some other client,
609                          * move the old client to primary (curr_active_slave) so
610                          * that the new client can be assigned to this entry.
611                          */
612                         if (curr_active_slave &&
613                             client_info->slave != curr_active_slave) {
614                                 client_info->slave = curr_active_slave;
615                                 rlb_update_client(client_info);
616                         }
617                 }
618         }
619         /* assign a new slave */
620         assigned_slave = __rlb_next_rx_slave(bond);
621
622         if (assigned_slave) {
623                 if (!(client_info->assigned &&
624                       client_info->ip_src == arp->ip_src)) {
625                         /* ip_src is going to be updated,
626                          * fix the src hash list
627                          */
628                         u32 hash_src = _simple_hash((u8 *)&arp->ip_src,
629                                                     sizeof(arp->ip_src));
630                         rlb_src_unlink(bond, hash_index);
631                         rlb_src_link(bond, hash_src, hash_index);
632                 }
633
634                 client_info->ip_src = arp->ip_src;
635                 client_info->ip_dst = arp->ip_dst;
636                 /* arp->mac_dst is broadcast for arp requests.
637                  * It will be updated with the client's actual unicast mac address
638                  * upon receiving an arp reply.
639                  */
640                 ether_addr_copy(client_info->mac_dst, arp->mac_dst);
641                 ether_addr_copy(client_info->mac_src, arp->mac_src);
642                 client_info->slave = assigned_slave;
643
644                 if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
645                         client_info->ntt = 1;
646                         bond->alb_info.rx_ntt = 1;
647                 } else {
648                         client_info->ntt = 0;
649                 }
650
651                 if (vlan_get_tag(skb, &client_info->vlan_id))
652                         client_info->vlan_id = 0;
653
654                 if (!client_info->assigned) {
655                         u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
656                         bond_info->rx_hashtbl_used_head = hash_index;
657                         client_info->used_next = prev_tbl_head;
658                         if (prev_tbl_head != RLB_NULL_INDEX) {
659                                 bond_info->rx_hashtbl[prev_tbl_head].used_prev =
660                                         hash_index;
661                         }
662                         client_info->assigned = 1;
663                 }
664         }
665
666         spin_unlock(&bond->mode_lock);
667
668         return assigned_slave;
669 }
670
671 /* chooses (and returns) transmit channel for arp reply
672  * does not choose channel for other arp types since they are
673  * sent on the curr_active_slave
674  */
675 static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
676 {
677         struct arp_pkt *arp = arp_pkt(skb);
678         struct slave *tx_slave = NULL;
679
680         /* Don't modify or load balance ARPs that do not originate locally
681          * (e.g., arrive via a bridge).
682          */
683         if (!bond_slave_has_mac_rx(bond, arp->mac_src))
684                 return NULL;
685
686         if (arp->op_code == htons(ARPOP_REPLY)) {
687                 /* the arp must be sent on the selected rx channel */
688                 tx_slave = rlb_choose_channel(skb, bond);
689                 if (tx_slave)
690                         ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
691                 netdev_dbg(bond->dev, "Server sent ARP Reply packet\n");
692         } else if (arp->op_code == htons(ARPOP_REQUEST)) {
693                 /* Create an entry in the rx_hashtbl for this client as a
694                  * placeholder.
695                  * When the arp reply is received the entry will be updated
696                  * with the correct unicast address of the client.
697                  */
698                 rlb_choose_channel(skb, bond);
699
700                 /* The ARP reply packets must be delayed so that
701                  * they can cancel out the influence of the ARP request.
702                  */
703                 bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
704
705                 /* arp requests are broadcast and are sent on the primary;
706                  * the arp request will collapse all clients on the subnet to
707                  * the primary slave. We must register these clients to be
708                  * updated with their assigned mac.
709                  */
710                 rlb_req_update_subnet_clients(bond, arp->ip_src);
711                 netdev_dbg(bond->dev, "Server sent ARP Request packet\n");
712         }
713
714         return tx_slave;
715 }
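
/* This is the transmit half of receive load balancing: only ARPs that
 * originate locally are touched.  For a reply, rlb_choose_channel()
 * picks (or reuses) an rx slave for the client and the sender hardware
 * address in the ARP payload is rewritten to that slave's MAC, so the
 * peer learns to address its traffic to that slave.  For a request,
 * which goes out broadcast on the primary, a placeholder entry is
 * created and all clients on the subnet are marked for an update,
 * since the broadcast request temporarily pulls them back to the
 * primary.
 */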
716
717 static void rlb_rebalance(struct bonding *bond)
718 {
719         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
720         struct slave *assigned_slave;
721         struct rlb_client_info *client_info;
722         int ntt;
723         u32 hash_index;
724
725         spin_lock_bh(&bond->mode_lock);
726
727         ntt = 0;
728         hash_index = bond_info->rx_hashtbl_used_head;
729         for (; hash_index != RLB_NULL_INDEX;
730              hash_index = client_info->used_next) {
731                 client_info = &(bond_info->rx_hashtbl[hash_index]);
732                 assigned_slave = __rlb_next_rx_slave(bond);
733                 if (assigned_slave && (client_info->slave != assigned_slave)) {
734                         client_info->slave = assigned_slave;
735                         client_info->ntt = 1;
736                         ntt = 1;
737                 }
738         }
739
740         /* update the team's flag only after the whole iteration */
741         if (ntt)
742                 bond_info->rx_ntt = 1;
743         spin_unlock_bh(&bond->mode_lock);
744 }
745
746 /* Caller must hold mode_lock */
747 static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
748 {
749         entry->used_next = RLB_NULL_INDEX;
750         entry->used_prev = RLB_NULL_INDEX;
751         entry->assigned = 0;
752         entry->slave = NULL;
753         entry->vlan_id = 0;
754 }
755 static void rlb_init_table_entry_src(struct rlb_client_info *entry)
756 {
757         entry->src_first = RLB_NULL_INDEX;
758         entry->src_prev = RLB_NULL_INDEX;
759         entry->src_next = RLB_NULL_INDEX;
760 }
761
762 static void rlb_init_table_entry(struct rlb_client_info *entry)
763 {
764         memset(entry, 0, sizeof(struct rlb_client_info));
765         rlb_init_table_entry_dst(entry);
766         rlb_init_table_entry_src(entry);
767 }
768
769 static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index)
770 {
771         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
772         u32 next_index = bond_info->rx_hashtbl[index].used_next;
773         u32 prev_index = bond_info->rx_hashtbl[index].used_prev;
774
775         if (index == bond_info->rx_hashtbl_used_head)
776                 bond_info->rx_hashtbl_used_head = next_index;
777         if (prev_index != RLB_NULL_INDEX)
778                 bond_info->rx_hashtbl[prev_index].used_next = next_index;
779         if (next_index != RLB_NULL_INDEX)
780                 bond_info->rx_hashtbl[next_index].used_prev = prev_index;
781 }
782
783 /* unlink a rlb hash table entry from the src list */
784 static void rlb_src_unlink(struct bonding *bond, u32 index)
785 {
786         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
787         u32 next_index = bond_info->rx_hashtbl[index].src_next;
788         u32 prev_index = bond_info->rx_hashtbl[index].src_prev;
789
790         bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX;
791         bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX;
792
793         if (next_index != RLB_NULL_INDEX)
794                 bond_info->rx_hashtbl[next_index].src_prev = prev_index;
795
796         if (prev_index == RLB_NULL_INDEX)
797                 return;
798
799         /* is prev_index pointing to the head of this list? */
800         if (bond_info->rx_hashtbl[prev_index].src_first == index)
801                 bond_info->rx_hashtbl[prev_index].src_first = next_index;
802         else
803                 bond_info->rx_hashtbl[prev_index].src_next = next_index;
804
805 }
806
807 static void rlb_delete_table_entry(struct bonding *bond, u32 index)
808 {
809         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
810         struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
811
812         rlb_delete_table_entry_dst(bond, index);
813         rlb_init_table_entry_dst(entry);
814
815         rlb_src_unlink(bond, index);
816 }
817
818 /* add the rx_hashtbl[ip_dst_hash] entry to the list
819  * of entries with identical ip_src_hash
820  */
821 static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
822 {
823         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
824         u32 next;
825
826         bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash;
827         next = bond_info->rx_hashtbl[ip_src_hash].src_first;
828         bond_info->rx_hashtbl[ip_dst_hash].src_next = next;
829         if (next != RLB_NULL_INDEX)
830                 bond_info->rx_hashtbl[next].src_prev = ip_dst_hash;
831         bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash;
832 }
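
/* rx_hashtbl is primarily indexed by a hash of ip_dst, but entries are
 * also chained by the hash of their ip_src: the bucket for the source
 * hash stores src_first, and entries sharing that source hash are
 * linked through src_next/src_prev.  rlb_purge_src_ip() walks this
 * chain to drop entries whose recorded mac_src no longer matches the
 * sender.
 */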
833
834 /* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does
835  * not match arp->mac_src
836  */
837 static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
838 {
839         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
840         u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
841         u32 index;
842
843         spin_lock_bh(&bond->mode_lock);
844
845         index = bond_info->rx_hashtbl[ip_src_hash].src_first;
846         while (index != RLB_NULL_INDEX) {
847                 struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
848                 u32 next_index = entry->src_next;
849                 if (entry->ip_src == arp->ip_src &&
850                     !ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
851                                 rlb_delete_table_entry(bond, index);
852                 index = next_index;
853         }
854         spin_unlock_bh(&bond->mode_lock);
855 }
856
857 static int rlb_initialize(struct bonding *bond)
858 {
859         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
860         struct rlb_client_info  *new_hashtbl;
861         int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
862         int i;
863
864         new_hashtbl = kmalloc(size, GFP_KERNEL);
865         if (!new_hashtbl)
866                 return -ENOMEM;
867
868         spin_lock_bh(&bond->mode_lock);
869
870         bond_info->rx_hashtbl = new_hashtbl;
871
872         bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
873
874         for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
875                 rlb_init_table_entry(bond_info->rx_hashtbl + i);
876
877         spin_unlock_bh(&bond->mode_lock);
878
879         /* register to receive ARPs */
880         bond->recv_probe = rlb_arp_recv;
881
882         return 0;
883 }
884
885 static void rlb_deinitialize(struct bonding *bond)
886 {
887         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
888
889         spin_lock_bh(&bond->mode_lock);
890
891         kfree(bond_info->rx_hashtbl);
892         bond_info->rx_hashtbl = NULL;
893         bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
894
895         spin_unlock_bh(&bond->mode_lock);
896 }
897
898 static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
899 {
900         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
901         u32 curr_index;
902
903         spin_lock_bh(&bond->mode_lock);
904
905         curr_index = bond_info->rx_hashtbl_used_head;
906         while (curr_index != RLB_NULL_INDEX) {
907                 struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
908                 u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
909
910                 if (curr->vlan_id == vlan_id)
911                         rlb_delete_table_entry(bond, curr_index);
912
913                 curr_index = next_index;
914         }
915
916         spin_unlock_bh(&bond->mode_lock);
917 }
918
919 /*********************** tlb/rlb shared functions *********************/
920
921 static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
922                             __be16 vlan_proto, u16 vid)
923 {
924         struct learning_pkt pkt;
925         struct sk_buff *skb;
926         int size = sizeof(struct learning_pkt);
927         char *data;
928
929         memset(&pkt, 0, size);
930         ether_addr_copy(pkt.mac_dst, mac_addr);
931         ether_addr_copy(pkt.mac_src, mac_addr);
932         pkt.type = cpu_to_be16(ETH_P_LOOPBACK);
933
934         skb = dev_alloc_skb(size);
935         if (!skb)
936                 return;
937
938         data = skb_put(skb, size);
939         memcpy(data, &pkt, size);
940
941         skb_reset_mac_header(skb);
942         skb->network_header = skb->mac_header + ETH_HLEN;
943         skb->protocol = pkt.type;
944         skb->priority = TC_PRIO_CONTROL;
945         skb->dev = slave->dev;
946
947         if (vid)
948                 __vlan_hwaccel_put_tag(skb, vlan_proto, vid);
949
950         dev_queue_xmit(skb);
951 }
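
/* The learning packet built above is addressed from and to mac_addr
 * with EtherType ETH_P_LOOPBACK; transmitting it out of a slave makes
 * the attached switch (re)learn mac_addr on that slave's port.  When a
 * VLAN id is supplied the frame is tagged so per-VLAN forwarding tables
 * are refreshed as well.
 */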
952
953 struct alb_walk_data {
954         struct bonding *bond;
955         struct slave *slave;
956         u8 *mac_addr;
957         bool strict_match;
958 };
959
960 static int alb_upper_dev_walk(struct net_device *upper, void *_data)
961 {
962         struct alb_walk_data *data = _data;
963         bool strict_match = data->strict_match;
964         struct bonding *bond = data->bond;
965         struct slave *slave = data->slave;
966         u8 *mac_addr = data->mac_addr;
967         struct bond_vlan_tag *tags;
968
969         if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
970                 if (strict_match &&
971                     ether_addr_equal_64bits(mac_addr,
972                                             upper->dev_addr)) {
973                         alb_send_lp_vid(slave, mac_addr,
974                                         vlan_dev_vlan_proto(upper),
975                                         vlan_dev_vlan_id(upper));
976                 } else if (!strict_match) {
977                         alb_send_lp_vid(slave, upper->dev_addr,
978                                         vlan_dev_vlan_proto(upper),
979                                         vlan_dev_vlan_id(upper));
980                 }
981         }
982
983         /* If this is a macvlan device, then only send updates
984          * when strict_match is turned off.
985          */
986         if (netif_is_macvlan(upper) && !strict_match) {
987                 tags = bond_verify_device_path(bond->dev, upper, 0);
988                 if (IS_ERR_OR_NULL(tags))
989                         BUG();
990                 alb_send_lp_vid(slave, upper->dev_addr,
991                                 tags[0].vlan_proto, tags[0].vlan_id);
992                 kfree(tags);
993         }
994
995         return 0;
996 }
997
998 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
999                                       bool strict_match)
1000 {
1001         struct bonding *bond = bond_get_bond_by_slave(slave);
1002         struct alb_walk_data data = {
1003                 .strict_match = strict_match,
1004                 .mac_addr = mac_addr,
1005                 .slave = slave,
1006                 .bond = bond,
1007         };
1008
1009         /* send untagged */
1010         alb_send_lp_vid(slave, mac_addr, 0, 0);
1011
1012         /* loop through all devices and see if we need to send a packet
1013          * for that device.
1014          */
1015         rcu_read_lock();
1016         netdev_walk_all_upper_dev_rcu(bond->dev, alb_upper_dev_walk, &data);
1017         rcu_read_unlock();
1018 }
1019
1020 static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
1021 {
1022         struct net_device *dev = slave->dev;
1023         struct sockaddr s_addr;
1024
1025         if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
1026                 memcpy(dev->dev_addr, addr, dev->addr_len);
1027                 return 0;
1028         }
1029
1030         /* for rlb each slave must have a unique hw mac address so that
1031          * each slave will receive packets destined to a different mac
1032          */
1033         memcpy(s_addr.sa_data, addr, dev->addr_len);
1034         s_addr.sa_family = dev->type;
1035         if (dev_set_mac_address(dev, &s_addr)) {
1036                 netdev_err(slave->bond->dev, "dev_set_mac_address of dev %s failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
1037                            dev->name);
1038                 return -EOPNOTSUPP;
1039         }
1040         return 0;
1041 }
1042
1043 /* Swap MAC addresses between two slaves.
1044  *
1045  * Called with RTNL held, and no other locks.
1046  */
1047 static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
1048 {
1049         u8 tmp_mac_addr[ETH_ALEN];
1050
1051         ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr);
1052         alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
1053         alb_set_slave_mac_addr(slave2, tmp_mac_addr);
1054
1055 }
1056
1057 /* Send learning packets after MAC address swap.
1058  *
1059  * Called with RTNL and no other locks
1060  */
1061 static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
1062                                 struct slave *slave2)
1063 {
1064         int slaves_state_differ = (bond_slave_can_tx(slave1) != bond_slave_can_tx(slave2));
1065         struct slave *disabled_slave = NULL;
1066
1067         ASSERT_RTNL();
1068
1069         /* fasten the change in the switch */
1070         if (bond_slave_can_tx(slave1)) {
1071                 alb_send_learning_packets(slave1, slave1->dev->dev_addr, false);
1072                 if (bond->alb_info.rlb_enabled) {
1073                         /* inform the clients that the mac address
1074                          * has changed
1075                          */
1076                         rlb_req_update_slave_clients(bond, slave1);
1077                 }
1078         } else {
1079                 disabled_slave = slave1;
1080         }
1081
1082         if (bond_slave_can_tx(slave2)) {
1083                 alb_send_learning_packets(slave2, slave2->dev->dev_addr, false);
1084                 if (bond->alb_info.rlb_enabled) {
1085                         /* inform the clients that the mac address
1086                          * has changed
1087                          */
1088                         rlb_req_update_slave_clients(bond, slave2);
1089                 }
1090         } else {
1091                 disabled_slave = slave2;
1092         }
1093
1094         if (bond->alb_info.rlb_enabled && slaves_state_differ) {
1095                 /* A disabled slave was assigned an active mac addr */
1096                 rlb_teach_disabled_mac_on_primary(bond,
1097                                                   disabled_slave->dev->dev_addr);
1098         }
1099 }
1100
1101 /**
1102  * alb_change_hw_addr_on_detach
1103  * @bond: bonding we're working on
1104  * @slave: the slave that was just detached
1105  *
1106  * We assume that @slave was already detached from the slave list.
1107  *
1108  * If @slave's permanent hw address is different both from its current
1109  * address and from @bond's address, then somewhere in the bond there's
1110  * a slave that has @slave's permanent address as its current address.
1111  * We'll make sure that that slave no longer uses @slave's permanent address.
1112  *
1113  * Caller must hold RTNL and no other locks
1114  */
1115 static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
1116 {
1117         int perm_curr_diff;
1118         int perm_bond_diff;
1119         struct slave *found_slave;
1120
1121         perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
1122                                                   slave->dev->dev_addr);
1123         perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
1124                                                   bond->dev->dev_addr);
1125
1126         if (perm_curr_diff && perm_bond_diff) {
1127                 found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr);
1128
1129                 if (found_slave) {
1130                         alb_swap_mac_addr(slave, found_slave);
1131                         alb_fasten_mac_swap(bond, slave, found_slave);
1132                 }
1133         }
1134 }
1135
1136 /**
1137  * alb_handle_addr_collision_on_attach
1138  * @bond: bonding we're working on
1139  * @slave: the slave that was just attached
1140  *
1141  * checks uniqueness of slave's mac address and handles the case where the
1142  * new slave uses the bond's mac address.
1143  *
1144  * If the permanent hw address of @slave is @bond's hw address, we need to
1145  * find a different hw address to give @slave, that isn't in use by any other
1146  * slave in the bond. This address must be, of course, one of the permanent
1147  * addresses of the other slaves.
1148  *
1149  * We go over the slave list, and for each slave there we compare its
1150  * permanent hw address with the current address of all the other slaves.
1151  * If no match was found, then we've found a slave with a permanent address
1152  * that isn't used by any other slave in the bond, so we can assign it to
1153  * @slave.
1154  *
1155  * assumption: this function is called before @slave is attached to the
1156  *             bond slave list.
1157  */
1158 static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
1159 {
1160         struct slave *has_bond_addr = rcu_access_pointer(bond->curr_active_slave);
1161         struct slave *tmp_slave1, *free_mac_slave = NULL;
1162         struct list_head *iter;
1163
1164         if (!bond_has_slaves(bond)) {
1165                 /* this is the first slave */
1166                 return 0;
1167         }
1168
1169         /* if slave's mac address differs from bond's mac address
1170          * check uniqueness of slave's mac address against the other
1171          * slaves in the bond.
1172          */
1173         if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
1174                 if (!bond_slave_has_mac(bond, slave->dev->dev_addr))
1175                         return 0;
1176
1177                 /* Try setting slave mac to bond address and fall-through
1178                  * to code handling that situation below...
1179                  */
1180                 alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
1181         }
1182
1183         /* The slave's address is equal to the address of the bond.
1184          * Search for a spare address in the bond for this slave.
1185          */
1186         bond_for_each_slave(bond, tmp_slave1, iter) {
1187                 if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
1188                         /* no slave has tmp_slave1's perm addr
1189                          * as its curr addr
1190                          */
1191                         free_mac_slave = tmp_slave1;
1192                         break;
1193                 }
1194
1195                 if (!has_bond_addr) {
1196                         if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
1197                                                     bond->dev->dev_addr)) {
1198
1199                                 has_bond_addr = tmp_slave1;
1200                         }
1201                 }
1202         }
1203
1204         if (free_mac_slave) {
1205                 alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
1206
1207                 netdev_warn(bond->dev, "the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
1208                             slave->dev->name, free_mac_slave->dev->name);
1209
1210         } else if (has_bond_addr) {
1211                 netdev_err(bond->dev, "the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
1212                            slave->dev->name);
1213                 return -EFAULT;
1214         }
1215
1216         return 0;
1217 }
1218
1219 /**
1220  * alb_set_mac_address
1221  * @bond:
1222  * @bond: bonding we're working on
1223  * @addr: the new address to program into each slave device
1224  * In TLB mode all slaves are configured to the bond's hw address, but set
1225  * their dev_addr field to different addresses (based on their permanent hw
1226  * addresses).
1227  *
1228  * For each slave, this function sets the interface to the new address and then
1229  * changes its dev_addr field to its previous value.
1230  *
1231  * Unwinding assumes bond's mac address has not yet changed.
1232  */
1233 static int alb_set_mac_address(struct bonding *bond, void *addr)
1234 {
1235         struct slave *slave, *rollback_slave;
1236         struct list_head *iter;
1237         struct sockaddr sa;
1238         char tmp_addr[ETH_ALEN];
1239         int res;
1240
1241         if (bond->alb_info.rlb_enabled)
1242                 return 0;
1243
1244         bond_for_each_slave(bond, slave, iter) {
1245                 /* save net_device's current hw address */
1246                 ether_addr_copy(tmp_addr, slave->dev->dev_addr);
1247
1248                 res = dev_set_mac_address(slave->dev, addr);
1249
1250                 /* restore net_device's hw address */
1251                 ether_addr_copy(slave->dev->dev_addr, tmp_addr);
1252
1253                 if (res)
1254                         goto unwind;
1255         }
1256
1257         return 0;
1258
1259 unwind:
1260         memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
1261         sa.sa_family = bond->dev->type;
1262
1263         /* unwind from head to the slave that failed */
1264         bond_for_each_slave(bond, rollback_slave, iter) {
1265                 if (rollback_slave == slave)
1266                         break;
1267                 ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr);
1268                 dev_set_mac_address(rollback_slave->dev, &sa);
1269                 ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr);
1270         }
1271
1272         return res;
1273 }
1274
1275 /************************ exported alb functions ************************/
1276
1277 int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
1278 {
1279         int res;
1280
1281         res = tlb_initialize(bond);
1282         if (res)
1283                 return res;
1284
1285         if (rlb_enabled) {
1286                 bond->alb_info.rlb_enabled = 1;
1287                 res = rlb_initialize(bond);
1288                 if (res) {
1289                         tlb_deinitialize(bond);
1290                         return res;
1291                 }
1292         } else {
1293                 bond->alb_info.rlb_enabled = 0;
1294         }
1295
1296         return 0;
1297 }
1298
1299 void bond_alb_deinitialize(struct bonding *bond)
1300 {
1301         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1302
1303         tlb_deinitialize(bond);
1304
1305         if (bond_info->rlb_enabled)
1306                 rlb_deinitialize(bond);
1307 }
1308
1309 static int bond_do_alb_xmit(struct sk_buff *skb, struct bonding *bond,
1310                             struct slave *tx_slave)
1311 {
1312         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1313         struct ethhdr *eth_data = eth_hdr(skb);
1314
1315         if (!tx_slave) {
1316                 /* unbalanced or unassigned, send through primary */
1317                 tx_slave = rcu_dereference(bond->curr_active_slave);
1318                 if (bond->params.tlb_dynamic_lb)
1319                         bond_info->unbalanced_load += skb->len;
1320         }
1321
1322         if (tx_slave && bond_slave_can_tx(tx_slave)) {
1323                 if (tx_slave != rcu_access_pointer(bond->curr_active_slave)) {
1324                         ether_addr_copy(eth_data->h_source,
1325                                         tx_slave->dev->dev_addr);
1326                 }
1327
1328                 bond_dev_queue_xmit(bond, skb, tx_slave->dev);
1329                 goto out;
1330         }
1331
1332         if (tx_slave && bond->params.tlb_dynamic_lb) {
1333                 spin_lock(&bond->mode_lock);
1334                 __tlb_clear_slave(bond, tx_slave, 0);
1335                 spin_unlock(&bond->mode_lock);
1336         }
1337
1338         /* no suitable interface, frame not sent */
1339         bond_tx_drop(bond->dev, skb);
1340 out:
1341         return NETDEV_TX_OK;
1342 }
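
/* Common transmit tail for TLB/ALB: if no balanced slave was chosen the
 * frame falls back to curr_active_slave and, in dynamic TLB mode, its
 * length is accounted as unbalanced_load.  When the chosen slave is not
 * curr_active_slave the Ethernet source MAC is rewritten to the slave's
 * own address so the switch keeps seeing each slave's MAC on its own
 * port.  If the chosen slave can no longer transmit, its TLB buckets
 * are cleared (in dynamic mode) and the frame is dropped.
 */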
1343
1344 int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1345 {
1346         struct bonding *bond = netdev_priv(bond_dev);
1347         struct ethhdr *eth_data;
1348         struct slave *tx_slave = NULL;
1349         u32 hash_index;
1350
1351         skb_reset_mac_header(skb);
1352         eth_data = eth_hdr(skb);
1353
1354         /* Do not TX balance any multicast or broadcast */
1355         if (!is_multicast_ether_addr(eth_data->h_dest)) {
1356                 switch (skb->protocol) {
1357                 case htons(ETH_P_IP):
1358                 case htons(ETH_P_IPX):
1359                     /* In case of IPX, it will fall back to L2 hash */
1360                 case htons(ETH_P_IPV6):
1361                         hash_index = bond_xmit_hash(bond, skb);
1362                         if (bond->params.tlb_dynamic_lb) {
1363                                 tx_slave = tlb_choose_channel(bond,
1364                                                               hash_index & 0xFF,
1365                                                               skb->len);
1366                         } else {
1367                                 struct bond_up_slave *slaves;
1368                                 unsigned int count;
1369
1370                                 slaves = rcu_dereference(bond->slave_arr);
1371                                 count = slaves ? ACCESS_ONCE(slaves->count) : 0;
1372                                 if (likely(count))
1373                                         tx_slave = slaves->arr[hash_index %
1374                                                                count];
1375                         }
1376                         break;
1377                 }
1378         }
1379         return bond_do_alb_xmit(skb, bond, tx_slave);
1380 }
1381
1382 int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
1383 {
1384         struct bonding *bond = netdev_priv(bond_dev);
1385         struct ethhdr *eth_data;
1386         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1387         struct slave *tx_slave = NULL;
1388         static const __be32 ip_bcast = htonl(0xffffffff);
1389         int hash_size = 0;
1390         bool do_tx_balance = true;
1391         u32 hash_index = 0;
1392         const u8 *hash_start = NULL;
1393         struct ipv6hdr *ip6hdr;
1394
1395         skb_reset_mac_header(skb);
1396         eth_data = eth_hdr(skb);
1397
1398         switch (ntohs(skb->protocol)) {
1399         case ETH_P_IP: {
1400                 const struct iphdr *iph = ip_hdr(skb);
1401
1402                 if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
1403                     (iph->daddr == ip_bcast) ||
1404                     (iph->protocol == IPPROTO_IGMP)) {
1405                         do_tx_balance = false;
1406                         break;
1407                 }
1408                 hash_start = (char *)&(iph->daddr);
1409                 hash_size = sizeof(iph->daddr);
1410         }
1411                 break;
1412         case ETH_P_IPV6:
1413                 /* IPv6 doesn't really use broadcast mac address, but leave
1414                  * that here just in case.
1415                  */
1416                 if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
1417                         do_tx_balance = false;
1418                         break;
1419                 }
1420
1421                 /* IPv6 uses all-nodes multicast as an equivalent to
1422                  * broadcasts in IPv4.
1423                  */
1424                 if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
1425                         do_tx_balance = false;
1426                         break;
1427                 }
1428
1429                 /* Additionally, DAD probes should not be tx-balanced as that
1430                  * will lead to false positives for duplicate addresses and
1431                  * prevent address configuration from working.
1432                  */
1433                 ip6hdr = ipv6_hdr(skb);
1434                 if (ipv6_addr_any(&ip6hdr->saddr)) {
1435                         do_tx_balance = false;
1436                         break;
1437                 }
1438
1439                 hash_start = (char *)&ip6hdr->daddr;
1440                 hash_size = sizeof(ip6hdr->daddr);
1441                 break;
1442         case ETH_P_IPX:
1443                 if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
1444                         /* something is wrong with this packet */
1445                         do_tx_balance = false;
1446                         break;
1447                 }
1448
1449                 if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
1450                         /* NCP is the only protocol worth balancing in
1451                          * this family, since it has an ARP-like
1452                          * mechanism.
1453                          */
1454                         do_tx_balance = false;
1455                         break;
1456                 }
1457
1458                 hash_start = (char *)eth_data->h_dest;
1459                 hash_size = ETH_ALEN;
1460                 break;
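             /* ARP is never TX balanced.  In RLB mode rlb_arp_xmit() picks
              * the slave from the RX hash table so the peer learns the MAC
              * of the slave assigned to it.
              */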
1461         case ETH_P_ARP:
1462                 do_tx_balance = false;
1463                 if (bond_info->rlb_enabled)
1464                         tx_slave = rlb_arp_xmit(skb, bond);
1465                 break;
1466         default:
1467                 do_tx_balance = false;
1468                 break;
1469         }
1470
1471         if (do_tx_balance) {
1472                 hash_index = _simple_hash(hash_start, hash_size);
1473                 tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
1474         }
1475
1476         return bond_do_alb_xmit(skb, bond, tx_slave);
1477 }
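
/* Illustrative sketch only, not part of the driver: assuming the
 * _simple_hash() helper defined earlier in this file, this is roughly how
 * the do_tx_balance path above reduces an IPv4 destination address to a
 * TLB hash index.  The function name is hypothetical.
 */
#if 0
static u8 example_ipv4_tlb_hash(const struct iphdr *iph)
{
        /* XOR the four bytes of the destination address, exactly as
         * bond_alb_xmit() does via hash_start/hash_size.
         */
        return _simple_hash((const u8 *)&iph->daddr, sizeof(iph->daddr));
}
#endif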
1478
1479 void bond_alb_monitor(struct work_struct *work)
1480 {
1481         struct bonding *bond = container_of(work, struct bonding,
1482                                             alb_work.work);
1483         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1484         struct list_head *iter;
1485         struct slave *slave;
1486
1487         if (!bond_has_slaves(bond)) {
1488                 bond_info->tx_rebalance_counter = 0;
1489                 bond_info->lp_counter = 0;
1490                 goto re_arm;
1491         }
1492
1493         rcu_read_lock();
1494
1495         bond_info->tx_rebalance_counter++;
1496         bond_info->lp_counter++;
1497
1498         /* send learning packets */
1499         if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
1500                 bool strict_match;
1501
1502                 bond_for_each_slave_rcu(bond, slave, iter) {
1503                         /* If updating the current active slave, use all
1504                          * currently used MAC addresses (!strict_match).
1505                          * Otherwise, only use the MAC of the slave device.
1506                          * In RLB mode, we always use strict matches.
1507                          */
1508                         strict_match = (slave != rcu_access_pointer(bond->curr_active_slave) ||
1509                                         bond_info->rlb_enabled);
1510                         alb_send_learning_packets(slave, slave->dev->dev_addr,
1511                                                   strict_match);
1512                 }
1513                 bond_info->lp_counter = 0;
1514         }
1515
1516         /* rebalance tx traffic */
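             /* Clearing the TLB entries forces every flow to be re-hashed on
              * its next transmit; the current active slave is then seeded
              * with the average unbalanced load (traffic sent outside TLB
              * hashing) so the next channel choices account for it.
              */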
1517         if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {
1518                 bond_for_each_slave_rcu(bond, slave, iter) {
1519                         tlb_clear_slave(bond, slave, 1);
1520                         if (slave == rcu_access_pointer(bond->curr_active_slave)) {
1521                                 SLAVE_TLB_INFO(slave).load =
1522                                         bond_info->unbalanced_load /
1523                                                 BOND_TLB_REBALANCE_INTERVAL;
1524                                 bond_info->unbalanced_load = 0;
1525                         }
1526                 }
1527                 bond_info->tx_rebalance_counter = 0;
1528         }
1529
1530         if (bond_info->rlb_enabled) {
1531                 if (bond_info->primary_is_promisc &&
1532                     (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
1533
1534                         /* dev_set_promiscuity requires rtnl and
1535                          * nothing else.  Avoid race with bond_close.
1536                          */
1537                         rcu_read_unlock();
1538                         if (!rtnl_trylock())
1539                                 goto re_arm;
1540
1541                         bond_info->rlb_promisc_timeout_counter = 0;
1542
1543                         /* If the primary was set to promiscuous mode
1544                          * because a slave was disabled, then it can now
1545                          * leave promiscuous mode.
1546                          */
1547                         dev_set_promiscuity(rtnl_dereference(bond->curr_active_slave)->dev,
1548                                             -1);
1549                         bond_info->primary_is_promisc = 0;
1550
1551                         rtnl_unlock();
1552                         rcu_read_lock();
1553                 }
1554
1555                 if (bond_info->rlb_rebalance) {
1556                         bond_info->rlb_rebalance = 0;
1557                         rlb_rebalance(bond);
1558                 }
1559
1560                 /* check if clients need updating */
1561                 if (bond_info->rx_ntt) {
1562                         if (bond_info->rlb_update_delay_counter) {
1563                                 --bond_info->rlb_update_delay_counter;
1564                         } else {
1565                                 rlb_update_rx_clients(bond);
1566                                 if (bond_info->rlb_update_retry_counter)
1567                                         --bond_info->rlb_update_retry_counter;
1568                                 else
1569                                         bond_info->rx_ntt = 0;
1570                         }
1571                 }
1572         }
1573         rcu_read_unlock();
1574 re_arm:
1575         queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
1576 }
1577
1578 /* assumption: called before the slave is attached to the bond
1579  * and not locked by the bond lock
1580  */
1581 int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
1582 {
1583         int res;
1584
1585         res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
1586         if (res)
1587                 return res;
1588
1589         res = alb_handle_addr_collision_on_attach(bond, slave);
1590         if (res)
1591                 return res;
1592
1593         tlb_init_slave(slave);
1594
1595         /* order a rebalance ASAP */
1596         bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
1597
1598         if (bond->alb_info.rlb_enabled)
1599                 bond->alb_info.rlb_rebalance = 1;
1600
1601         return 0;
1602 }
1603
1604 /* Remove slave from tlb and rlb hash tables, and fix up MAC addresses
1605  * if necessary.
1606  *
1607  * Caller must hold RTNL and no other locks
1608  */
1609 void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
1610 {
1611         if (bond_has_slaves(bond))
1612                 alb_change_hw_addr_on_detach(bond, slave);
1613
1614         tlb_clear_slave(bond, slave, 0);
1615
1616         if (bond->alb_info.rlb_enabled) {
1617                 bond->alb_info.rx_slave = NULL;
1618                 rlb_clear_slave(bond, slave);
1619         }
1620
1621 }
1622
1623 void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
1624 {
1625         struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1626
1627         if (link == BOND_LINK_DOWN) {
1628                 tlb_clear_slave(bond, slave, 0);
1629                 if (bond->alb_info.rlb_enabled)
1630                         rlb_clear_slave(bond, slave);
1631         } else if (link == BOND_LINK_UP) {
1632                 /* order a rebalance ASAP */
1633                 bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
1634                 if (bond->alb_info.rlb_enabled) {
1635                         bond->alb_info.rlb_rebalance = 1;
1636                         /* If the updelay module parameter is smaller than
1637                          * the forwarding delay of the switch, the rebalance
1638                          * will not work because the rebalance ARP replies
1639                          * will not be forwarded to the clients.
1640                          */
1641                 }
1642         }
1643
1644         if (bond_is_nondyn_tlb(bond)) {
1645                 if (bond_update_slave_arr(bond, NULL))
1646                         pr_err("Failed to build slave-array for TLB mode.\n");
1647         }
1648 }
1649
1650 /**
1651  * bond_alb_handle_active_change - assign new curr_active_slave
1652  * @bond: our bonding struct
1653  * @new_slave: new slave to assign
1654  *
1655  * Set the bond->curr_active_slave to @new_slave and handle
1656  * mac address swapping and promiscuity changes as needed.
1657  *
1658  * Caller must hold RTNL
1659  */
1660 void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
1661 {
1662         struct slave *swap_slave;
1663         struct slave *curr_active;
1664
1665         curr_active = rtnl_dereference(bond->curr_active_slave);
1666         if (curr_active == new_slave)
1667                 return;
1668
1669         if (curr_active && bond->alb_info.primary_is_promisc) {
1670                 dev_set_promiscuity(curr_active->dev, -1);
1671                 bond->alb_info.primary_is_promisc = 0;
1672                 bond->alb_info.rlb_promisc_timeout_counter = 0;
1673         }
1674
1675         swap_slave = curr_active;
1676         rcu_assign_pointer(bond->curr_active_slave, new_slave);
1677
1678         if (!new_slave || !bond_has_slaves(bond))
1679                 return;
1680
1681         /* Set the new curr_active_slave to the bond's MAC address,
1682          * i.e. swap the MAC addresses of the old and new curr_active_slave.
1683          */
1684         if (!swap_slave)
1685                 swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr);
1686
1687         /* Arrange for swap_slave and new_slave to temporarily be
1688          * ignored so we can mess with their MAC addresses without
1689          * fear of interference from transmit activity.
1690          */
1691         if (swap_slave)
1692                 tlb_clear_slave(bond, swap_slave, 1);
1693         tlb_clear_slave(bond, new_slave, 1);
1694
1695         /* In TLB mode the slave might flip down/up with the old dev_addr,
1696          * and thus filter bond->dev_addr's packets, so force the bond's MAC.
1697          */
1698         if (BOND_MODE(bond) == BOND_MODE_TLB) {
1699                 struct sockaddr sa;
1700                 u8 tmp_addr[ETH_ALEN];
1701
1702                 ether_addr_copy(tmp_addr, new_slave->dev->dev_addr);
1703
1704                 memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
1705                 sa.sa_family = bond->dev->type;
1706                 /* we don't care if it can't change its mac, best effort */
1707                 dev_set_mac_address(new_slave->dev, &sa);
1708
1709                 ether_addr_copy(new_slave->dev->dev_addr, tmp_addr);
1710         }
1711
1712         /* curr_active_slave must be set before calling alb_swap_mac_addr */
1713         if (swap_slave) {
1714                 /* swap mac address */
1715                 alb_swap_mac_addr(swap_slave, new_slave);
1716                 alb_fasten_mac_swap(bond, swap_slave, new_slave);
1717         } else {
1718                 /* set the new_slave to the bond mac address */
1719                 alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
1720                 alb_send_learning_packets(new_slave, bond->dev->dev_addr,
1721                                           false);
1722         }
1723 }
1724
1725 /* Called with RTNL */
1726 int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
1727 {
1728         struct bonding *bond = netdev_priv(bond_dev);
1729         struct sockaddr *sa = addr;
1730         struct slave *curr_active;
1731         struct slave *swap_slave;
1732         int res;
1733
1734         if (!is_valid_ether_addr(sa->sa_data))
1735                 return -EADDRNOTAVAIL;
1736
1737         res = alb_set_mac_address(bond, addr);
1738         if (res)
1739                 return res;
1740
1741         memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
1742
1743         /* If there is no curr_active_slave, there is nothing else to do.
1744          * Otherwise we'll need to pass the new address to it and handle
1745          * duplicates.
1746          */
1747         curr_active = rtnl_dereference(bond->curr_active_slave);
1748         if (!curr_active)
1749                 return 0;
1750
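             /* If some other slave already carries the bond's new MAC, swap
              * addresses with the current active slave so that the active
              * slave always transmits with the bond's MAC.
              */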
1751         swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
1752
1753         if (swap_slave) {
1754                 alb_swap_mac_addr(swap_slave, curr_active);
1755                 alb_fasten_mac_swap(bond, swap_slave, curr_active);
1756         } else {
1757                 alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr);
1758
1759                 alb_send_learning_packets(curr_active,
1760                                           bond_dev->dev_addr, false);
1761                 if (bond->alb_info.rlb_enabled) {
1762                         /* inform clients mac address has changed */
1763                         rlb_req_update_slave_clients(bond, curr_active);
1764                 }
1765         }
1766
1767         return 0;
1768 }
1769
1770 void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
1771 {
1772         if (bond->alb_info.rlb_enabled)
1773                 rlb_clear_vlan(bond, vlan_id);
1774 }
1775