/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "originator.h"
#include "network-coding.h"

#include <linux/if_ether.h>

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to send the packet on
 * @dst_addr: the destination MAC address
 *
 * Sends out an already prepared packet to the given address via the
 * specified batman interface. The skb is consumed in all cases.
 *
 * Returns the result of dev_queue_xmit() on success, NET_XMIT_DROP on error.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

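/* Usage sketch (hypothetical caller, not part of this file): transmit a
 * prepared skb to a known neighbor. The skb is consumed on both the
 * success and the error path, so the caller must not free it afterwards:
 *
 *	batadv_send_skb_packet(skb, neigh_node->if_incoming,
 *			       neigh_node->addr);
 *
 * batadv_send_skb_to_orig() below issues exactly this call once the next
 * hop has been resolved.
 */
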
/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		return ret;

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or if
	 * network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

	batadv_neigh_node_free_ref(neigh_node);

	return ret;
}

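/**
 * batadv_schedule_bat_ogm - schedule an originator message for transmission
 * @hard_iface: the interface the OGM is scheduled on
 *
 * Marks an interface that is waiting for activation as active and hands
 * the OGM scheduling over to the routing algorithm in use.
 */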
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here to avoid race conditions between
	 * the moment the interface is activated in
	 * hardif_activate_interface(), where the originator MAC is set, and
	 * outdated packets (especially ones with uninitialized MAC addresses)
	 * still sitting in the packet queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

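/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the skb (if any), drops the reference on the incoming interface
 * and releases the forwarding packet itself.
 */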
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

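/**
 * _batadv_add_bcast_packet_to_list - queue a broadcast packet and arm its timer
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the broadcast packet to queue
 * @send_time: delay in jiffies after which the packet is sent
 */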
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

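/* Usage sketch (hypothetical caller, not part of this file): the packet is
 * duplicated with skb_copy(), so the original skb remains owned by the
 * caller and must be freed either way:
 *
 *	if (batadv_add_bcast_packet_to_list(bat_priv, skb, 1) == NETDEV_TX_OK)
 *		consume_skb(skb);
 *	else
 *		kfree_skb(skb);
 */
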
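/**
 * batadv_send_outstanding_bcast_packet - rebroadcast a queued packet
 * @work: work queue item of the pending broadcast
 *
 * Sends a copy of the queued broadcast packet on each interface attached to
 * the soft interface and requeues itself until the rebroadcast limit is
 * reached.
 */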
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

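/**
 * batadv_send_outstanding_bat_ogm_packet - emit a queued OGM
 * @work: work queue item of the pending OGM
 *
 * Takes the forwarding packet off the OGM queue, hands it to the routing
 * algorithm for emission and, for locally generated OGMs, schedules the
 * next one.
 */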
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time, unless we are shutting down
	 */
	if (forw_packet->own)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

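/**
 * batadv_purge_outstanding_packets - stop and remove queued packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: the interface to purge, or NULL to purge all interfaces
 *
 * Cancels the pending broadcast and OGM transmissions and frees the
 * associated forwarding packets.
 */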
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list, so the lock has to be dropped
		 * here before waiting for the work item to finish
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}