net/batman-adv/send.c
/* Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
#include "network-coding.h"

#include <linux/if_ether.h>

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/**
 * batadv_send_skb_packet - send an already prepared packet
 * @skb: the packet to send
 * @hard_iface: the interface to use to send the packet
 * @dst_addr: the payload destination
 *
 * Send out an already prepared packet to the given destination address via
 * the specified batman interface.
 *
 * Returns the result of dev_queue_xmit() on success, or NET_XMIT_DROP if
 * the packet could not be prepared or the interface is not usable.
 */
int batadv_send_skb_packet(struct sk_buff *skb,
                           struct batadv_hard_iface *hard_iface,
                           const uint8_t *dst_addr)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct ethhdr *ethhdr;

        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto send_skb_err;

        if (unlikely(!hard_iface->net_dev))
                goto send_skb_err;

        if (!(hard_iface->net_dev->flags & IFF_UP)) {
                pr_warn("Interface %s is not up - can't send packet via that interface!\n",
                        hard_iface->net_dev->name);
                goto send_skb_err;
        }
        /* push to the Ethernet header */
        if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
                goto send_skb_err;

        skb_reset_mac_header(skb);

        ethhdr = eth_hdr(skb);
        memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
        memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
        ethhdr->h_proto = htons(ETH_P_BATMAN);

        skb_set_network_header(skb, ETH_HLEN);
        skb->protocol = htons(ETH_P_BATMAN);

        skb->dev = hard_iface->net_dev;

        /* Save a clone of the skb to use when decoding coded packets */
        batadv_nc_skb_store_for_decoding(bat_priv, skb);

        /* dev_queue_xmit() returns a negative result on error. However on
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
         */
        return dev_queue_xmit(skb);
send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
}

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
                            struct batadv_orig_node *orig_node,
                            struct batadv_hard_iface *recv_if)
{
        struct batadv_priv *bat_priv = orig_node->bat_priv;
        struct batadv_neigh_node *neigh_node;
        int ret = NET_XMIT_DROP;

        /* batadv_find_router() increases the neigh_node's refcount if found */
        neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node)
                return ret;

        /* try to network code the packet, if it is received on an interface
         * (i.e. being forwarded). If the packet originates from this node or
         * if network coding fails, then send the packet as usual.
         */
        if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
                ret = NET_XMIT_POLICED;
        } else {
                batadv_send_skb_packet(skb, neigh_node->if_incoming,
                                       neigh_node->addr);
                ret = NET_XMIT_SUCCESS;
        }

        batadv_neigh_node_free_ref(neigh_node);

        return ret;
}

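/**
 * batadv_schedule_bat_ogm - schedule an OGM transmission on an interface
 * @hard_iface: the interface the OGM should be sent on
 *
 * Marks a to-be-activated interface as active and defers the actual
 * scheduling to the routing algorithm's bat_ogm_schedule() hook.
 */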
void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

        if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
            (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
                return;

        /* the interface gets activated here to avoid race conditions
         * between the moment the interface is activated in
         * batadv_hardif_activate_interface() (where the originator mac is
         * set) and outdated packets (especially ones with uninitialized mac
         * addresses) still sitting in the packet queue
         */
        if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
                hard_iface->if_status = BATADV_IF_ACTIVE;

        bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

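/**
 * batadv_forw_packet_free - free a forwarding packet
 * @forw_packet: the packet to free
 *
 * Frees the skb attached to the forwarding packet (if any), releases the
 * reference on the incoming interface and frees the structure itself.
 */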
static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
        if (forw_packet->skb)
                kfree_skb(forw_packet->skb);
        if (forw_packet->if_incoming)
                batadv_hardif_free_ref(forw_packet->if_incoming);
        kfree(forw_packet);
}

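/**
 * _batadv_add_bcast_packet_to_list - enqueue a prepared broadcast packet
 * @bat_priv: the bat priv with all the soft interface information
 * @forw_packet: the forwarding packet to enqueue
 * @send_time: delay in jiffies before the packet should be transmitted
 *
 * Adds the forwarding packet to the broadcast queue and arms its delayed
 * work item so the packet is sent once the delay has passed.
 */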
static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                 struct batadv_forw_packet *forw_packet,
                                 unsigned long send_time)
{
        /* add new packet to packet list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* start timer for this packet */
        queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
                           send_time);
}

/**
 * batadv_add_bcast_packet_to_list - queue a broadcast packet for transmission
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the broadcast packet to queue
 * @delay: number of jiffies to wait before the first transmission
 *
 * Add a broadcast packet to the queue and set up timers. Broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 *
 * Returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
                                    unsigned long delay)
{
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_forw_packet *forw_packet;
        struct batadv_bcast_packet *bcast_packet;
        struct sk_buff *newskb;

        if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "bcast packet queue full\n");
                goto out;
        }

        primary_if = batadv_primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out_and_inc;

        forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

        if (!forw_packet)
                goto out_and_inc;

        newskb = skb_copy(skb, GFP_ATOMIC);
        if (!newskb)
                goto packet_free;

        /* as we have a copy now, it is safe to decrease the TTL */
        bcast_packet = (struct batadv_bcast_packet *)newskb->data;
        bcast_packet->header.ttl--;

        skb_reset_mac_header(newskb);

        forw_packet->skb = newskb;
        forw_packet->if_incoming = primary_if;

        /* how often the bcast packet has been sent so far */
        forw_packet->num_packets = 0;

        INIT_DELAYED_WORK(&forw_packet->delayed_work,
                          batadv_send_outstanding_bcast_packet);

        _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
        return NETDEV_TX_OK;

packet_free:
        kfree(forw_packet);
out_and_inc:
        atomic_inc(&bat_priv->bcast_queue_left);
out:
        if (primary_if)
                batadv_hardif_free_ref(primary_if);
        return NETDEV_TX_BUSY;
}

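/**
 * batadv_send_outstanding_bcast_packet - (re)broadcast a queued packet
 * @work: work item embedded in the forwarding packet
 *
 * Sends a copy of the queued broadcast packet on each interface belonging
 * to the mesh and re-arms itself until the packet has been (re)broadcast
 * the configured number of times.
 */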
static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
        struct batadv_hard_iface *hard_iface;
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct sk_buff *skb1;
        struct net_device *soft_iface;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        soft_iface = forw_packet->if_incoming->soft_iface;
        bat_priv = netdev_priv(soft_iface);

        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
                goto out;

        /* rebroadcast packet */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->soft_iface != soft_iface)
                        continue;

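                /* honor the per-interface limit on (re)broadcasts */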
                if (forw_packet->num_packets >= hard_iface->num_bcasts)
                        continue;

                /* send a copy of the saved skb */
                skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
                if (skb1)
                        batadv_send_skb_packet(skb1, hard_iface,
                                               batadv_broadcast_addr);
        }
        rcu_read_unlock();

        forw_packet->num_packets++;

        /* if we still have some more bcasts to send */
        if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
                _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
                                                 msecs_to_jiffies(5));
                return;
        }

out:
        batadv_forw_packet_free(forw_packet);
        atomic_inc(&bat_priv->bcast_queue_left);
}

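/**
 * batadv_send_outstanding_bat_ogm_packet - transmit a queued OGM
 * @work: work item embedded in the forwarding packet
 *
 * Removes the forwarding packet from the OGM queue, hands it to the routing
 * algorithm's bat_ogm_emit() hook and, for locally generated OGMs, schedules
 * the next transmission.
 */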
void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
        struct delayed_work *delayed_work;
        struct batadv_forw_packet *forw_packet;
        struct batadv_priv *bat_priv;

        delayed_work = container_of(work, struct delayed_work, work);
        forw_packet = container_of(delayed_work, struct batadv_forw_packet,
                                   delayed_work);
        bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_del(&forw_packet->list);
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);

        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;

        bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

        /* we have to have at least one packet in the queue to determine the
         * queue's wake-up time unless we are shutting down
         */
        if (forw_packet->own)
                batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
        /* don't count own packet */
        if (!forw_packet->own)
                atomic_inc(&bat_priv->batman_queue_left);

        batadv_forw_packet_free(forw_packet);
}

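/**
 * batadv_purge_outstanding_packets - stop and free queued packets
 * @bat_priv: the bat priv with all the soft interface information
 * @hard_iface: restrict the purge to this interface, or NULL for all
 *
 * Cancels the delayed work of every queued broadcast and OGM packet
 * (restricted to @hard_iface if given) and frees the packets whose
 * transmission had not started yet.
 */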
void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
                                 const struct batadv_hard_iface *hard_iface)
{
        struct batadv_forw_packet *forw_packet;
        struct hlist_node *safe_tmp_node;
        bool pending;

        if (hard_iface)
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets(): %s\n",
                           hard_iface->net_dev->name);
        else
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "purge_outstanding_packets()\n");

        /* free bcast list */
        spin_lock_bh(&bat_priv->forw_bcast_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bcast_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

                /* batadv_send_outstanding_bcast_packet() will lock the list
                 * to delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bcast_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

        /* free batman packet list */
        spin_lock_bh(&bat_priv->forw_bat_list_lock);
        hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
                                  &bat_priv->forw_bat_list, list) {
                /* if purge_outstanding_packets() was called with an argument
                 * we delete only packets belonging to the given interface
                 */
                if ((hard_iface) &&
                    (forw_packet->if_incoming != hard_iface))
                        continue;

                spin_unlock_bh(&bat_priv->forw_bat_list_lock);

                /* batadv_send_outstanding_bat_ogm_packet() will lock the
                 * list to delete the item from the list
                 */
                pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
                spin_lock_bh(&bat_priv->forw_bat_list_lock);

                if (pending) {
                        hlist_del(&forw_packet->list);
                        batadv_forw_packet_free(forw_packet);
                }
        }
        spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}