/*
 * Copyright (C) 2007-2009 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "types.h"
#include "vis.h"
#include "aggregation.h"

/* apply hop penalty for a normal link */
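/* Example (assuming TQ_MAX_VALUE = 255 and TQ_HOP_PENALTY = 10): an incoming
 * tq of 255 is scaled to (255 * 245) / 255 = 245, i.e. each additional hop
 * costs roughly 4 per cent of the link quality. */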
static uint8_t hop_penalty(const uint8_t tq)
{
	return (tq * (TQ_MAX_VALUE - TQ_HOP_PENALTY)) / (TQ_MAX_VALUE);
}

/* when do we schedule our own packet to be sent */
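/* The next own OGM is scheduled originator_interval milliseconds from now,
 * randomised by +/- JITTER ms so that neighbouring nodes do not keep sending
 * their OGMs at exactly the same moments. */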
static unsigned long own_send_time(void)
{
	return jiffies +
		(((atomic_read(&originator_interval) - JITTER +
		   (random32() % (2 * JITTER))) * HZ) / 1000);
}

/* when do we schedule a forwarded packet to be sent */
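/* With aggregation enabled the packet is held back for roughly the
 * aggregation window (MAX_AGGREGATION_MS +/- JITTER/2 ms) so that further
 * OGMs can be merged into the same frame; otherwise only a small random
 * delay of up to JITTER/2 ms is added. */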
static unsigned long forward_send_time(void)
{
	unsigned long send_time = jiffies; /* Starting now plus... */

	if (atomic_read(&aggregation_enabled))
		send_time += (((MAX_AGGREGATION_MS - (JITTER/2) +
				(random32() % JITTER)) * HZ) / 1000);
	else
		send_time += (((random32() % (JITTER/2)) * HZ) / 1000);

	return send_time;
}

/* sends a raw packet. */
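/* Builds a fresh skb around pack_buff, prepends an Ethernet header
 * (h_proto ETH_P_BATMAN, source = interface MAC, destination = dst_addr)
 * and queues the frame on batman_if via dev_queue_xmit(). */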
void send_raw_packet(unsigned char *pack_buff, int pack_buff_len,
		     struct batman_if *batman_if, uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;
	struct sk_buff *skb;
	unsigned char *data;
	int retval;

	if (batman_if->if_active != IF_ACTIVE)
		return;

	if (!(batman_if->net_dev->flags & IFF_UP)) {
		debug_log(LOG_TYPE_WARN,
			  "Interface %s is not up - can't send packet via that interface (IF_TO_BE_DEACTIVATED was here) !\n",
			  batman_if->dev);
		return;
	}

	skb = dev_alloc_skb(pack_buff_len + sizeof(struct ethhdr));
	if (!skb)
		return;

	data = skb_put(skb, pack_buff_len + sizeof(struct ethhdr));
	memcpy(data + sizeof(struct ethhdr), pack_buff, pack_buff_len);

	ethhdr = (struct ethhdr *)data;
	memcpy(ethhdr->h_source, batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_reset_mac_header(skb);
	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);
	skb->dev = batman_if->net_dev;
	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	retval = dev_queue_xmit(skb);
	if (retval < 0)
		debug_log(LOG_TYPE_CRIT,
			  "Can't write to raw socket (IF_TO_BE_DEACTIVATED was here): %i\n",
			  retval);
}

/* Send a packet to a given interface */
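/* The packet buffer may contain several aggregated OGMs: walk through them,
 * set or clear DIRECTLINK for each position and then broadcast the whole
 * buffer on the given interface. */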
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct batman_if *batman_if)
{
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	char orig_str[ETH_STR_LEN];

	if (batman_if->if_active != IF_ACTIVE)
		return;

	packet_num = buff_pos = 0;
	batman_packet = (struct batman_packet *)
		(forw_packet->packet_buff);

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->num_hna)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == batman_if))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		addr_to_string(orig_str, batman_packet->orig);
		fwd_str = (packet_num > 0 ? "Forwarding" :
			   (forw_packet->own ? "Sending own" : "Forwarding"));

		debug_log(LOG_TYPE_BATMAN,
			  "%s %spacket (originator %s, seqno %d, TQ %d, TTL %d, IDF %s) on interface %s [%s]\n",
			  fwd_str,
			  (packet_num > 0 ? "aggregated " : ""),
			  orig_str, ntohs(batman_packet->seqno),
			  batman_packet->tq, batman_packet->ttl,
			  (batman_packet->flags & DIRECTLINK ?
			   "on" : "off"),
			  batman_if->dev, batman_if->addr_str);

		buff_pos += sizeof(struct batman_packet) +
			(batman_packet->num_hna * ETH_ALEN);
		packet_num++;

		batman_packet = (struct batman_packet *)
			(forw_packet->packet_buff + buff_pos);
	}

	send_raw_packet(forw_packet->packet_buff,
			forw_packet->packet_len,
			batman_if, broadcastAddr);
}

/* send a batman packet */
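/* Either rebroadcasts the OGM on its incoming interface only (direct link
 * OGMs with TTL 1, own OGMs of non-primary interfaces) or hands it to
 * send_packet_to_if() for every interface in if_list. */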
static void send_packet(struct forw_packet *forw_packet)
{
	struct batman_if *batman_if;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->packet_buff);
	char orig_str[ETH_STR_LEN];
	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		debug_log(LOG_TYPE_CRIT,
			  "Error - can't forward packet: incoming iface not specified\n");
		return;
	}

	if (forw_packet->if_incoming->if_active != IF_ACTIVE)
		return;

	addr_to_string(orig_str, batman_packet->orig);

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {

		/* FIXME: what about aggregated packets ? */
		debug_log(LOG_TYPE_BATMAN,
			  "%s packet (originator %s, seqno %d, TTL %d) on interface %s [%s]\n",
			  (forw_packet->own ? "Sending own" : "Forwarding"),
			  orig_str, ntohs(batman_packet->seqno),
			  batman_packet->ttl, forw_packet->if_incoming->dev,
			  forw_packet->if_incoming->addr_str);

		send_raw_packet(forw_packet->packet_buff,
				forw_packet->packet_len,
				forw_packet->if_incoming,
				broadcastAddr);
		return;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list)
		send_packet_to_if(forw_packet, batman_if);
	rcu_read_unlock();
}

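/* Rebuilds the OGM buffer of an interface after the local HNA table has
 * changed, so that the next own OGM announces the current set of locally
 * attached hosts. */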
static void rebuild_batman_packet(struct batman_if *batman_if)
{
	int new_len;
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_len = sizeof(struct batman_packet) + (num_hna * ETH_ALEN);
	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, batman_if->packet_buff,
		       sizeof(struct batman_packet));

		batman_packet = (struct batman_packet *)new_buff;
		batman_packet->num_hna = hna_local_fill_buffer(
			new_buff + sizeof(struct batman_packet),
			new_len - sizeof(struct batman_packet));

		kfree(batman_if->packet_buff);
		batman_if->packet_buff = new_buff;
		batman_if->packet_len = new_len;
	}
}

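/* Prepares the own OGM of this interface (sequence number, visibility flags,
 * HNA buffer) and queues it for transmission at own_send_time(). */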
void schedule_own_packet(struct batman_if *batman_if)
{
	unsigned long send_time;
	struct batman_packet *batman_packet;

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() where the originator mac is set and
	 * outdated packets (especially uninitialized mac addresses) in the
	 * packet queue
	 */
	if (batman_if->if_active == IF_TO_BE_ACTIVATED)
		batman_if->if_active = IF_ACTIVE;

	/* if local hna has changed and interface is a primary interface */
	if ((atomic_read(&hna_local_changed)) && (batman_if->if_num == 0))
		rebuild_batman_packet(batman_if);

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * rebuild_batman_packet()
	 */
	batman_packet = (struct batman_packet *)batman_if->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno = htons((uint16_t)atomic_read(&batman_if->seqno));

	if (atomic_read(&vis_mode) == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags = VIS_SERVER;
	else
		batman_packet->flags = 0;

	/* could be read by receive_bat_packet() */
	atomic_inc(&batman_if->seqno);

	slide_own_bcast_window(batman_if);
	send_time = own_send_time();
	add_bat_packet_to_list(batman_if->packet_buff,
			       batman_if->packet_len, batman_if, 1, send_time);
}

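/* Prepares a received OGM for rebroadcast: decreases the TTL, records the
 * previous sender, takes over the TQ of our best ranking neighbour where
 * appropriate, applies the hop penalty and queues the packet at
 * forward_send_time(). */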
void schedule_forward_packet(struct orig_node *orig_node,
			     struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     uint8_t directlink, int hna_buff_len,
			     struct batman_if *if_incoming)
{
	unsigned char in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;

	if (batman_packet->ttl <= 1) {
		debug_log(LOG_TYPE_BATMAN, "ttl exceeded\n");
		return;
	}

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the
	 * rebroadcast of our best tq value */
	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_orig(orig_node->router->addr, ethhdr->h_source)) {
			batman_packet->tq = orig_node->router->tq_avg;

			if (orig_node->router->last_ttl)
				batman_packet->ttl =
					orig_node->router->last_ttl - 1;
		}

		tq_avg = orig_node->router->tq_avg;
	}

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq);

	debug_log(LOG_TYPE_BATMAN, "Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		  in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		  batman_packet->ttl);

	batman_packet->seqno = htons(batman_packet->seqno);

	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list((unsigned char *)batman_packet,
			       sizeof(struct batman_packet) + hna_buff_len,
			       if_incoming, 0, send_time);
}

static void forw_packet_free(struct forw_packet *forw_packet)
{
	kfree(forw_packet->packet_buff);
	kfree(forw_packet);
}

static void _add_bcast_packet_to_list(struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock(&forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &forw_bcast_list);
	spin_unlock(&forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

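/* Copies packet_buff into a newly allocated forw_packet and puts it on the
 * broadcast queue; the caller's buffer can be freed afterwards. */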
void add_bcast_packet_to_list(unsigned char *packet_buff, int packet_len)
{
	struct forw_packet *forw_packet;

	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		return;

	forw_packet->packet_buff = kmalloc(packet_len, GFP_ATOMIC);
	if (!forw_packet->packet_buff) {
		kfree(forw_packet);
		return;
	}

	forw_packet->packet_len = packet_len;
	memcpy(forw_packet->packet_buff, packet_buff, forw_packet->packet_len);

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(forw_packet, 1);
}

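/* Work queue callback: sends one round of the pending broadcast on every
 * interface and, as long as fewer than three rounds have gone out and the
 * module is not shutting down, re-arms itself roughly 5 ms later. */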
void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batman_if *batman_if;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);

	spin_lock(&forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock(&forw_bcast_list_lock);

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		send_raw_packet(forw_packet->packet_buff,
				forw_packet->packet_len,
				batman_if, broadcastAddr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send and we are not shutting
	 * down */
	if ((forw_packet->num_packets < 3) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		_add_bcast_packet_to_list(forw_packet, ((5 * HZ) / 1000));
	else
		forw_packet_free(forw_packet);
}

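/* Work queue callback for scheduled OGMs: removes the packet from the queue,
 * transmits it and, for own OGMs, immediately schedules the next one so the
 * queue never runs dry while the module is active. */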
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);

	spin_lock(&forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock(&forw_bat_list_lock);

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if ((forw_packet->own) &&
	    (atomic_read(&module_state) != MODULE_DEACTIVATING))
		schedule_own_packet(forw_packet->if_incoming);

	forw_packet_free(forw_packet);
}

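/* Cancels all queued broadcast and OGM transmissions, e.g. during module
 * shutdown. The list locks are dropped around cancel_delayed_work_sync()
 * because the work callbacks themselves take them to remove their entries. */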
void purge_outstanding_packets(void)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;

	debug_log(LOG_TYPE_BATMAN, "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock(&forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bcast_list, list) {

		spin_unlock(&forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock(&forw_bcast_list_lock);
	}
	spin_unlock(&forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock(&forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &forw_bat_list, list) {

		spin_unlock(&forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock(&forw_bat_list_lock);
	}
	spin_unlock(&forw_bat_list_lock);
}