/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "ring_buffer.h"
#include "originator.h"
#include "routing.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "send.h"
#include "bat_algo.h"

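/* set up the ogm packet buffer of a newly enabled interface */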
static void bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;
	uint32_t random_seqno;

	/* randomize initial seqno to avoid collision */
	get_random_bytes(&random_seqno, sizeof(random_seqno));
	atomic_set(&hard_iface->seqno, random_seqno);

	hard_iface->packet_len = BATMAN_OGM_LEN;
	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	batman_ogm_packet->header.packet_type = BAT_OGM;
	batman_ogm_packet->header.version = COMPAT_VERSION;
	batman_ogm_packet->header.ttl = 2;
	batman_ogm_packet->flags = NO_FLAGS;
	batman_ogm_packet->tq = TQ_MAX_VALUE;
	batman_ogm_packet->tt_num_changes = 0;
	batman_ogm_packet->ttvn = 0;
}

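/* prepare the ogm packet buffer for use on the primary interface */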
static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
	batman_ogm_packet->header.ttl = TTL;
}

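/* copy the interface mac address into the ogm's orig and prev_sender fields */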
static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	memcpy(batman_ogm_packet->orig,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(batman_ogm_packet->prev_sender,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
}

/* when do we schedule our own ogm to be sent */
static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % 2*JITTER));
}

/* when do we schedule an ogm packet to be sent */
static unsigned long bat_iv_ogm_fwd_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

/* is there another aggregated packet here? */
static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
				  int tt_num_changes)
{
	int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes);

	return (next_buff_pos <= packet_len) &&
		(next_buff_pos <= MAX_AGGREGATION_BYTES);
}

/* send a batman ogm to a given interface */
static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
				  struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_ogm_packet *batman_ogm_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
				      batman_ogm_packet->tt_num_changes)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_ogm_packet->flags |= DIRECTLINK;
		else
			batman_ogm_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
			(batman_ogm_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_ogm_packet->ttvn, hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += BATMAN_OGM_LEN +
			tt_len(batman_ogm_packet->tt_num_changes);
		packet_num++;
		batman_ogm_packet = (struct batman_ogm_packet *)
			(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman ogm packet */
static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;
	struct batman_ogm_packet *batman_ogm_packet;
	unsigned char directlink;

	batman_ogm_packet = (struct batman_ogm_packet *)
		(forw_packet->skb->data);
	directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not specified\n");
		goto out;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->header.ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* skb is only used once and then forw_packet is freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		goto out;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		bat_iv_ogm_send_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

230/* return true if new_packet can be aggregated with forw_packet */
Marek Lindner01c42242011-11-28 21:31:55 +0800231static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
Marek Lindnerb9dacc52011-08-03 09:09:30 +0200232 *new_batman_ogm_packet,
Marek Lindner01c42242011-11-28 21:31:55 +0800233 struct bat_priv *bat_priv,
234 int packet_len, unsigned long send_time,
235 bool directlink,
236 const struct hard_iface *if_incoming,
237 const struct forw_packet *forw_packet)
Marek Lindnerb9dacc52011-08-03 09:09:30 +0200238{
239 struct batman_ogm_packet *batman_ogm_packet;
240 int aggregated_bytes = forw_packet->packet_len + packet_len;
241 struct hard_iface *primary_if = NULL;
242 bool res = false;
243
244 batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
245
246 /**
247 * we can aggregate the current packet to this aggregated packet
248 * if:
249 *
250 * - the send time is within our MAX_AGGREGATION_MS time
251 * - the resulting packet wont be bigger than
252 * MAX_AGGREGATION_BYTES
253 */
254
255 if (time_before(send_time, forw_packet->send_time) &&
256 time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
257 forw_packet->send_time) &&
258 (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
259
260 /**
261 * check aggregation compatibility
262 * -> direct link packets are broadcasted on
263 * their interface only
264 * -> aggregate packet if the current packet is
265 * a "global" packet as well as the base
266 * packet
267 */
268
269 primary_if = primary_if_get_selected(bat_priv);
270 if (!primary_if)
271 goto out;
272
273 /* packets without direct link flag and high TTL
274 * are flooded through the net */
275 if ((!directlink) &&
276 (!(batman_ogm_packet->flags & DIRECTLINK)) &&
Sven Eckelmann76543d12011-11-20 15:47:38 +0100277 (batman_ogm_packet->header.ttl != 1) &&
Marek Lindnerb9dacc52011-08-03 09:09:30 +0200278
279 /* own packets originating non-primary
280 * interfaces leave only that interface */
281 ((!forw_packet->own) ||
282 (forw_packet->if_incoming == primary_if))) {
283 res = true;
284 goto out;
285 }
286
287 /* if the incoming packet is sent via this one
288 * interface only - we still can aggregate */
289 if ((directlink) &&
Sven Eckelmann76543d12011-11-20 15:47:38 +0100290 (new_batman_ogm_packet->header.ttl == 1) &&
Marek Lindnerb9dacc52011-08-03 09:09:30 +0200291 (forw_packet->if_incoming == if_incoming) &&
292
293 /* packets from direct neighbors or
294 * own secondary interface packets
295 * (= secondary interface packets in general) */
296 (batman_ogm_packet->flags & DIRECTLINK ||
297 (forw_packet->own &&
298 forw_packet->if_incoming != primary_if))) {
299 res = true;
300 goto out;
301 }
302 }
303
304out:
305 if (primary_if)
306 hardif_free_ref(primary_if);
307 return res;
308}
309
/* create a new aggregated packet and add this packet to it */
static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
				     int packet_len, unsigned long send_time,
				     bool direct_link,
				     struct hard_iface *if_incoming,
				     int own_packet)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct forw_packet *forw_packet_aggr;
	unsigned char *skb_buff;

	if (!atomic_inc_not_zero(&if_incoming->refcount))
		return;

	/* own packet should always be scheduled */
	if (!own_packet) {
		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
			bat_dbg(DBG_BATMAN, bat_priv,
				"batman packet queue full\n");
			goto out;
		}
	}

	forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
	if (!forw_packet_aggr) {
		if (!own_packet)
			atomic_inc(&bat_priv->batman_queue_left);
		goto out;
	}

	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
	    (packet_len < MAX_AGGREGATION_BYTES))
		forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
						      sizeof(struct ethhdr));
	else
		forw_packet_aggr->skb = dev_alloc_skb(packet_len +
						      sizeof(struct ethhdr));

	if (!forw_packet_aggr->skb) {
		if (!own_packet)
			atomic_inc(&bat_priv->batman_queue_left);
		kfree(forw_packet_aggr);
		goto out;
	}
	skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));

	INIT_HLIST_NODE(&forw_packet_aggr->list);

	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
	forw_packet_aggr->packet_len = packet_len;
	memcpy(skb_buff, packet_buff, packet_len);

	forw_packet_aggr->own = own_packet;
	forw_packet_aggr->if_incoming = if_incoming;
	forw_packet_aggr->num_packets = 0;
	forw_packet_aggr->direct_link_flags = NO_FLAGS;
	forw_packet_aggr->send_time = send_time;

	/* save packet direct link flag status */
	if (direct_link)
		forw_packet_aggr->direct_link_flags |= 1;

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
			  send_outstanding_bat_ogm_packet);
	queue_delayed_work(bat_event_workqueue,
			   &forw_packet_aggr->delayed_work,
			   send_time - jiffies);

	return;
out:
	hardif_free_ref(if_incoming);
}

/* aggregate a new packet into the existing ogm packet */
static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
				 const unsigned char *packet_buff,
				 int packet_len, bool direct_link)
{
	unsigned char *skb_buff;

	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
	memcpy(skb_buff, packet_buff, packet_len);
	forw_packet_aggr->packet_len += packet_len;
	forw_packet_aggr->num_packets++;

	/* save packet direct link flag status */
	if (direct_link)
		forw_packet_aggr->direct_link_flags |=
			(1 << forw_packet_aggr->num_packets);
}

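/* queue an ogm for transmission, aggregating it with a pending
 * forward packet whenever possible */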
static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
				 unsigned char *packet_buff,
				 int packet_len, struct hard_iface *if_incoming,
				 int own_packet, unsigned long send_time)
{
	/**
	 * _aggr -> pointer to the packet we want to aggregate with
	 * _pos -> pointer to the position in the queue
	 */
	struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
	struct hlist_node *tmp_node;
	struct batman_ogm_packet *batman_ogm_packet;
	bool direct_link;

	batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
	direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;

	/* find position for the packet in the forward queue */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	/* own packets are not to be aggregated */
	if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
		hlist_for_each_entry(forw_packet_pos, tmp_node,
				     &bat_priv->forw_bat_list, list) {
			if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
						     bat_priv, packet_len,
						     send_time, direct_link,
						     if_incoming,
						     forw_packet_pos)) {
				forw_packet_aggr = forw_packet_pos;
				break;
			}
		}
	}

	/* nothing to aggregate with - either aggregation disabled or no
	 * suitable aggregation packet found */
	if (!forw_packet_aggr) {
		/* the following section can run without the lock */
		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * if we could not aggregate this packet with one of the others
		 * we hold it back for a while, so that it might be aggregated
		 * later on
		 */
		if ((!own_packet) &&
		    (atomic_read(&bat_priv->aggregated_ogms)))
			send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);

		bat_iv_ogm_aggregate_new(packet_buff, packet_len,
					 send_time, direct_link,
					 if_incoming, own_packet);
	} else {
		bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
				     packet_len, direct_link);
		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
	}
}

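/* forward a received ogm: decrease the ttl, apply the hop penalty and
 * re-queue it for transmission */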
static void bat_iv_ogm_forward(struct orig_node *orig_node,
			       const struct ethhdr *ethhdr,
			       struct batman_ogm_packet *batman_ogm_packet,
			       int directlink, struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *router;
	uint8_t in_tq, in_ttl, tq_avg = 0;
	uint8_t tt_num_changes;

	if (batman_ogm_packet->header.ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	router = orig_node_get_router(orig_node);

	in_tq = batman_ogm_packet->tq;
	in_ttl = batman_ogm_packet->header.ttl;
	tt_num_changes = batman_ogm_packet->tt_num_changes;

	batman_ogm_packet->header.ttl--;
	memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
	 * of our best tq value */
	if (router && router->tq_avg != 0) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(router->addr, ethhdr->h_source)) {
			batman_ogm_packet->tq = router->tq_avg;

			if (router->last_ttl)
				batman_ogm_packet->header.ttl =
					router->last_ttl - 1;
		}

		tq_avg = router->tq_avg;
	}

	if (router)
		neigh_node_free_ref(router);

	/* apply hop penalty */
	batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
		batman_ogm_packet->header.ttl);

	batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
	batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);

	/* switch off primaries first hop flag when forwarding */
	batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (directlink)
		batman_ogm_packet->flags |= DIRECTLINK;
	else
		batman_ogm_packet->flags &= ~DIRECTLINK;

	bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
			     BATMAN_OGM_LEN + tt_len(tt_num_changes),
			     if_incoming, 0, bat_iv_ogm_fwd_send_time());
}

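/* prepare this interface's own ogm and queue it for transmission */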
static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
				int tt_num_changes)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *primary_if;
	int vis_server;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_ogm_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
	batman_ogm_packet->tt_crc = htons((uint16_t)
					  atomic_read(&bat_priv->tt_crc));
	if (tt_num_changes >= 0)
		batman_ogm_packet->tt_num_changes = tt_num_changes;

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_ogm_packet->flags |= VIS_SERVER;
	else
		batman_ogm_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_ogm_packet->gw_flags =
			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_ogm_packet->gw_flags = NO_FLAGS;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
			     hard_iface->packet_len, hard_iface, 1,
			     bat_iv_ogm_emit_send_time(bat_priv));

	if (primary_if)
		hardif_free_ref(primary_if);
}

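/* update the originator's neighbor ranking and route based on a received ogm */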
static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
				   struct orig_node *orig_node,
				   const struct ethhdr *ethhdr,
				   const struct batman_ogm_packet
						*batman_ogm_packet,
				   struct hard_iface *if_incoming,
				   const unsigned char *tt_buff,
				   int is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct neigh_node *router = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv,
		"update_originator(): Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		spin_lock_bh(&tmp_neigh_node->tq_lock);
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
		spin_unlock_bh(&tmp_neigh_node->tq_lock);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_ogm_packet->flags;
	neigh_node->last_valid = jiffies;

	spin_lock_bh(&neigh_node->tq_lock);
	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_ogm_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
	spin_unlock_bh(&neigh_node->tq_lock);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_ogm_packet->header.ttl;
		neigh_node->last_ttl = batman_ogm_packet->header.ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	router = orig_node_get_router(orig_node);
	if (router == neigh_node)
		goto update_tt;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if (router && (router->tq_avg > neigh_node->tq_avg))
		goto update_tt;

	/* if the TQ is the same and the link not more symmetric we
	 * won't consider it either */
	if (router && (neigh_node->tq_avg == router->tq_avg)) {
		orig_node_tmp = router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_tt;
	}

	update_route(bat_priv, orig_node, neigh_node);

update_tt:
	/* I have to check for transtable changes only if the OGM has been
	 * sent through a primary interface */
	if (((batman_ogm_packet->orig != ethhdr->h_source) &&
	     (batman_ogm_packet->header.ttl > 2)) ||
	    (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
		tt_update_orig(bat_priv, orig_node, tt_buff,
			       batman_ogm_packet->tt_num_changes,
			       batman_ogm_packet->ttvn,
			       batman_ogm_packet->tt_crc);

	if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
		gw_node_update(bat_priv, orig_node,
			       batman_ogm_packet->gw_flags);

	orig_node->gw_flags = batman_ogm_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (router)
		neigh_node_free_ref(router);
}

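/* calculate the transmit quality towards the one hop neighbor and return
 * whether the link is considered bidirectional */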
static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
			      struct orig_node *orig_neigh_node,
			      struct batman_ogm_packet *batman_ogm_packet,
			      struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	uint8_t total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	/* find corresponding one hop neighbor */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_neigh_node->neigh_list, list) {

		if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
			continue;

		if (tmp_neigh_node->if_incoming != if_incoming)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		neigh_node = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	if (!neigh_node)
		neigh_node = create_neighbor(orig_neigh_node,
					     orig_neigh_node,
					     orig_neigh_node->orig,
					     if_incoming);

	if (!neigh_node)
		goto out;

	/* if orig_node is direct neighbor update neigh_node last_valid */
	if (orig_node == orig_neigh_node)
		neigh_node->last_valid = jiffies;

	orig_node->last_valid = jiffies;

	/* find packet count of corresponding one hop neighbor */
	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE. This does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
					(TQ_LOCAL_WINDOW_SIZE *
					 TQ_LOCAL_WINDOW_SIZE *
					 TQ_LOCAL_WINDOW_SIZE);

	batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
				  * tq_asym_penalty) /
				 (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
				    const struct batman_ogm_packet
						*batman_ogm_packet,
				    const struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	int is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;

	/* signal to the caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
					     orig_node->last_real_seqno,
					     batman_ogm_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bitmap_weight(tmp_neigh_node->real_bits,
				      TQ_LOCAL_WINDOW_SIZE);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_ogm_packet->seqno);
		orig_node->last_real_seqno = batman_ogm_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}

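/* run all drop checks on a received ogm, update seqno and ranking state
 * and rebroadcast it if appropriate */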
static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
			       struct batman_ogm_packet *batman_ogm_packet,
			       const unsigned char *tt_buff,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	struct neigh_node *router = NULL, *router_router = NULL;
	struct neigh_node *orig_neigh_router = NULL;
	int has_directlink_flag;
	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	int is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 bytes) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_ogm_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_ogm_packet->header.packet_type != BAT_OGM)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
					   batman_ogm_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
		batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
		batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
		batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
		batman_ogm_packet->header.ttl,
		batman_ogm_packet->header.version, has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_ogm_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_ogm_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (is_broadcast_ether_addr(ethhdr->h_source))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* save packet seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_ogm_packet->orig)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bat_set_bit(word,
				    if_incoming_seqno -
				    batman_ogm_packet->seqno - 2);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
						if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time (sender: %pM)\n",
			ethhdr->h_source);
		goto out;
	}

	if (batman_ogm_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	router = orig_node_get_router(orig_node);
	if (router)
		router_router = orig_node_get_router(router->orig_node);

	/* avoid temporary routing loops */
	if (router && router_router &&
	    (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
	    !(compare_eth(batman_ogm_packet->orig,
			  batman_ogm_packet->prev_sender)) &&
	    (compare_eth(router->addr, router_router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
			ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	orig_neigh_router = orig_node_get_router(orig_neigh_node);

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
					      batman_ogm_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
		bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
				       batman_ogm_packet, if_incoming,
				       tt_buff, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
				   1, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv,
			"Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
			   0, if_incoming);

out_neigh:
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	if (router)
		neigh_node_free_ref(router);
	if (router_router)
		neigh_node_free_ref(router_router);
	if (orig_neigh_router)
		neigh_node_free_ref(orig_neigh_router);

	orig_node_free_ref(orig_node);
}

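/* unpack an aggregated ogm skb and process the contained ogms one by one */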
static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
			       struct sk_buff *skb)
{
	struct batman_ogm_packet *batman_ogm_packet;
	struct ethhdr *ethhdr;
	int buff_pos = 0, packet_len;
	unsigned char *tt_buff, *packet_buff;

	packet_len = skb_headlen(skb);
	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	packet_buff = skb->data;
	batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;

	/* unpack the aggregated packets and process them one by one */
	do {
		/* network to host order for our 32bit seqno and the
		   orig_interval */
		batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
		batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);

		tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN;

		bat_iv_ogm_process(ethhdr, batman_ogm_packet,
				   tt_buff, if_incoming);

		buff_pos += BATMAN_OGM_LEN +
			tt_len(batman_ogm_packet->tt_num_changes);

		batman_ogm_packet = (struct batman_ogm_packet *)
			(packet_buff + buff_pos);
	} while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
					batman_ogm_packet->tt_num_changes));
}

static struct bat_algo_ops batman_iv __read_mostly = {
	.name = "BATMAN IV",
	.bat_iface_enable = bat_iv_ogm_iface_enable,
	.bat_ogm_init_primary = bat_iv_ogm_init_primary,
	.bat_ogm_update_mac = bat_iv_ogm_update_mac,
	.bat_ogm_schedule = bat_iv_ogm_schedule,
	.bat_ogm_emit = bat_iv_ogm_emit,
	.bat_ogm_receive = bat_iv_ogm_receive,
};

int __init bat_iv_init(void)
{
	return bat_algo_register(&batman_iv);
}