blob: 7627ebe50c4b9c1c34d0383abbd10e562885d917 [file] [log] [blame]
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001/*
Sven Eckelmann64afe352011-01-27 10:38:15 +01002 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00003 *
4 * Marek Lindner, Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "routing.h"
24#include "send.h"
25#include "hash.h"
26#include "soft-interface.h"
27#include "hard-interface.h"
28#include "icmp_socket.h"
29#include "translation-table.h"
30#include "originator.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000031#include "ring_buffer.h"
32#include "vis.h"
33#include "aggregation.h"
34#include "gateway_common.h"
35#include "gateway_client.h"
36#include "unicast.h"
37
38void slide_own_bcast_window(struct batman_if *batman_if)
39{
40 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
41 struct hashtable_t *hash = bat_priv->orig_hash;
42 struct hlist_node *walk;
43 struct hlist_head *head;
44 struct element_t *bucket;
45 struct orig_node *orig_node;
46 unsigned long *word;
47 int i;
48 size_t word_index;
49
50 spin_lock_bh(&bat_priv->orig_hash_lock);
51
52 for (i = 0; i < hash->size; i++) {
53 head = &hash->table[i];
54
Marek Lindnerfb778ea2011-01-19 20:01:40 +000055 rcu_read_lock();
56 hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000057 orig_node = bucket->data;
Marek Lindner2ae2daf2011-01-19 20:01:42 +000058 spin_lock_bh(&orig_node->ogm_cnt_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000059 word_index = batman_if->if_num * NUM_WORDS;
60 word = &(orig_node->bcast_own[word_index]);
61
62 bit_get_packet(bat_priv, word, 1, 0);
63 orig_node->bcast_own_sum[batman_if->if_num] =
64 bit_packet_count(word);
Marek Lindner2ae2daf2011-01-19 20:01:42 +000065 spin_unlock_bh(&orig_node->ogm_cnt_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000066 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +000067 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000068 }
69
70 spin_unlock_bh(&bat_priv->orig_hash_lock);
71}
72
73static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
74 unsigned char *hna_buff, int hna_buff_len)
75{
76 if ((hna_buff_len != orig_node->hna_buff_len) ||
77 ((hna_buff_len > 0) &&
78 (orig_node->hna_buff_len > 0) &&
79 (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
80
81 if (orig_node->hna_buff_len > 0)
82 hna_global_del_orig(bat_priv, orig_node,
83 "originator changed hna");
84
85 if ((hna_buff_len > 0) && (hna_buff))
86 hna_global_add_orig(bat_priv, orig_node,
87 hna_buff, hna_buff_len);
88 }
89}
90
91static void update_route(struct bat_priv *bat_priv,
92 struct orig_node *orig_node,
93 struct neigh_node *neigh_node,
94 unsigned char *hna_buff, int hna_buff_len)
95{
Marek Lindnera8e7f4b2010-12-12 21:57:10 +000096 struct neigh_node *neigh_node_tmp;
97
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000098 /* route deleted */
99 if ((orig_node->router) && (!neigh_node)) {
100
101 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
102 orig_node->orig);
103 hna_global_del_orig(bat_priv, orig_node,
104 "originator timed out");
105
106 /* route added */
107 } else if ((!orig_node->router) && (neigh_node)) {
108
109 bat_dbg(DBG_ROUTES, bat_priv,
110 "Adding route towards: %pM (via %pM)\n",
111 orig_node->orig, neigh_node->addr);
112 hna_global_add_orig(bat_priv, orig_node,
113 hna_buff, hna_buff_len);
114
115 /* route changed */
116 } else {
117 bat_dbg(DBG_ROUTES, bat_priv,
118 "Changing route towards: %pM "
119 "(now via %pM - was via %pM)\n",
120 orig_node->orig, neigh_node->addr,
121 orig_node->router->addr);
122 }
123
Marek Lindnera8e7f4b2010-12-12 21:57:10 +0000124 if (neigh_node)
125 kref_get(&neigh_node->refcount);
126 neigh_node_tmp = orig_node->router;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000127 orig_node->router = neigh_node;
Marek Lindnera8e7f4b2010-12-12 21:57:10 +0000128 if (neigh_node_tmp)
129 kref_put(&neigh_node_tmp->refcount, neigh_node_free_ref);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000130}
131
132
133void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
134 struct neigh_node *neigh_node, unsigned char *hna_buff,
135 int hna_buff_len)
136{
137
138 if (!orig_node)
139 return;
140
141 if (orig_node->router != neigh_node)
142 update_route(bat_priv, orig_node, neigh_node,
143 hna_buff, hna_buff_len);
144 /* may be just HNA changed */
145 else
146 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
147}
148
149static int is_bidirectional_neigh(struct orig_node *orig_node,
150 struct orig_node *orig_neigh_node,
151 struct batman_packet *batman_packet,
152 struct batman_if *if_incoming)
153{
154 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
155 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
Marek Lindner9591a792010-12-12 21:57:11 +0000156 struct hlist_node *node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000157 unsigned char total_count;
Marek Lindnera775eb82011-01-19 20:01:39 +0000158 int ret = 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000159
160 if (orig_node == orig_neigh_node) {
Marek Lindnerf987ed62010-12-12 21:57:12 +0000161 rcu_read_lock();
162 hlist_for_each_entry_rcu(tmp_neigh_node, node,
163 &orig_node->neigh_list, list) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000164
165 if (compare_orig(tmp_neigh_node->addr,
166 orig_neigh_node->orig) &&
167 (tmp_neigh_node->if_incoming == if_incoming))
168 neigh_node = tmp_neigh_node;
169 }
170
171 if (!neigh_node)
172 neigh_node = create_neighbor(orig_node,
173 orig_neigh_node,
174 orig_neigh_node->orig,
175 if_incoming);
176 /* create_neighbor failed, return 0 */
177 if (!neigh_node)
Marek Lindnera775eb82011-01-19 20:01:39 +0000178 goto unlock;
179
180 kref_get(&neigh_node->refcount);
181 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000182
183 neigh_node->last_valid = jiffies;
184 } else {
185 /* find packet count of corresponding one hop neighbor */
Marek Lindnerf987ed62010-12-12 21:57:12 +0000186 rcu_read_lock();
187 hlist_for_each_entry_rcu(tmp_neigh_node, node,
188 &orig_neigh_node->neigh_list, list) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000189
190 if (compare_orig(tmp_neigh_node->addr,
191 orig_neigh_node->orig) &&
192 (tmp_neigh_node->if_incoming == if_incoming))
193 neigh_node = tmp_neigh_node;
194 }
195
196 if (!neigh_node)
197 neigh_node = create_neighbor(orig_neigh_node,
198 orig_neigh_node,
199 orig_neigh_node->orig,
200 if_incoming);
201 /* create_neighbor failed, return 0 */
202 if (!neigh_node)
Marek Lindnera775eb82011-01-19 20:01:39 +0000203 goto unlock;
204
205 kref_get(&neigh_node->refcount);
206 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000207 }
208
209 orig_node->last_valid = jiffies;
210
211 /* pay attention to not get a value bigger than 100 % */
212 total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
213 neigh_node->real_packet_count ?
214 neigh_node->real_packet_count :
215 orig_neigh_node->bcast_own_sum[if_incoming->if_num]);
216
217 /* if we have too few packets (too less data) we set tq_own to zero */
218 /* if we receive too few packets it is not considered bidirectional */
219 if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
220 (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
221 orig_neigh_node->tq_own = 0;
222 else
223 /* neigh_node->real_packet_count is never zero as we
224 * only purge old information when getting new
225 * information */
226 orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
227 neigh_node->real_packet_count;
228
229 /*
230 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
231 * affect the nearly-symmetric links only a little, but
232 * punishes asymmetric links more. This will give a value
233 * between 0 and TQ_MAX_VALUE
234 */
235 orig_neigh_node->tq_asym_penalty =
236 TQ_MAX_VALUE -
237 (TQ_MAX_VALUE *
238 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
239 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
240 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
241 (TQ_LOCAL_WINDOW_SIZE *
242 TQ_LOCAL_WINDOW_SIZE *
243 TQ_LOCAL_WINDOW_SIZE);
244
245 batman_packet->tq = ((batman_packet->tq *
246 orig_neigh_node->tq_own *
247 orig_neigh_node->tq_asym_penalty) /
248 (TQ_MAX_VALUE * TQ_MAX_VALUE));
249
250 bat_dbg(DBG_BATMAN, bat_priv,
251 "bidirectional: "
252 "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
253 "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
254 "total tq: %3i\n",
255 orig_node->orig, orig_neigh_node->orig, total_count,
256 neigh_node->real_packet_count, orig_neigh_node->tq_own,
257 orig_neigh_node->tq_asym_penalty, batman_packet->tq);
258
259 /* if link has the minimum required transmission quality
260 * consider it bidirectional */
261 if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
Marek Lindnera775eb82011-01-19 20:01:39 +0000262 ret = 1;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000263
Marek Lindnera775eb82011-01-19 20:01:39 +0000264 goto out;
265
266unlock:
267 rcu_read_unlock();
268out:
269 if (neigh_node)
270 kref_put(&neigh_node->refcount, neigh_node_free_ref);
271 return ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000272}
273
274static void update_orig(struct bat_priv *bat_priv,
275 struct orig_node *orig_node,
276 struct ethhdr *ethhdr,
277 struct batman_packet *batman_packet,
278 struct batman_if *if_incoming,
279 unsigned char *hna_buff, int hna_buff_len,
280 char is_duplicate)
281{
282 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
Marek Lindner2ae2daf2011-01-19 20:01:42 +0000283 struct orig_node *orig_node_tmp;
Marek Lindner9591a792010-12-12 21:57:11 +0000284 struct hlist_node *node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000285 int tmp_hna_buff_len;
Marek Lindner2ae2daf2011-01-19 20:01:42 +0000286 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000287
288 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
289 "Searching and updating originator entry of received packet\n");
290
Marek Lindnerf987ed62010-12-12 21:57:12 +0000291 rcu_read_lock();
292 hlist_for_each_entry_rcu(tmp_neigh_node, node,
293 &orig_node->neigh_list, list) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000294 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
295 (tmp_neigh_node->if_incoming == if_incoming)) {
296 neigh_node = tmp_neigh_node;
297 continue;
298 }
299
300 if (is_duplicate)
301 continue;
302
303 ring_buffer_set(tmp_neigh_node->tq_recv,
304 &tmp_neigh_node->tq_index, 0);
305 tmp_neigh_node->tq_avg =
306 ring_buffer_avg(tmp_neigh_node->tq_recv);
307 }
308
309 if (!neigh_node) {
310 struct orig_node *orig_tmp;
311
312 orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
313 if (!orig_tmp)
Marek Lindnera775eb82011-01-19 20:01:39 +0000314 goto unlock;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000315
316 neigh_node = create_neighbor(orig_node, orig_tmp,
317 ethhdr->h_source, if_incoming);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000318
319 kref_put(&orig_tmp->refcount, orig_node_free_ref);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000320 if (!neigh_node)
Marek Lindnera775eb82011-01-19 20:01:39 +0000321 goto unlock;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000322 } else
323 bat_dbg(DBG_BATMAN, bat_priv,
324 "Updating existing last-hop neighbor of originator\n");
325
Marek Lindnera775eb82011-01-19 20:01:39 +0000326 kref_get(&neigh_node->refcount);
327 rcu_read_unlock();
328
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000329 orig_node->flags = batman_packet->flags;
330 neigh_node->last_valid = jiffies;
331
332 ring_buffer_set(neigh_node->tq_recv,
333 &neigh_node->tq_index,
334 batman_packet->tq);
335 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
336
337 if (!is_duplicate) {
338 orig_node->last_ttl = batman_packet->ttl;
339 neigh_node->last_ttl = batman_packet->ttl;
340 }
341
342 tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
343 batman_packet->num_hna * ETH_ALEN : hna_buff_len);
344
345 /* if this neighbor already is our next hop there is nothing
346 * to change */
347 if (orig_node->router == neigh_node)
348 goto update_hna;
349
350 /* if this neighbor does not offer a better TQ we won't consider it */
351 if ((orig_node->router) &&
352 (orig_node->router->tq_avg > neigh_node->tq_avg))
353 goto update_hna;
354
355 /* if the TQ is the same and the link not more symetric we
356 * won't consider it either */
357 if ((orig_node->router) &&
Marek Lindner2ae2daf2011-01-19 20:01:42 +0000358 (neigh_node->tq_avg == orig_node->router->tq_avg)) {
359 orig_node_tmp = orig_node->router->orig_node;
360 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
361 bcast_own_sum_orig =
362 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
363 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
364
365 orig_node_tmp = neigh_node->orig_node;
366 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
367 bcast_own_sum_neigh =
368 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
369 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
370
371 if (bcast_own_sum_orig >= bcast_own_sum_neigh)
372 goto update_hna;
373 }
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000374
375 update_routes(bat_priv, orig_node, neigh_node,
376 hna_buff, tmp_hna_buff_len);
377 goto update_gw;
378
379update_hna:
380 update_routes(bat_priv, orig_node, orig_node->router,
381 hna_buff, tmp_hna_buff_len);
382
383update_gw:
384 if (orig_node->gw_flags != batman_packet->gw_flags)
385 gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
386
387 orig_node->gw_flags = batman_packet->gw_flags;
388
389 /* restart gateway selection if fast or late switching was enabled */
390 if ((orig_node->gw_flags) &&
391 (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
392 (atomic_read(&bat_priv->gw_sel_class) > 2))
393 gw_check_election(bat_priv, orig_node);
Marek Lindnera775eb82011-01-19 20:01:39 +0000394
395 goto out;
396
397unlock:
398 rcu_read_unlock();
399out:
400 if (neigh_node)
401 kref_put(&neigh_node->refcount, neigh_node_free_ref);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000402}
403
404/* checks whether the host restarted and is in the protection time.
405 * returns:
406 * 0 if the packet is to be accepted
407 * 1 if the packet is to be ignored.
408 */
409static int window_protected(struct bat_priv *bat_priv,
410 int32_t seq_num_diff,
411 unsigned long *last_reset)
412{
413 if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
414 || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
415 if (time_after(jiffies, *last_reset +
416 msecs_to_jiffies(RESET_PROTECTION_MS))) {
417
418 *last_reset = jiffies;
419 bat_dbg(DBG_BATMAN, bat_priv,
420 "old packet received, start protection\n");
421
422 return 0;
423 } else
424 return 1;
425 }
426 return 0;
427}
428
429/* processes a batman packet for all interfaces, adjusts the sequence number and
430 * finds out whether it is a duplicate.
431 * returns:
432 * 1 the packet is a duplicate
433 * 0 the packet has not yet been received
434 * -1 the packet is old and has been received while the seqno window
435 * was protected. Caller should drop it.
436 */
437static char count_real_packets(struct ethhdr *ethhdr,
438 struct batman_packet *batman_packet,
439 struct batman_if *if_incoming)
440{
441 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
442 struct orig_node *orig_node;
443 struct neigh_node *tmp_neigh_node;
Marek Lindner9591a792010-12-12 21:57:11 +0000444 struct hlist_node *node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000445 char is_duplicate = 0;
446 int32_t seq_diff;
447 int need_update = 0;
448 int set_mark;
449
450 orig_node = get_orig_node(bat_priv, batman_packet->orig);
451 if (!orig_node)
452 return 0;
453
454 seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
455
456 /* signalize caller that the packet is to be dropped. */
457 if (window_protected(bat_priv, seq_diff,
458 &orig_node->batman_seqno_reset))
Marek Lindner16b1aba2011-01-19 20:01:42 +0000459 goto err;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000460
Marek Lindnerf987ed62010-12-12 21:57:12 +0000461 rcu_read_lock();
462 hlist_for_each_entry_rcu(tmp_neigh_node, node,
463 &orig_node->neigh_list, list) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000464
465 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
466 orig_node->last_real_seqno,
467 batman_packet->seqno);
468
469 if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
470 (tmp_neigh_node->if_incoming == if_incoming))
471 set_mark = 1;
472 else
473 set_mark = 0;
474
475 /* if the window moved, set the update flag. */
476 need_update |= bit_get_packet(bat_priv,
477 tmp_neigh_node->real_bits,
478 seq_diff, set_mark);
479
480 tmp_neigh_node->real_packet_count =
481 bit_packet_count(tmp_neigh_node->real_bits);
482 }
Marek Lindnerf987ed62010-12-12 21:57:12 +0000483 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000484
485 if (need_update) {
486 bat_dbg(DBG_BATMAN, bat_priv,
487 "updating last_seqno: old %d, new %d\n",
488 orig_node->last_real_seqno, batman_packet->seqno);
489 orig_node->last_real_seqno = batman_packet->seqno;
490 }
491
Marek Lindner16b1aba2011-01-19 20:01:42 +0000492 kref_put(&orig_node->refcount, orig_node_free_ref);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000493 return is_duplicate;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000494
495err:
496 kref_put(&orig_node->refcount, orig_node_free_ref);
497 return -1;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000498}
499
500/* copy primary address for bonding */
Simon Wunderlich74ef1152010-12-29 16:15:19 +0000501static void mark_bonding_address(struct orig_node *orig_node,
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000502 struct orig_node *orig_neigh_node,
503 struct batman_packet *batman_packet)
504
505{
506 if (batman_packet->flags & PRIMARIES_FIRST_HOP)
507 memcpy(orig_neigh_node->primary_addr,
508 orig_node->orig, ETH_ALEN);
509
510 return;
511}
512
513/* mark possible bond.candidates in the neighbor list */
Simon Wunderlich74ef1152010-12-29 16:15:19 +0000514void update_bonding_candidates(struct orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000515{
516 int candidates;
517 int interference_candidate;
518 int best_tq;
Marek Lindner9591a792010-12-12 21:57:11 +0000519 struct hlist_node *node, *node2;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000520 struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
521 struct neigh_node *first_candidate, *last_candidate;
522
523 /* update the candidates for this originator */
524 if (!orig_node->router) {
525 orig_node->bond.candidates = 0;
526 return;
527 }
528
529 best_tq = orig_node->router->tq_avg;
530
531 /* update bond.candidates */
532
533 candidates = 0;
534
535 /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
536 * as "bonding partner" */
537
538 /* first, zero the list */
Marek Lindnerf987ed62010-12-12 21:57:12 +0000539 rcu_read_lock();
540 hlist_for_each_entry_rcu(tmp_neigh_node, node,
541 &orig_node->neigh_list, list) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000542 tmp_neigh_node->next_bond_candidate = NULL;
543 }
Marek Lindnerf987ed62010-12-12 21:57:12 +0000544 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000545
546 first_candidate = NULL;
547 last_candidate = NULL;
Marek Lindnerf987ed62010-12-12 21:57:12 +0000548
549 rcu_read_lock();
550 hlist_for_each_entry_rcu(tmp_neigh_node, node,
551 &orig_node->neigh_list, list) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000552
553 /* only consider if it has the same primary address ... */
554 if (memcmp(orig_node->orig,
555 tmp_neigh_node->orig_node->primary_addr,
556 ETH_ALEN) != 0)
557 continue;
558
559 /* ... and is good enough to be considered */
560 if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
561 continue;
562
563 /* check if we have another candidate with the same
564 * mac address or interface. If we do, we won't
565 * select this candidate because of possible interference. */
566
567 interference_candidate = 0;
Marek Lindnerf987ed62010-12-12 21:57:12 +0000568 hlist_for_each_entry_rcu(tmp_neigh_node2, node2,
569 &orig_node->neigh_list, list) {
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000570
571 if (tmp_neigh_node2 == tmp_neigh_node)
572 continue;
573
574 /* we only care if the other candidate is even
575 * considered as candidate. */
576 if (!tmp_neigh_node2->next_bond_candidate)
577 continue;
578
579
580 if ((tmp_neigh_node->if_incoming ==
581 tmp_neigh_node2->if_incoming)
582 || (memcmp(tmp_neigh_node->addr,
583 tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
584
585 interference_candidate = 1;
586 break;
587 }
588 }
589 /* don't care further if it is an interference candidate */
590 if (interference_candidate)
591 continue;
592
593 if (!first_candidate) {
594 first_candidate = tmp_neigh_node;
595 tmp_neigh_node->next_bond_candidate = first_candidate;
596 } else
597 tmp_neigh_node->next_bond_candidate = last_candidate;
598
599 last_candidate = tmp_neigh_node;
600
601 candidates++;
602 }
Marek Lindnerf987ed62010-12-12 21:57:12 +0000603 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000604
605 if (candidates > 0) {
606 first_candidate->next_bond_candidate = last_candidate;
607 orig_node->bond.selected = first_candidate;
608 }
609
610 orig_node->bond.candidates = candidates;
611}
612
613void receive_bat_packet(struct ethhdr *ethhdr,
614 struct batman_packet *batman_packet,
615 unsigned char *hna_buff, int hna_buff_len,
616 struct batman_if *if_incoming)
617{
618 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
619 struct batman_if *batman_if;
620 struct orig_node *orig_neigh_node, *orig_node;
621 char has_directlink_flag;
622 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
623 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
624 char is_duplicate;
625 uint32_t if_incoming_seqno;
626
627 /* Silently drop when the batman packet is actually not a
628 * correct packet.
629 *
630 * This might happen if a packet is padded (e.g. Ethernet has a
631 * minimum frame length of 64 byte) and the aggregation interprets
632 * it as an additional length.
633 *
634 * TODO: A more sane solution would be to have a bit in the
635 * batman_packet to detect whether the packet is the last
636 * packet in an aggregation. Here we expect that the padding
637 * is always zero (or not 0x01)
638 */
639 if (batman_packet->packet_type != BAT_PACKET)
640 return;
641
642 /* could be changed by schedule_own_packet() */
643 if_incoming_seqno = atomic_read(&if_incoming->seqno);
644
645 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
646
647 is_single_hop_neigh = (compare_orig(ethhdr->h_source,
648 batman_packet->orig) ? 1 : 0);
649
650 bat_dbg(DBG_BATMAN, bat_priv,
651 "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
652 "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
653 "TTL %d, V %d, IDF %d)\n",
654 ethhdr->h_source, if_incoming->net_dev->name,
655 if_incoming->net_dev->dev_addr, batman_packet->orig,
656 batman_packet->prev_sender, batman_packet->seqno,
657 batman_packet->tq, batman_packet->ttl, batman_packet->version,
658 has_directlink_flag);
659
660 rcu_read_lock();
661 list_for_each_entry_rcu(batman_if, &if_list, list) {
662 if (batman_if->if_status != IF_ACTIVE)
663 continue;
664
665 if (batman_if->soft_iface != if_incoming->soft_iface)
666 continue;
667
668 if (compare_orig(ethhdr->h_source,
669 batman_if->net_dev->dev_addr))
670 is_my_addr = 1;
671
672 if (compare_orig(batman_packet->orig,
673 batman_if->net_dev->dev_addr))
674 is_my_orig = 1;
675
676 if (compare_orig(batman_packet->prev_sender,
677 batman_if->net_dev->dev_addr))
678 is_my_oldorig = 1;
679
680 if (compare_orig(ethhdr->h_source, broadcast_addr))
681 is_broadcast = 1;
682 }
683 rcu_read_unlock();
684
685 if (batman_packet->version != COMPAT_VERSION) {
686 bat_dbg(DBG_BATMAN, bat_priv,
687 "Drop packet: incompatible batman version (%i)\n",
688 batman_packet->version);
689 return;
690 }
691
692 if (is_my_addr) {
693 bat_dbg(DBG_BATMAN, bat_priv,
694 "Drop packet: received my own broadcast (sender: %pM"
695 ")\n",
696 ethhdr->h_source);
697 return;
698 }
699
700 if (is_broadcast) {
701 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
702 "ignoring all packets with broadcast source addr (sender: %pM"
703 ")\n", ethhdr->h_source);
704 return;
705 }
706
707 if (is_my_orig) {
708 unsigned long *word;
709 int offset;
710
711 orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000712 if (!orig_neigh_node)
713 return;
714
715 /* neighbor has to indicate direct link and it has to
716 * come via the corresponding interface */
717 /* if received seqno equals last send seqno save new
718 * seqno for bidirectional check */
719 if (has_directlink_flag &&
720 compare_orig(if_incoming->net_dev->dev_addr,
721 batman_packet->orig) &&
722 (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
723 offset = if_incoming->if_num * NUM_WORDS;
Marek Lindner2ae2daf2011-01-19 20:01:42 +0000724
725 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000726 word = &(orig_neigh_node->bcast_own[offset]);
727 bit_mark(word, 0);
728 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
729 bit_packet_count(word);
Marek Lindner2ae2daf2011-01-19 20:01:42 +0000730 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000731 }
732
733 bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
734 "originator packet from myself (via neighbor)\n");
Marek Lindner16b1aba2011-01-19 20:01:42 +0000735 kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000736 return;
737 }
738
739 if (is_my_oldorig) {
740 bat_dbg(DBG_BATMAN, bat_priv,
741 "Drop packet: ignoring all rebroadcast echos (sender: "
742 "%pM)\n", ethhdr->h_source);
743 return;
744 }
745
746 orig_node = get_orig_node(bat_priv, batman_packet->orig);
747 if (!orig_node)
748 return;
749
750 is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
751
752 if (is_duplicate == -1) {
753 bat_dbg(DBG_BATMAN, bat_priv,
754 "Drop packet: packet within seqno protection time "
755 "(sender: %pM)\n", ethhdr->h_source);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000756 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000757 }
758
759 if (batman_packet->tq == 0) {
760 bat_dbg(DBG_BATMAN, bat_priv,
761 "Drop packet: originator packet with tq equal 0\n");
Marek Lindner16b1aba2011-01-19 20:01:42 +0000762 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000763 }
764
765 /* avoid temporary routing loops */
766 if ((orig_node->router) &&
767 (orig_node->router->orig_node->router) &&
768 (compare_orig(orig_node->router->addr,
769 batman_packet->prev_sender)) &&
770 !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
771 (compare_orig(orig_node->router->addr,
772 orig_node->router->orig_node->router->addr))) {
773 bat_dbg(DBG_BATMAN, bat_priv,
774 "Drop packet: ignoring all rebroadcast packets that "
775 "may make me loop (sender: %pM)\n", ethhdr->h_source);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000776 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000777 }
778
779 /* if sender is a direct neighbor the sender mac equals
780 * originator mac */
781 orig_neigh_node = (is_single_hop_neigh ?
782 orig_node :
783 get_orig_node(bat_priv, ethhdr->h_source));
784 if (!orig_neigh_node)
Marek Lindner16b1aba2011-01-19 20:01:42 +0000785 goto out_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000786
787 /* drop packet if sender is not a direct neighbor and if we
788 * don't route towards it */
789 if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
790 bat_dbg(DBG_BATMAN, bat_priv,
791 "Drop packet: OGM via unknown neighbor!\n");
Marek Lindner16b1aba2011-01-19 20:01:42 +0000792 goto out_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000793 }
794
795 is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
796 batman_packet, if_incoming);
797
798 /* update ranking if it is not a duplicate or has the same
799 * seqno and similar ttl as the non-duplicate */
800 if (is_bidirectional &&
801 (!is_duplicate ||
802 ((orig_node->last_real_seqno == batman_packet->seqno) &&
803 (orig_node->last_ttl - 3 <= batman_packet->ttl))))
804 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
805 if_incoming, hna_buff, hna_buff_len, is_duplicate);
806
Simon Wunderlich74ef1152010-12-29 16:15:19 +0000807 mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
808 update_bonding_candidates(orig_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000809
810 /* is single hop (direct) neighbor */
811 if (is_single_hop_neigh) {
812
813 /* mark direct link on incoming interface */
814 schedule_forward_packet(orig_node, ethhdr, batman_packet,
815 1, hna_buff_len, if_incoming);
816
817 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
818 "rebroadcast neighbor packet with direct link flag\n");
Marek Lindner16b1aba2011-01-19 20:01:42 +0000819 goto out_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000820 }
821
822 /* multihop originator */
823 if (!is_bidirectional) {
824 bat_dbg(DBG_BATMAN, bat_priv,
825 "Drop packet: not received via bidirectional link\n");
Marek Lindner16b1aba2011-01-19 20:01:42 +0000826 goto out_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000827 }
828
829 if (is_duplicate) {
830 bat_dbg(DBG_BATMAN, bat_priv,
831 "Drop packet: duplicate packet received\n");
Marek Lindner16b1aba2011-01-19 20:01:42 +0000832 goto out_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000833 }
834
835 bat_dbg(DBG_BATMAN, bat_priv,
836 "Forwarding packet: rebroadcast originator packet\n");
837 schedule_forward_packet(orig_node, ethhdr, batman_packet,
838 0, hna_buff_len, if_incoming);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000839
840out_neigh:
841 if (!is_single_hop_neigh)
842 kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
843out:
844 kref_put(&orig_node->refcount, orig_node_free_ref);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000845}
846
847int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
848{
849 struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
850 struct ethhdr *ethhdr;
851
852 /* drop packet if it has not necessary minimum size */
853 if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
854 return NET_RX_DROP;
855
856 ethhdr = (struct ethhdr *)skb_mac_header(skb);
857
858 /* packet with broadcast indication but unicast recipient */
859 if (!is_broadcast_ether_addr(ethhdr->h_dest))
860 return NET_RX_DROP;
861
862 /* packet with broadcast sender address */
863 if (is_broadcast_ether_addr(ethhdr->h_source))
864 return NET_RX_DROP;
865
866 /* create a copy of the skb, if needed, to modify it. */
867 if (skb_cow(skb, 0) < 0)
868 return NET_RX_DROP;
869
870 /* keep skb linear */
871 if (skb_linearize(skb) < 0)
872 return NET_RX_DROP;
873
874 ethhdr = (struct ethhdr *)skb_mac_header(skb);
875
876 spin_lock_bh(&bat_priv->orig_hash_lock);
877 receive_aggr_bat_packet(ethhdr,
878 skb->data,
879 skb_headlen(skb),
880 batman_if);
881 spin_unlock_bh(&bat_priv->orig_hash_lock);
882
883 kfree_skb(skb);
884 return NET_RX_SUCCESS;
885}
886
/* Handle a batman icmp packet whose final destination is this node.
 * Non-ECHO_REQUEST messages are delivered to the userspace socket;
 * echo requests are turned around into an ECHO_REPLY and sent back
 * towards the originator.
 *
 * NOTE(review): the orig_hash_lock is deliberately dropped before
 * send_skb_packet() — the required routing data (outgoing interface and
 * next-hop address) is copied out first, so do not reorder these steps.
 *
 * Returns NET_RX_SUCCESS when a reply was sent, NET_RX_DROP otherwise
 * (the caller frees the skb on drop).
 */
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node;
	struct icmp_packet_rr *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		return NET_RX_DROP;
	}

	/* without a primary interface we have no source address to
	 * answer from */
	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* answer echo request (ping) */
	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {

		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have reallocated the data area */
		icmp_packet = (struct icmp_packet_rr *)skb->data;

		/* turn the request around: old originator becomes the
		 * destination, we become the originator */
		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = ECHO_REPLY;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
945
/* Answer an icmp echo request whose ttl expired on this hop with a
 * TTL_EXCEEDED message sent back to the originator (traceroute support).
 *
 * NOTE(review): structurally a twin of recv_my_icmp_packet() — the
 * orig_hash_lock is released only after copying the next-hop interface
 * and address, so the send happens unlocked.
 *
 * Returns NET_RX_SUCCESS when the notification was sent, NET_RX_DROP
 * otherwise (caller frees the skb on drop).
 */
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node;
	struct icmp_packet *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		return NET_RX_DROP;
	}

	/* without a primary interface we have no source address to
	 * answer from */
	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {

		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have reallocated the data area */
		icmp_packet = (struct icmp_packet *) skb->data;

		/* turn the packet around: old originator becomes the
		 * destination, we become the originator */
		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = TTL_EXCEEDED;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
1005
1006
1007int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
1008{
1009 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1010 struct icmp_packet_rr *icmp_packet;
1011 struct ethhdr *ethhdr;
1012 struct orig_node *orig_node;
1013 struct batman_if *batman_if;
1014 int hdr_size = sizeof(struct icmp_packet);
1015 int ret;
1016 uint8_t dstaddr[ETH_ALEN];
1017
1018 /**
1019 * we truncate all incoming icmp packets if they don't match our size
1020 */
1021 if (skb->len >= sizeof(struct icmp_packet_rr))
1022 hdr_size = sizeof(struct icmp_packet_rr);
1023
1024 /* drop packet if it has not necessary minimum size */
1025 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1026 return NET_RX_DROP;
1027
1028 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1029
1030 /* packet with unicast indication but broadcast recipient */
1031 if (is_broadcast_ether_addr(ethhdr->h_dest))
1032 return NET_RX_DROP;
1033
1034 /* packet with broadcast sender address */
1035 if (is_broadcast_ether_addr(ethhdr->h_source))
1036 return NET_RX_DROP;
1037
1038 /* not for me */
1039 if (!is_my_mac(ethhdr->h_dest))
1040 return NET_RX_DROP;
1041
1042 icmp_packet = (struct icmp_packet_rr *)skb->data;
1043
1044 /* add record route information if not full */
1045 if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
1046 (icmp_packet->rr_cur < BAT_RR_LEN)) {
1047 memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
1048 ethhdr->h_dest, ETH_ALEN);
1049 icmp_packet->rr_cur++;
1050 }
1051
1052 /* packet for me */
1053 if (is_my_mac(icmp_packet->dst))
1054 return recv_my_icmp_packet(bat_priv, skb, hdr_size);
1055
1056 /* TTL exceeded */
1057 if (icmp_packet->ttl < 2)
Simon Wunderlich74ef1152010-12-29 16:15:19 +00001058 return recv_icmp_ttl_exceeded(bat_priv, skb);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001059
1060 ret = NET_RX_DROP;
1061
1062 /* get routing information */
1063 spin_lock_bh(&bat_priv->orig_hash_lock);
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001064 rcu_read_lock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001065 orig_node = ((struct orig_node *)
1066 hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
1067 icmp_packet->dst));
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001068 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001069
1070 if ((orig_node) && (orig_node->router)) {
1071
1072 /* don't lock while sending the packets ... we therefore
1073 * copy the required data before sending */
1074 batman_if = orig_node->router->if_incoming;
1075 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
1076 spin_unlock_bh(&bat_priv->orig_hash_lock);
1077
1078 /* create a copy of the skb, if needed, to modify it. */
1079 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
1080 return NET_RX_DROP;
1081
1082 icmp_packet = (struct icmp_packet_rr *)skb->data;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001083
1084 /* decrement ttl */
1085 icmp_packet->ttl--;
1086
1087 /* route it */
1088 send_skb_packet(skb, batman_if, dstaddr);
1089 ret = NET_RX_SUCCESS;
1090
1091 } else
1092 spin_unlock_bh(&bat_priv->orig_hash_lock);
1093
1094 return ret;
1095}
1096
/* find a suitable router for this originator, and use
 * bonding if possible.
 *
 * Returns orig_node's default router unless bonding candidates exist on
 * the originator owning the router's primary address. With bonding
 * enabled, candidates are cycled round-robin (skipping the interface the
 * packet arrived on); with bonding disabled, the best-tq candidate not
 * using recv_if is chosen. recv_if == NULL means "first hop" (locally
 * generated traffic).
 *
 * NOTE(review): callers appear to hold bat_priv->orig_hash_lock around
 * this call (see route_unicast_packet) — the returned neigh_node is not
 * reference counted here; confirm against callers before reuse. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *best_router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	/* no known originator -> no route */
	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */

	bonding_enabled = atomic_read(&bat_priv->bonding);

	if ((!recv_if) && (!bonding_enabled))
		return orig_node->router;

	router_orig = orig_node->router->orig_node;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
		return orig_node->router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */

	if (memcmp(router_orig->primary_addr,
		   router_orig->orig, ETH_ALEN) == 0) {
		primary_orig_node = router_orig;
	} else {
		rcu_read_lock();
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);
		rcu_read_unlock();

		if (!primary_orig_node)
			return orig_node->router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */

	if (primary_orig_node->bond.candidates < 2)
		return orig_node->router;


	/* all nodes between should choose a candidate which
	 * is is not on the interface where the packet came
	 * in. */
	first_candidate = primary_orig_node->bond.selected;
	router = first_candidate;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */
		do {
			/* recv_if == NULL on the first node. */
			if (router->if_incoming != recv_if)
				break;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		/* advance the round-robin pointer for the next packet */
		primary_orig_node->bond.selected = router->next_bond_candidate;

	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		best_router = first_candidate;

		do {
			/* recv_if == NULL on the first node. */
			if ((router->if_incoming != recv_if) &&
			    (router->tq_avg > best_router->tq_avg))
				best_router = router;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		router = best_router;
	}

	return router;
}
1193
1194static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
1195{
1196 struct ethhdr *ethhdr;
1197
1198 /* drop packet if it has not necessary minimum size */
1199 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1200 return -1;
1201
1202 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1203
1204 /* packet with unicast indication but broadcast recipient */
1205 if (is_broadcast_ether_addr(ethhdr->h_dest))
1206 return -1;
1207
1208 /* packet with broadcast sender address */
1209 if (is_broadcast_ether_addr(ethhdr->h_source))
1210 return -1;
1211
1212 /* not for me */
1213 if (!is_my_mac(ethhdr->h_dest))
1214 return -1;
1215
1216 return 0;
1217}
1218
/* Forward a unicast frame towards its destination on behalf of another
 * node: look up the next hop (honouring bonding via find_router()),
 * fragment or reassemble as the next hop's MTU demands, decrement the
 * ttl and transmit.
 *
 * NOTE(review): the orig_hash_lock is taken for the route lookup and
 * released before any transmit; the outgoing interface and next-hop
 * address are copied out first — keep this ordering intact.
 *
 * Returns NET_RX_SUCCESS when the frame was sent or buffered for a
 * later fragment merge, NET_RX_DROP otherwise (caller frees the skb
 * on drop).
 */
int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
			 int hdr_size)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *router;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		return NET_RX_DROP;
	}

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->dest));
	rcu_read_unlock();

	/* find_router() copes with orig_node == NULL */
	router = find_router(bat_priv, orig_node, recv_if);

	if (!router) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */

	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		return NET_RX_DROP;

	/* skb_cow() may have reallocated the data area */
	unicast_packet = (struct unicast_packet *)skb->data;

	/* frame too big for the next hop: fragment it (if enabled) */
	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > batman_if->net_dev->mtu)
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);

	/* fragment that would fit the next hop reassembled: try to merge
	 * it with its buffered counterpart */
	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, batman_if, dstaddr);

	return NET_RX_SUCCESS;
}
1301
1302int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
1303{
1304 struct unicast_packet *unicast_packet;
1305 int hdr_size = sizeof(struct unicast_packet);
1306
1307 if (check_unicast_packet(skb, hdr_size) < 0)
1308 return NET_RX_DROP;
1309
1310 unicast_packet = (struct unicast_packet *)skb->data;
1311
1312 /* packet for me */
1313 if (is_my_mac(unicast_packet->dest)) {
1314 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1315 return NET_RX_SUCCESS;
1316 }
1317
1318 return route_unicast_packet(skb, recv_if, hdr_size);
1319}
1320
1321int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
1322{
1323 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1324 struct unicast_frag_packet *unicast_packet;
1325 int hdr_size = sizeof(struct unicast_frag_packet);
1326 struct sk_buff *new_skb = NULL;
1327 int ret;
1328
1329 if (check_unicast_packet(skb, hdr_size) < 0)
1330 return NET_RX_DROP;
1331
1332 unicast_packet = (struct unicast_frag_packet *)skb->data;
1333
1334 /* packet for me */
1335 if (is_my_mac(unicast_packet->dest)) {
1336
1337 ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
1338
1339 if (ret == NET_RX_DROP)
1340 return NET_RX_DROP;
1341
1342 /* packet was buffered for late merge */
1343 if (!new_skb)
1344 return NET_RX_SUCCESS;
1345
1346 interface_rx(recv_if->soft_iface, new_skb, recv_if,
1347 sizeof(struct unicast_packet));
1348 return NET_RX_SUCCESS;
1349 }
1350
1351 return route_unicast_packet(skb, recv_if, hdr_size);
1352}
1353
1354
/* Receive handler for batman broadcast packets: validate the frame,
 * drop our own and duplicate broadcasts via the per-originator sequence
 * number window, then rebroadcast the packet and deliver a copy locally.
 *
 * NOTE(review): the seqno window checks and update run entirely under
 * orig_hash_lock; the rebroadcast/local delivery happen after unlock.
 *
 * Returns NET_RX_SUCCESS when processed, NET_RX_DROP otherwise (caller
 * frees the skb on drop).
 */
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		return NET_RX_DROP;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		return NET_RX_DROP;

	/* ttl spent: do not rebroadcast */
	if (bcast_packet->ttl < 2)
		return NET_RX_DROP;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       bcast_packet->orig));
	rcu_read_unlock();

	/* unknown originator: cannot do duplicate detection */
	if (!orig_node) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits,
			   orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno))) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset)) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	return NET_RX_SUCCESS;
}
1434
1435int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
1436{
1437 struct vis_packet *vis_packet;
1438 struct ethhdr *ethhdr;
1439 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1440 int hdr_size = sizeof(struct vis_packet);
1441
1442 /* keep skb linear */
1443 if (skb_linearize(skb) < 0)
1444 return NET_RX_DROP;
1445
1446 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1447 return NET_RX_DROP;
1448
1449 vis_packet = (struct vis_packet *)skb->data;
1450 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1451
1452 /* not for me */
1453 if (!is_my_mac(ethhdr->h_dest))
1454 return NET_RX_DROP;
1455
1456 /* ignore own packets */
1457 if (is_my_mac(vis_packet->vis_orig))
1458 return NET_RX_DROP;
1459
1460 if (is_my_mac(vis_packet->sender_orig))
1461 return NET_RX_DROP;
1462
1463 switch (vis_packet->vis_type) {
1464 case VIS_TYPE_SERVER_SYNC:
1465 receive_server_sync_packet(bat_priv, vis_packet,
1466 skb_headlen(skb));
1467 break;
1468
1469 case VIS_TYPE_CLIENT_UPDATE:
1470 receive_client_update_packet(bat_priv, vis_packet,
1471 skb_headlen(skb));
1472 break;
1473
1474 default: /* ignore unknown packet */
1475 break;
1476 }
1477
1478 /* We take a copy of the data in the packet, so we should
1479 always free the skbuf. */
1480 return NET_RX_DROP;
1481}