/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface; returns NET_XMIT_DROP on error, otherwise
 * the dev_queue_xmit() result */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header */
	if (my_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

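/* mark the interface active if it was only pending activation, then let
 * the active routing algorithm (B.A.T.M.A.N. IV by default in this
 * version) schedule the next OGM via its bat_ogm_schedule() callback */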
void schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here instead of in
	 * hardif_activate_interface() to avoid a race between setting the
	 * originator mac there and sending outdated packets (especially
	 * ones with an uninitialized mac address) from the packet queue */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

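/* free a forwarding packet: drop its skb (if any) and release the
 * reference held on its incoming hard interface */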
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

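/* enqueue the packet on the broadcast queue and arm its delayed work;
 * callers must already hold a slot in bcast_queue_left */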
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->header.ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
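
/* A rough sketch of a typical caller - the broadcast path of
 * interface_tx() in soft-interface.c queues an skb along these lines
 * (the exact delay value there is an assumption):
 *
 *	add_bcast_packet_to_list(bat_priv, skb, 1);
 *	kfree_skb(skb);	(safe - the list keeps its own copy, see skb_copy())
 */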
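/* delayed-work handler: broadcast the queued packet once on every hard
 * interface attached to our soft interface, then re-arm the timer with a
 * 5 ms delay until the packet has been sent three times in total */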
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  msecs_to_jiffies(5));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

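/* delayed-work handler: hand the dequeued OGM to the routing algorithm
 * for transmission and, if it is one of our own OGMs, schedule the next
 * one so the queue's wake up time stays defined */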
void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down */
	if (forw_packet->own)
		schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

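/* cancel and free all queued broadcast and OGM packets, either those of a
 * single hard interface (hard_iface set - typically when it goes down) or
 * of the whole mesh (hard_iface == NULL, e.g. on shutdown) */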
void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}