/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "gateway_common.h"
#include "originator.h"
#include "bat_ogm.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

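/* resize the OGM packet buffer of the given interface to new_len bytes
 * while preserving the OGM data at the start of the buffer; if the
 * allocation fails the old buffer is kept untouched */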
static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       BATMAN_OGM_LEN);

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}

/* when calling this function (hard_iface == primary_if) has to be true */
static int prepare_packet_buffer(struct bat_priv *bat_priv,
				  struct hard_iface *hard_iface)
{
	int new_len;

	new_len = BATMAN_OGM_LEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BATMAN_OGM_LEN;

	realloc_packet_buffer(hard_iface, new_len);

	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	return tt_changes_fill_buffer(bat_priv,
				      hard_iface->packet_buff + BATMAN_OGM_LEN,
				      hard_iface->packet_len - BATMAN_OGM_LEN);
}

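/* shrink the packet buffer to a bare OGM without appended tt changes and
 * report zero changes back to the caller */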
static int reset_packet_buffer(struct bat_priv *bat_priv,
				struct hard_iface *hard_iface)
{
	realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);
	return 0;
}

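/* prepare the next OGM for the given interface: append pending tt changes
 * when sending on the primary interface and hand the buffer over to
 * bat_ogm_schedule() for the actual scheduling */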
void schedule_bat_ogm(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	int tt_num_changes = -1;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	/**
	 * the interface gets activated here to avoid race conditions
	 * between the moment the interface is activated in
	 * hardif_activate_interface() (where the originator mac is set)
	 * and outdated packets - especially those carrying uninitialized
	 * mac addresses - still sitting in the packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	primary_if = primary_if_get_selected(bat_priv);

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			tt_commit_changes(bat_priv);
			tt_num_changes = prepare_packet_buffer(bat_priv,
							       hard_iface);
		}

		/* if the changes have been sent often enough */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			tt_num_changes = reset_packet_buffer(bat_priv,
							     hard_iface);
	}

	if (primary_if)
		hardif_free_ref(primary_if);

	bat_ogm_schedule(hard_iface, tt_num_changes);
}

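/* free a forwarding packet: release its skb and the reference held on the
 * incoming interface before freeing the structure itself */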
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

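/* enqueue the broadcast packet under the bcast list lock and arm the
 * delayed work that will transmit it after send_time (in jiffies) */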
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and set up timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

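/* worker for queued broadcast packets: take the packet off the bcast list,
 * send a copy on every hard interface attached to this soft interface and
 * re-queue it until it has been sent three times */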
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

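/* worker for queued OGMs: take the packet off the bat list, emit it via
 * bat_ogm_emit() and, for our own OGMs, schedule the next one */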
void send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	bat_ogm_emit(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

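/* cancel all scheduled broadcast and OGM packets; if hard_iface is given
 * only packets queued for that interface are purged */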
void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_ogm_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}