/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));

/* Don't forward packets to originating port or if forwarding is disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		p->state == BR_STATE_FORWARDING);
}

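/* frame length used for the MTU check; an 802.1Q tag does not count against the MTU */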
static inline unsigned packet_length(const struct sk_buff *skb)
{
	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
}

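/*
 * Final transmit step: drop oversized non-GSO frames, restore the
 * Ethernet header and queue the skb on the outgoing device.
 */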
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* drop mtu oversized packets except gso */
	if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
		kfree_skb(skb);
	else {
		/* ip_fragment doesn't copy the MAC header */
		if (nf_bridge_maybe_copy_header(skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			dev_queue_xmit(skb);
		}
	}

	return 0;
}

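/* run the NF_BR_POST_ROUTING hook, then hand the frame to br_dev_queue_push_xmit */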
int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}

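/*
 * Deliver a locally originated frame on one port: traverses the
 * NF_BR_LOCAL_OUT hook, or goes straight to the device when netpoll
 * is active on it.
 */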
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb->dev = to->dev;

	if (unlikely(netpoll_tx_running(to->dev))) {
		if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			br_netpoll_send_skb(to, skb);
		}
		return;
	}

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);
}

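/*
 * Forward a received frame out through one port: refuses LRO-aggregated
 * skbs and traverses the NF_BR_FORWARD hook.
 */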
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}

/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb,
		struct sk_buff *skb0)
{
	if (should_deliver(to, skb)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}

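/*
 * Clone the skb and pass the clone to __packet_hook (__br_deliver or
 * __br_forward); the original is left untouched for further delivery.
 * A failed clone is accounted as tx_dropped on the bridge device.
 */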
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}

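/*
 * Lazy delivery helper used when flooding: the frame is only cloned for
 * the previously selected port once another eligible port is found, so
 * the last port can be handed the original skb without a copy.
 */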
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}

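/*
 * Flood a frame to all forwarding ports except the one it arrived on
 * (see should_deliver).  When skb0 is non-NULL the caller still needs
 * the original skb, so every port gets a clone; otherwise the original
 * is handed to the last port, or freed if no port accepts it.
 */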
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb))
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
	br_flood(br, skb, NULL, __br_deliver);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2)
{
	br_flood(br, skb, skb2, __br_forward);
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(br->router_list.first);
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
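	/*
	 * Walk the group's port list and the bridge's router port list
	 * in parallel: each step delivers to the higher port pointer and
	 * advances whichever list(s) pointed at it, so a port appearing
	 * on both lists gets a single copy of the frame.
	 */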
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(rp->next);
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}

/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif