/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	$Id: br_forward.c,v 1.4 2001/08/14 22:05:57 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

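/* A frame is only sent out a port if that port is not the one it
 * arrived on and the port is in the STP forwarding state.
 */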
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	if (skb->dev == p->dev ||
	    p->state != BR_STATE_FORWARDING)
		return 0;

	return 1;
}

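/* Final transmit step: drop frames that exceed the device MTU (unless
 * TSO will segment them), push the Ethernet header back on and queue
 * the frame on the outgoing device.
 */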
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* drop mtu oversized packets except tso */
	if (skb->len > skb->dev->mtu && !skb_shinfo(skb)->tso_size)
		kfree_skb(skb);
	else {
#ifdef CONFIG_BRIDGE_NETFILTER
		/* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
		nf_bridge_maybe_copy_header(skb);
#endif
		skb_push(skb, ETH_HLEN);

		dev_queue_xmit(skb);
	}

	return 0;
}

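/* Run the bridge POST_ROUTING netfilter hook, then do the final transmit. */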
int br_forward_finish(struct sk_buff *skb)
{
	NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		br_dev_queue_push_xmit);

	return 0;
}

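/* Deliver a locally originated frame: retarget it at the destination
 * port's device and run the bridge LOCAL_OUT hook.
 */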
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb->dev = to->dev;
	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);
}

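/* Forward a received frame: remember the input device for netfilter,
 * retarget the skb at the destination port's device, invalidate any
 * checksum state and run the bridge FORWARD hook.
 */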
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	indev = skb->dev;
	skb->dev = to->dev;
	skb->ip_summed = CHECKSUM_NONE;

	NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}

/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (should_deliver(to, skb)) {
		__br_forward(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb))
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	if (clone) {
		struct sk_buff *skb2;

		if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			br->statistics.tx_dropped++;
			return;
		}

		skb = skb2;
	}

	prev = NULL;

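	/*
	 * Send a clone to every eligible port except the last one found;
	 * the original skb goes to that last port, so flooding to N ports
	 * costs only N - 1 clones.
	 */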
	list_for_each_entry_rcu(p, &br->port_list, list) {
		if (should_deliver(p, skb)) {
			if (prev != NULL) {
				struct sk_buff *skb2;

				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
					br->statistics.tx_dropped++;
					kfree_skb(skb);
					return;
				}

				__packet_hook(prev, skb2);
			}

			prev = p;
		}
	}

	if (prev != NULL) {
		__packet_hook(prev, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, int clone)
{
	br_flood(br, skb, clone, __br_deliver);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, int clone)
{
	br_flood(br, skb, clone, __br_forward);
}