/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

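/*
 * __ip_local_out() fills in tot_len and the header checksum and then
 * runs the packet through the NF_INET_LOCAL_OUT netfilter hook.
 * nf_hook() returns 1 when the hook accepts the packet, which is why
 * ip_local_out() continues with dst_output() only on that value; any
 * other result (a negative error, or 0 when a hook stole or queued
 * the packet) is handed back to the caller unchanged.
 */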
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}

int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	netif_rx(newskb);
	return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}

/*
 *	Add an IP header to an skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->u.dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

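/*
 * ip_finish_output2() performs the final layer-2 step of transmission:
 * it makes sure there is enough headroom for the hardware header, then
 * hands the skb to the cached hardware header path (dst->hh) when one
 * exists, or to the neighbour subsystem for address resolution
 * otherwise.
 */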
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}

static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

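/*
 * ip_finish_output() chooses between the fragmenting and the direct
 * path: packets larger than the destination MTU go through
 * ip_fragment() unless they are GSO packets, whose segmentation is
 * deferred to the device/GSO layer.  An xfrm policy attached after
 * SNAT instead forces a reroute through dst_output().
 */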
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

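/*
 * ip_output() is the dst output method used for ordinary unicast
 * routes: account the packet, set the outgoing device and protocol,
 * and run the NF_INET_POST_ROUTING hook, which is skipped for packets
 * already rerouted by netfilter (IPSKB_REROUTED).
 */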
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

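/*
 * ip_queue_xmit() is the main transmit entry point for connected
 * sockets (TCP uses it for every segment).  It reuses the route cached
 * on the socket when that route is still valid, performs a fresh
 * lookup otherwise, and then builds the IP header in front of the
 * transport header.
 */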
int ip_queue_xmit(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .mark = sk->sk_mark,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->inet_saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .flags = inet_sk_flowi_flags(sk),
					    .uli_u = { .ports =
						       { .sport = inet->inet_sport,
							 .dport = inet->inet_dport } } };

			/* If this fails, the transport layer's retransmit
			 * mechanism will keep trying until a route appears
			 * or the connection times itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb_dst_set(skb, dst_clone(&rt->u.dst));

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	return ip_local_out(skb);

no_route:
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to the IP header plus a block
 *	of the original data) so that each piece fits in a single device
 *	frame, and queue such frames for sending.
 */

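/*
 * There are two paths below: a fast path that reuses a well-formed
 * frag_list built by the transport layer (each entry already sized and
 * aligned like a fragment, so only IP headers have to be added), and a
 * slow path that allocates a fresh skb per fragment and copies the
 * data out of the original packet.
 */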
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs, pad;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_has_frags(skb)) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);
		int truesizes = 0;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			truesizes += frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->truesize -= truesizes;
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside e.g. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	pad = nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
	mtu -= pad;

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF set on each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);

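/*
 * ip_generic_getfrag() is the getfrag callback used by UDP and raw
 * sockets: it copies 'len' bytes starting at 'offset' from the iovec
 * passed as 'from' into the skb, computing the checksum on the fly
 * unless the hardware will checksum the packet (CHECKSUM_PARTIAL).
 */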
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

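/*
 * With UDP fragmentation offload (NETIF_F_UFO) the datagram is queued
 * as one large skb; gso_size tells the device how much payload goes
 * into each on-the-wire fragment, so segmentation is deferred to the
 * driver or hardware instead of being done in ip_fragment().
 */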
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP fragmentation offload, so
	 * create one single skb containing the complete UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for the hardware header */
		skb_reserve(skb, hh_len);

		/* create space for the UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize the network header pointer */
		skb_reset_network_header(skb);

		/* initialize the protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP: other transport protocols - e.g. raw sockets - can
 *	potentially use this interface as well.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
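/*
 * A minimal usage sketch (error handling and cork setup trimmed; the
 * ipc/rt values are assumed to have been prepared roughly as in
 * udp_sendmsg):
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
 *			     0, &ipc, &rt, msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk);
 *	release_sock(sk);
 */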
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		rt = *rtp;
		if (unlikely(!rt))
			return -EFAULT;
		/*
		 * We steal a reference to this route; the caller should
		 * not release it.
		 */
		*rtp = NULL;
		inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
					    rt->u.dst.dev->mtu :
					    dst_mtu(rt->u.dst.path);
		inet->cork.dst = &rt->u.dst;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = (struct rtable *)inet->cork.dst;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	inet->cork.length += length;
	if (((length > mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each segment is an IP fragment ready for sending to the network
	 * once the appropriate IP header has been added.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					ipc->shtx.flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			*skb_tx(skb) = ipc->shtx;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = (struct rtable *)inet->cork.dst;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_sock *inet)
{
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	dst_release(inet->cork.dst);
	inet->cork.dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)inet->cork.dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here.  No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	inet->cork.dst = NULL;
	skb_dst_set(skb, &rt->u.dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	/* Netfilter gets the whole, unfragmented skb. */
	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip_cork_release(inet);
	return err;

error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}

/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(inet_sk(sk));
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as a reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single-threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options opt;
		char data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = skb_rtable(skb);

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;
	ipc.shtx.flags = 0;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .oif = arg->bound_dev_if,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol,
				    .flags = ip_reply_arg_flowi_flags(arg) };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(sock_net(sk), &rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.
	   Note that it uses the fact that this function is called
	   with locally disabled BH and that sk cannot already be
	   spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, &rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);