net: No dst refcounting in ip_queue_xmit()
net/ipv4/ip_output.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted by
 *                                      output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after year coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

int __ip_local_out(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
        return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
                       skb_dst(skb)->dev, dst_output);
}

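/* nf_hook() returns 1 when the NF_INET_LOCAL_OUT hook accepts the packet
 * and leaves invoking the okfn (dst_output here) to the caller; that is
 * the err == 1 case that ip_local_out() continues from.
 */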
int ip_local_out(struct sk_buff *skb)
{
        int err;

        err = __ip_local_out(skb);
        if (likely(err == 1))
                err = dst_output(skb);

        return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
        skb_reset_mac_header(newskb);
        __skb_pull(newskb, skb_network_offset(newskb));
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        WARN_ON(!skb_dst(newskb));
        netif_rx_ni(newskb);
        return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = dst_metric(dst, RTAX_HOPLIMIT);
        return ttl;
}

/*
 *              Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                          __be32 saddr, __be32 daddr, struct ip_options *opt)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = skb_rtable(skb);
        struct iphdr *iph;

        /* Build the IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
        if (ip_dont_fragment(sk, &rt->u.dst))
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->daddr    = rt->rt_dst;
        iph->saddr    = rt->rt_src;
        iph->protocol = sk->sk_protocol;
        ip_select_ident(iph, &rt->u.dst, sk);

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, daddr, rt, 0);
        }

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        /* Send it out. */
        return ip_local_out(skb);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static inline int ip_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);

        if (rt->rt_type == RTN_MULTICAST) {
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST)
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (skb2 == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                kfree_skb(skb);
                skb = skb2;
        }

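        /* Fast path: a cached link-layer header (dst->hh) can be copied in
         * front of the data directly; otherwise go through the neighbour's
         * output function, which may still need to resolve the hardware
         * address.
         */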
        if (dst->hh)
                return neigh_hh_output(dst->hh, skb);
        else if (dst->neighbour)
                return dst->neighbour->output(skb);

        if (net_ratelimit())
                printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
        kfree_skb(skb);
        return -EINVAL;
}

static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
        struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

        return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
               skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}

static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm != NULL) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
#endif
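        /* GSO packets exceed the MTU by design; they are segmented later
         * (by the device or in the GSO code), so only oversized non-GSO
         * packets are fragmented here.
         */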
        if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
                return ip_fragment(skb, ip_finish_output2);
        else
                return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = rt->u.dst.dev;

        /*
         *      If the indicated interface is up and running, send the packet.
         */
        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         *      Multicasts are looped back for other local users
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   that were returned after forwarding; ip_mr_input will
                   drop them in any case.
                   Note that local frames are looped back to be delivered
                   to local recipients.

                   This check is duplicated in ip_mr_input at the moment.
                 */
                    &&
                    ((rt->rt_flags & RTCF_LOCAL) ||
                     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
                   ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                                        newskb, NULL, newskb->dev,
                                        ip_dev_loopback_xmit);
                }

                /* Multicasts with ttl 0 must not go beyond the host */

                if (ip_hdr(skb)->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
                                NULL, newskb->dev, ip_dev_loopback_xmit);
        }

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
                            skb->dev, ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;

        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

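/* As of this change, ip_queue_xmit() no longer takes a reference on the
 * route it installs on the skb: the dst obtained from the socket cache is
 * attached with skb_dst_set_noref(), and the whole function runs under
 * rcu_read_lock() so the dst stays valid without refcount churn.
 */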
int ip_queue_xmit(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = inet->opt;
        struct rtable *rt;
        struct iphdr *iph;
        int res;

        /* Skip all of this if the packet is already routed,
         * e.g. by something like SCTP.
         */
        rcu_read_lock();
        rt = skb_rtable(skb);
        if (rt != NULL)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (rt == NULL) {
                __be32 daddr;

                /* Use the correct destination address if we have options. */
                daddr = inet->inet_daddr;
                if (opt && opt->srr)
                        daddr = opt->faddr;

                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .mark = sk->sk_mark,
                                            .nl_u = { .ip4_u =
                                                      { .daddr = daddr,
                                                        .saddr = inet->inet_saddr,
                                                        .tos = RT_CONN_FLAGS(sk) } },
                                            .proto = sk->sk_protocol,
                                            .flags = inet_sk_flowi_flags(sk),
                                            .uli_u = { .ports =
                                                       { .sport = inet->inet_sport,
                                                         .dport = inet->inet_dport } } };

                        /* If this fails, the retransmit mechanism of the
                         * transport layer will keep trying until the route
                         * appears or the connection times out.
                         */
                        security_sk_classify_flow(sk, &fl);
                        if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->u.dst);
        }
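        /* No refcount is taken here: the socket's dst cache holds one for
         * us, and we remain in the RCU read-side section until the packet
         * leaves via ip_local_out().
         */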
        skb_dst_set_noref(skb, &rt->u.dst);

packet_routed:
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto no_route;

        /* OK, we know where to send it, allocate and build IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
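        /* One 16-bit store fills in version (4), header length (5 32-bit
         * words) and TOS at once: on the wire the first header byte
         * becomes 0x45 and the second the TOS.
         */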
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->protocol = sk->sk_protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        /* The transport layer has set skb->h.foo itself. */

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
        }

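        /* A GSO packet will be split into gso_segs on-the-wire segments,
         * each needing its own IP ID; pass the number of extra IDs to
         * reserve.
         */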
        ip_select_ident_more(iph, &rt->u.dst, sk,
                             (skb_shinfo(skb)->gso_segs ?: 1) - 1);

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        res = ip_local_out(skb);
        rcu_read_unlock();
        return res;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}


static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        skb_dst_drop(to);
        skb_dst_set(to, dst_clone(skb_dst(from)));
        to->dev = from->dev;
        to->mark = from->mark;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
        to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
#endif
        skb_copy_secmark(to, from);
}

/*
 *      This IP datagram is too large to be sent in one piece.  Break it up into
 *      smaller pieces (each of size equal to the IP header plus
 *      a block of the data of the original IP data part) that will yet fit in a
 *      single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct iphdr *iph;
        int raw = 0;
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs, pad;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = skb_rtable(skb);
        int err = 0;

        dev = rt->u.dst.dev;

        /*
         *      Point into the IP datagram header.
         */

        iph = ip_hdr(skb);

        if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(ip_skb_dst_mtu(skb)));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        /*
         *      Setup starting values.
         */

        hlen = iph->ihl * 4;
        mtu = dst_mtu(&rt->u.dst) - hlen;       /* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
        if (skb->nf_bridge)
                mtu -= nf_bridge_mtu_reduction(skb);
#endif
        IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

        /* When frag_list is given, use it. First, check its validity:
         * some transformers could create a wrong frag_list or break an
         * existing one; that is not prohibited. In this case fall back
         * to copying.
         *
         * LATER: this step can be merged into the real generation of
         * fragments; we can switch to copying when we see the first
         * bad fragment.
         */
        if (skb_has_frags(skb)) {
                struct sk_buff *frag;
                int first_len = skb_pagelen(skb);
                int truesizes = 0;

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
                    skb_cloned(skb))
                        goto slow_path;

                skb_walk_frags(skb, frag) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
                        truesizes += frag->truesize;
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->truesize -= truesizes;
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame
                         * before the previous one goes down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                skb_reset_transport_header(frag);
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(skb_network_header(frag), iph, hlen);
                                iph = ip_hdr(frag);
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
                                if (frag->next != NULL)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }

                        err = output(skb);

                        if (!err)
                                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = raw + hlen;               /* Where to start from */

        /* for bridged IP traffic encapsulated inside e.g. a vlan header,
         * we need to make room for the encapsulating header
         */
        pad = nf_bridge_pad(skb);
        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
        mtu -= pad;

        /*
         *      Fragment the datagram.
         */

        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         *      Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }
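                /* The IP fragment offset field counts in units of eight
                 * bytes, so every fragment except the last must carry a
                 * multiple of eight bytes of payload.
                 */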
                /*
                 *      Allocate buffer.
                 */

                if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb_reset_network_header(skb2);
                skb2->transport_header = skb2->network_header + hlen;

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */

                skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
                        BUG();
                left -= len;

                /*
                 *      Fill in the new header fields.
                 */
                iph = ip_hdr(skb2);
                iph->frag_off = htons((offset >> 3));

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed) and do it ONCE
                 * on the initial skb, so that all the following fragments
                 * will inherit the fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 *      Added AC : If we are fragmenting a fragment that's not the
                 *                 last fragment then keep MF set on each fragment
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(skb2);
                if (err)
                        goto fail;

                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
        }
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
        return err;
}

EXPORT_SYMBOL(ip_fragment);

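/* When the device computes the checksum (CHECKSUM_PARTIAL), a plain copy
 * from the iovec suffices; otherwise the checksum is accumulated in
 * skb->csum while the data is copied.
 */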
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct iovec *iov = from;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (memcpy_fromiovecend(to, iov, offset, len) < 0)
                        return -EFAULT;
        } else {
                __wsum csum = 0;
                if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        __wsum csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* The network device supports UDP fragmentation offload, so build
         * one single skb containing the complete UDP datagram.
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);

                if (skb == NULL)
                        return err;

                /* reserve space for the hardware header */
                skb_reserve(skb, hh_len);

                /* create space for the UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize the network header pointer */
                skb_reset_network_header(skb);

                /* initialize the protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;

                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;

                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                __skb_queue_tail(&sk->sk_write_queue, skb);
        }

        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
}

/*
 *      ip_append_data() and ip_append_page() can make one large IP datagram
 *      from many pieces of data. Each piece will be held on the socket
 *      until ip_push_pending_frames() is called. Each piece can be a page
 *      or non-page data.
 *
 *      Not only UDP, but other transport protocols - e.g. raw sockets -
 *      can potentially use this interface.
 *
 *      LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable **rtp,
                   unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = NULL;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;
        struct rtable *rt;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * setup for corking.
                 */
                opt = ipc->opt;
                if (opt) {
                        if (inet->cork.opt == NULL) {
                                inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
                                if (unlikely(inet->cork.opt == NULL))
                                        return -ENOBUFS;
                        }
                        memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
                        inet->cork.flags |= IPCORK_OPT;
                        inet->cork.addr = ipc->addr;
                }
                rt = *rtp;
                if (unlikely(!rt))
                        return -EFAULT;
                /*
                 * We steal a reference to this route; the caller should
                 * not release it.
                 */
                *rtp = NULL;
                inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
                                            rt->u.dst.dev->mtu :
                                            dst_mtu(rt->u.dst.path);
                inet->cork.dst = &rt->u.dst;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                if ((exthdrlen = rt->u.dst.header_len) != 0) {
                        length += exthdrlen;
                        transhdrlen += exthdrlen;
                }
        } else {
                rt = (struct rtable *)inet->cork.dst;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = inet->cork.opt;

                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }
        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
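        /* maxfraglen above is the largest legal fragment size: the payload
         * part is rounded down to a multiple of eight bytes, as required
         * by the fragment offset field, and the IP header is added back.
         */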

        if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
                               mtu-exthdrlen);
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we wish
         * it not to be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;

        inet->cork.length += length;
        if (((length > mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->u.dst.dev->features & NETIF_F_UFO)) {
                err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
                                         fragheaderlen, transhdrlen, mtu,
                                         flags);
                if (err)
                        goto error;
                return 0;
        }

        /* So, what's going on in the loop below?
         *
         * We use the calculated fragment length to generate a chain of
         * skbs; each segment is an IP fragment ready for sending to the
         * network once the appropriate IP header has been added.
         */

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into the current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If the remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->u.dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = datalen + fragheaderlen;

                        /* The last fragment gets additional space at the tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea which fragment will be
                         * the last.
                         */
                        if (datalen == length + fraggap)
                                alloclen += rt->u.dst.trailer_len;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
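                                /* sock_wmalloc() with force=1 never blocks;
                                 * instead, cap the socket's total write
                                 * allocation at twice sk_sndbuf.
                                 */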
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                                else
                                        /* only the initial fragment is
                                           time stamped */
                                        ipc->shtx.flags = 0;
                        }
                        if (skb == NULL)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
                        *skb_tx(skb) = ipc->shtx;

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        data += fragheaderlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL) {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
                        atomic_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error:
        inet->cork.length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

ssize_t ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        rt = (struct rtable *)inet->cork.dst;
        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (!(rt->u.dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
        mtu = inet->cork.fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
                return -EMSGSIZE;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                return -EINVAL;

        inet->cork.length += size;
        if ((sk->sk_protocol == IPPROTO_UDP) &&
            (rt->u.dst.dev->features & NETIF_F_UFO)) {
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }


        while (size > 0) {
                int i;

                if (skb_is_gso(skb))
                        len = size;
                else {

                        /* Check if the remaining data fits into the current packet. */
                        len = mtu - skb->len;
                        if (len < size)
                                len = maxfraglen - skb->len;
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        int alloclen;

                        skb_prev = skb;
                        fraggap = skb_prev->len - maxfraglen;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        skb_put(skb, fragheaderlen + fraggap);
                        skb_reset_network_header(skb);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(skb_prev,
                                                                   maxfraglen,
                                                    skb_transport_header(skb),
                                                                   fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i-1].size += len;
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        __wsum csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                skb->truesize += len;
                atomic_add(len, &sk->sk_wmem_alloc);
                offset += len;
                size -= len;
        }
        return 0;

error:
        inet->cork.length -= size;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

static void ip_cork_release(struct inet_sock *inet)
{
        inet->cork.flags &= ~IPCORK_OPT;
        kfree(inet->cork.opt);
        inet->cork.opt = NULL;
        dst_release(inet->cork.dst);
        inet->cork.dst = NULL;
}

/*
 *      Combine all pending IP fragments on the socket into one IP datagram
 *      and push it out.
 */
int ip_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ip_options *opt = NULL;
        struct rtable *rt = (struct rtable *)inet->cork.dst;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* move skb->data to the IP header, past any extension header */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO),
         * we allow the frame generated here to be fragmented. No matter
         * how transforms change the size of the packet, it will come out.
         */
        if (inet->pmtudisc < IP_PMTUDISC_DO)
                skb->local_df = 1;

        /* The DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow this frame to be
         * fragmented locally. */
        if (inet->pmtudisc >= IP_PMTUDISC_DO ||
            (skb->len <= dst_mtu(&rt->u.dst) &&
             ip_dont_fragment(sk, &rt->u.dst)))
                df = htons(IP_DF);

        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->u.dst);

        iph = (struct iphdr *)skb->data;
        iph->version = 4;
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, inet->cork.addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->frag_off = df;
        ip_select_ident(iph, &rt->u.dst, sk);
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        iph->saddr = rt->rt_src;
        iph->daddr = rt->rt_dst;

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
         * on the dst refcount
         */
        inet->cork.dst = NULL;
        skb_dst_set(skb, &rt->u.dst);

        if (iph->protocol == IPPROTO_ICMP)
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);

        /* Netfilter gets the whole, not yet fragmented skb. */
        err = ip_local_out(skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
                if (err)
                        goto error;
        }

out:
        ip_cork_release(inet);
        return err;

error:
        IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
        goto out;
}

/*
 *      Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        ip_cork_release(inet_sk(sk));
}


/*
 *      Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
                              int len, int odd, struct sk_buff *skb)
{
        __wsum csum;

        csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        return 0;
}

/*
 *      Generic function to send a packet as a reply to another packet.
 *      Used to send TCP resets so far. ICMP should use this function too.
 *
 *      Should run single threaded per socket because it uses the sock
 *      structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
                   unsigned int len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct {
                struct ip_options       opt;
                char                    data[40];
        } replyopts;
        struct ipcm_cookie ipc;
        __be32 daddr;
        struct rtable *rt = skb_rtable(skb);

        if (ip_options_echo(&replyopts.opt, skb))
                return;

        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;
        ipc.shtx.flags = 0;

        if (replyopts.opt.optlen) {
                ipc.opt = &replyopts.opt;

                if (ipc.opt->srr)
                        daddr = replyopts.opt.faddr;
        }

        {
                struct flowi fl = { .oif = arg->bound_dev_if,
                                    .nl_u = { .ip4_u =
                                              { .daddr = daddr,
                                                .saddr = rt->rt_spec_dst,
                                                .tos = RT_TOS(ip_hdr(skb)->tos) } },
                                    /* Not quite clean, but right. */
                                    .uli_u = { .ports =
                                               { .sport = tcp_hdr(skb)->dest,
                                                 .dport = tcp_hdr(skb)->source } },
                                    .proto = sk->sk_protocol,
                                    .flags = ip_reply_arg_flowi_flags(arg) };
                security_skb_classify_flow(skb, &fl);
                if (ip_route_output_key(sock_net(sk), &rt, &fl))
                        return;
        }

        /* And let IP do all the hard work.

           This chunk is not reentrant, hence the spinlock.
           Note that it relies on the fact that this function is called
           with BHs locally disabled and that sk cannot already be
           spinlocked.
         */
        bh_lock_sock(sk);
        inet->tos = ip_hdr(skb)->tos;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
                       &ipc, &rt, MSG_DONTWAIT);
        if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                if (arg->csumoffset >= 0)
                        *((__sum16 *)skb_transport_header(skb) +
                          arg->csumoffset) = csum_fold(csum_add(skb->csum,
                                                                arg->csum));
                skb->ip_summed = CHECKSUM_NONE;
                ip_push_pending_frames(sk);
        }

        bh_unlock_sock(sk);

        ip_rt_put(rt);
}

void __init ip_init(void)
{
        ip_rt_init();
        inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
        igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);