[NET]: Add NETIF_F_GEN_CSUM and NETIF_F_ALL_CSUM
net/ipv4/ip_output.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Version:     $Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted by
 *                                      output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after year coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl = IPDEFTTL;

/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
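
/*
 * Editor's note (a hedged sketch, not part of the original file): the IP
 * header checksum is the 16-bit one's complement of the one's complement
 * sum of the header words, so re-summing a finished header with the check
 * field included must fold to zero.  A hypothetical debug helper built
 * only on ip_fast_csum() could therefore look like:
 *
 *      static inline int ip_check_ok(const struct iphdr *iph)
 *      {
 *              return ip_fast_csum((const unsigned char *)iph, iph->ihl) == 0;
 *      }
 */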

/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
        newskb->mac.raw = newskb->data;
        __skb_pull(newskb, newskb->nh.raw - newskb->data);
        newskb->pkt_type = PACKET_LOOPBACK;
        newskb->ip_summed = CHECKSUM_UNNECESSARY;
        BUG_TRAP(newskb->dst);
        netif_rx(newskb);
        return 0;
}

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = dst_metric(dst, RTAX_HOPLIMIT);
        return ttl;
}

/*
 *      Add an IP header to an skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                          u32 saddr, u32 daddr, struct ip_options *opt)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = (struct rtable *)skb->dst;
        struct iphdr *iph;

        /* Build the IP header. */
        if (opt)
                iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
        else
                iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
        if (ip_dont_fragment(sk, &rt->u.dst))
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->daddr    = rt->rt_dst;
        iph->saddr    = rt->rt_src;
        iph->protocol = sk->sk_protocol;
        iph->tot_len  = htons(skb->len);
        ip_select_ident(iph, &rt->u.dst, sk);
        skb->nh.iph   = iph;

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, daddr, rt, 0);
        }
        ip_send_check(iph);

        skb->priority = sk->sk_priority;

        /* Send it out. */
        return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
                       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
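
/*
 * Usage sketch (editor's note, hedged): this helper is meant for callers
 * that already hold a routed skb and want the IP header prepended and the
 * packet pushed straight through NF_IP_LOCAL_OUT.  In kernels of this
 * vintage the TCP SYN-ACK path (tcp_v4_send_synack) is the typical user,
 * roughly:
 *
 *      err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 *                                  ireq->rmt_addr, ireq->opt);
 *
 * The ireq field names above follow the era's inet_request_sock layout
 * and are shown for illustration only; they are not defined in this file.
 */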

static inline int ip_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct hh_cache *hh = dst->hh;
        struct net_device *dev = dst->dev;
        int hh_len = LL_RESERVED_SPACE(dev);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (skb2 == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                kfree_skb(skb);
                skb = skb2;
        }

        if (hh) {
                int hh_alen;

                read_lock_bh(&hh->hh_lock);
                hh_alen = HH_DATA_ALIGN(hh->hh_len);
                memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
                read_unlock_bh(&hh->hh_lock);
                skb_push(skb, hh->hh_len);
                return hh->hh_output(skb);
        } else if (dst->neighbour)
                return dst->neighbour->output(skb);

        if (net_ratelimit())
                printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
        kfree_skb(skb);
        return -EINVAL;
}

static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb->dst->xfrm != NULL) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
#endif
        if (skb->len > dst_mtu(skb->dst) &&
            !(skb_shinfo(skb)->ufo_size || skb_shinfo(skb)->tso_size))
                return ip_fragment(skb, ip_finish_output2);
        else
                return ip_finish_output2(skb);
}

int ip_mc_output(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct rtable *rt = (struct rtable *)skb->dst;
        struct net_device *dev = rt->u.dst.dev;

        /*
         *      If the indicated interface is up and running, send the packet.
         */
        IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         *      Multicasts are looped back for other local users
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   returned after forwarding; ip_mr_input will drop them
                   in any case.
                   Note that local frames are looped back to be delivered
                   to local recipients.

                   This check is duplicated in ip_mr_input at the moment.
                 */
                    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
                ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
                                        newskb->dev,
                                        ip_dev_loopback_xmit);
                }

                /* Multicasts with ttl 0 must not go beyond the host */

                if (skb->nh.iph->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
                                newskb->dev, ip_dev_loopback_xmit);
        }

        return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct sk_buff *skb)
{
        struct net_device *dev = skb->dst->dev;

        IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}
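
/*
 * Editor's note (a sketch of the call chain, believed accurate for this
 * kernel generation): locally generated packets travel
 *
 *      ip_queue_xmit() / ip_push_pending_frames()
 *        -> NF_IP_LOCAL_OUT -> dst_output()
 *          -> ip_output() (or ip_mc_output() for multicast routes)
 *            -> NF_IP_POST_ROUTING -> ip_finish_output()
 *              -> ip_fragment() if needed -> ip_finish_output2()
 *                -> hh->hh_output() / dst->neighbour->output()
 *
 * so all fragmentation and neighbour resolution happens after netfilter
 * has seen the packet.
 */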

int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = inet->opt;
        struct rtable *rt;
        struct iphdr *iph;

        /* Skip all of this if the packet is already routed,
         * e.g. by something like SCTP.
         */
        rt = (struct rtable *)skb->dst;
        if (rt != NULL)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (rt == NULL) {
                u32 daddr;

                /* Use the correct destination address if we have options. */
                daddr = inet->daddr;
                if (opt && opt->srr)
                        daddr = opt->faddr;

                {
                        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                                            .nl_u = { .ip4_u =
                                                      { .daddr = daddr,
                                                        .saddr = inet->saddr,
                                                        .tos = RT_CONN_FLAGS(sk) } },
                                            .proto = sk->sk_protocol,
                                            .uli_u = { .ports =
                                                       { .sport = inet->sport,
                                                         .dport = inet->dport } } };

                        /* If this fails, the retransmit mechanism of the
                         * transport layer will keep trying until the route
                         * appears or the connection times itself out.
                         */
                        if (ip_route_output_flow(&rt, &fl, sk, 0))
                                goto no_route;
                }
                sk_setup_caps(sk, &rt->u.dst);
        }
        skb->dst = dst_clone(&rt->u.dst);

packet_routed:
        if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
                goto no_route;

        /* OK, we know where to send it, allocate and build the IP header. */
        iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
        *((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        iph->tot_len = htons(skb->len);
        if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
        iph->protocol = sk->sk_protocol;
        iph->saddr    = rt->rt_src;
        iph->daddr    = rt->rt_dst;
        skb->nh.iph   = iph;
        /* The transport layer has set skb->h.foo itself. */

        if (opt && opt->optlen) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, inet->daddr, rt, 0);
        }

        ip_select_ident_more(iph, &rt->u.dst, sk,
                             (skb_shinfo(skb)->tso_segs ?: 1) - 1);

        /* Add an IP checksum. */
        ip_send_check(iph);

        skb->priority = sk->sk_priority;

        return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
                       dst_output);

no_route:
        IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}
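
/*
 * Usage sketch (editor's note, hedged): connection-oriented transports
 * hand fully built segments to this function; TCP in this era does,
 * roughly,
 *
 *      err = ip_queue_xmit(skb, 0);
 *
 * where ipfragok=0 means "set DF whenever PMTU discovery wants it".
 * Passing ipfragok=1 is reserved for protocols that explicitly tolerate
 * fragmentation of their segments (SCTP does this for some chunks).
 */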

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        dst_release(to->dst);
        to->dst = dst_clone(from->dst);
        to->dev = from->dev;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
        to->nfmark = from->nfmark;
        /* The connection association is the same as for the pre-frag packet */
        nf_conntrack_put(to->nfct);
        to->nfct = from->nfct;
        nf_conntrack_get(to->nfct);
        to->nfctinfo = from->nfctinfo;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(to->nf_bridge);
        to->nf_bridge = from->nf_bridge;
        nf_bridge_get(to->nf_bridge);
#endif
#endif
        skb_copy_secmark(to, from);
}

/*
 *      This IP datagram is too large to be sent in one piece.  Break it up
 *      into smaller pieces (each of size equal to the IP header plus a
 *      block of the data of the original IP data part) that will yet fit
 *      in a single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct iphdr *iph;
        int raw = 0;
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = (struct rtable *)skb->dst;
        int err = 0;

        dev = rt->u.dst.dev;

        /*
         *      Point into the IP datagram header.
         */

        iph = skb->nh.iph;

        if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(dst_mtu(&rt->u.dst)));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        /*
         *      Setup starting values.
         */

        hlen = iph->ihl * 4;
        mtu = dst_mtu(&rt->u.dst) - hlen;       /* Size of data space */
        IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

        /* When frag_list is given, use it.  First, check its validity:
         * some transformers could create a wrong frag_list or break an
         * existing one; that is not prohibited.  In such cases fall back
         * to copying.
         *
         * LATER: this step can be merged into the real generation of
         * fragments; we can switch to copying when we see the first bad
         * fragment.
         */
        if (skb_shinfo(skb)->frag_list) {
                struct sk_buff *frag;
                int first_len = skb_pagelen(skb);

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
                    skb_cloned(skb))
                        goto slow_path;

                for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                sock_hold(skb->sk);
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                                skb->truesize -= frag->truesize;
                        }
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_shinfo(skb)->frag_list = NULL;
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame,
                         * before the previous one goes down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                frag->h.raw = frag->data;
                                frag->nh.raw = __skb_push(frag, hlen);
                                memcpy(frag->nh.raw, iph, hlen);
                                iph = frag->nh.iph;
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset >> 3);
                                if (frag->next != NULL)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }

                        err = output(skb);

                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
                return err;
        }

slow_path:
        left = skb->len - hlen;         /* Space per frame */
        ptr = raw + hlen;               /* Where to start from */

#ifdef CONFIG_BRIDGE_NETFILTER
        /* for bridged IP traffic encapsulated inside, e.g., a vlan header,
         * we need to make room for the encapsulating header */
        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, nf_bridge_pad(skb));
        mtu -= nf_bridge_pad(skb);
#else
        ll_rs = LL_RESERVED_SPACE(rt->u.dst.dev);
#endif
        /*
         *      Fragment the datagram.
         */

        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         *      Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left)
                        len &= ~7;
                /*
                 *      Allocate buffer.
                 */

                if ((skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb2->nh.raw = skb2->data;
                skb2->h.raw = skb2->data + hlen;

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */

                memcpy(skb2->nh.raw, skb->data, hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
                        BUG();
                left -= len;

                /*
                 *      Fill in the new header fields.
                 */
                iph = skb2->nh.iph;
                iph->frag_off = htons((offset >> 3));

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed) and do it ONCE
                 * on the initial skb, so that all the following fragments
                 * will inherit fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 *      Added AC: if we are fragmenting a fragment that's
                 *                not the last fragment then keep the MF
                 *                flag set on every fragment we produce.
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */

                IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);

                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(skb2);
                if (err)
                        goto fail;
        }
        kfree_skb(skb);
        IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
        return err;
}

EXPORT_SYMBOL(ip_fragment);
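
/*
 * Worked example (editor's note): with a 20-byte header and a 1500-byte
 * device MTU, the slow path above has hlen = 20 and mtu = 1480 bytes of
 * data space.  A 4000-byte payload therefore leaves as three fragments
 * carrying 1480, 1480 and 1040 data bytes at offsets 0, 1480 and 2960
 * (encoded as 0, 185 and 370 in 8-byte units in iph->frag_off); the first
 * two go out with MF set, the last (1060 bytes on the wire) with MF
 * clear.  The "len &= ~7" step is what keeps every non-final fragment a
 * multiple of 8 data bytes, since the offset field only has 8-byte
 * granularity.
 */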

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct iovec *iov = from;

        if (skb->ip_summed == CHECKSUM_HW) {
                if (memcpy_fromiovecend(to, iov, offset, len) < 0)
                        return -EFAULT;
        } else {
                unsigned int csum = 0;
                if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}
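
/*
 * Editor's note (hedged): when the device will compute the transport
 * checksum (CHECKSUM_HW), the copy above is a plain memcpy from the
 * iovec; otherwise copy and checksum are done in one pass and accumulated
 * into skb->csum.  The 'odd' argument matters because the one's
 * complement sum is computed over 16-bit words: a block that starts at an
 * odd byte offset within the packet has its partial sum byte-swapped by
 * csum_block_add() before it is folded in.
 */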

static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        unsigned int csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
                        int getfrag(void *from, char *to, int offset, int len,
                                    int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int mtu, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* The network device supports UDP fragmentation offload, so build
         * one single skb containing the complete UDP datagram.
         */
        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);

                if (skb == NULL)
                        return err;

                /* reserve space for the hardware header */
                skb_reserve(skb, hh_len);

                /* create space for the UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize the network header pointer */
                skb->nh.raw = skb->data;

                /* initialize the protocol header pointer */
                skb->h.raw = skb->data + fragheaderlen;

                skb->ip_summed = CHECKSUM_HW;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;
        }

        err = skb_append_datato_frags(sk, skb, getfrag, from,
                                      (length - transhdrlen));
        if (!err) {
                /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);
                __skb_queue_tail(&sk->sk_write_queue, skb);

                return 0;
        }
        /* There is not enough support to do UFO,
         * so follow the normal path.
         */
        kfree_skb(skb);
        return err;
}

/*
 *      ip_append_data() and ip_append_page() can make one large IP datagram
 *      from many pieces of data.  Each piece will be held on the socket
 *      until ip_push_pending_frames() is called.  Each piece can be a page
 *      or non-page data.
 *
 *      Not only UDP: other transport protocols, e.g. raw sockets, can
 *      potentially use this interface too.
 *
 *      LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable *rt,
                   unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = NULL;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen;
        int csummode = CHECKSUM_NONE;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                /*
                 * setup for corking.
                 */
                opt = ipc->opt;
                if (opt) {
                        if (inet->cork.opt == NULL) {
                                inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
                                if (unlikely(inet->cork.opt == NULL))
                                        return -ENOBUFS;
                        }
                        memcpy(inet->cork.opt, opt, sizeof(struct ip_options) + opt->optlen);
                        inet->cork.flags |= IPCORK_OPT;
                        inet->cork.addr = ipc->addr;
                }
                dst_hold(&rt->u.dst);
                inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
                inet->cork.rt = rt;
                inet->cork.length = 0;
                sk->sk_sndmsg_page = NULL;
                sk->sk_sndmsg_off = 0;
                if ((exthdrlen = rt->u.dst.header_len) != 0) {
                        length += exthdrlen;
                        transhdrlen += exthdrlen;
                }
        } else {
                rt = inet->cork.rt;
                if (inet->cork.flags & IPCORK_OPT)
                        opt = inet->cork.opt;

                transhdrlen = 0;
                exthdrlen = 0;
                mtu = inet->cork.fragsize;
        }
        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu - exthdrlen);
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment, and we
         * wish that it not be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
            !exthdrlen)
                csummode = CHECKSUM_HW;
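        /* Editor's note on the feature test above (hedged): per the commit
         * this file accompanies, NETIF_F_GEN_CSUM is believed to cover the
         * protocol-agnostic checksum offloads (NETIF_F_NO_CSUM |
         * NETIF_F_HW_CSUM) and NETIF_F_ALL_CSUM to add the IPv4-only
         * NETIF_F_IP_CSUM on top, so any device advertising one of these
         * bits can finish the transport checksum and CHECKSUM_HW is safe
         * here.
         */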

        inet->cork.length += length;
        if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
                        (rt->u.dst.dev->features & NETIF_F_UFO)) {

                err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
                                         fragheaderlen, transhdrlen, mtu,
                                         flags);
                if (err)
                        goto error;
                return 0;
        }

        /* So, what's going on in the loop below?
         *
         * We use the calculated fragment length to generate a chained skb;
         * each of its segments is an IP fragment ready for sending to the
         * network once the appropriate IP header has been added.
         */

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into the current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If the remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->u.dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = datalen + fragheaderlen;

                        /* The last fragment gets additional space at the
                         * tail.  Note: with MSG_MORE we overallocate on
                         * fragments, because we have no idea which fragment
                         * will be the last.
                         */
                        if (datalen == length + fraggap)
                                alloclen += rt->u.dst.trailer_len;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                        }
                        if (skb == NULL)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen);
                        skb->nh.raw = data + exthdrlen;
                        data += fragheaderlen;
                        skb->h.raw = data + exthdrlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                skb_trim(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
                        struct page *page = sk->sk_sndmsg_page;
                        int off = sk->sk_sndmsg_off;
                        unsigned int left;

                        if (page && (left = PAGE_SIZE - off) > 0) {
                                if (copy >= left)
                                        copy = left;
                                if (page != frag->page) {
                                        if (i == MAX_SKB_FRAGS) {
                                                err = -EMSGSIZE;
                                                goto error;
                                        }
                                        get_page(page);
                                        skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
                                        frag = &skb_shinfo(skb)->frags[i];
                                }
                        } else if (i < MAX_SKB_FRAGS) {
                                if (copy > PAGE_SIZE)
                                        copy = PAGE_SIZE;
                                page = alloc_pages(sk->sk_allocation, 0);
                                if (page == NULL) {
                                        err = -ENOMEM;
                                        goto error;
                                }
                                sk->sk_sndmsg_page = page;
                                sk->sk_sndmsg_off = 0;

                                skb_fill_page_desc(skb, i, page, 0, 0);
                                frag = &skb_shinfo(skb)->frags[i];
                                skb->truesize += PAGE_SIZE;
                                atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
                        } else {
                                err = -EMSGSIZE;
                                goto error;
                        }
                        if (getfrag(from, page_address(frag->page) + frag->page_offset + frag->size, offset, copy, skb->len, skb) < 0) {
                                err = -EFAULT;
                                goto error;
                        }
                        sk->sk_sndmsg_off += copy;
                        frag->size += copy;
                        skb->len += copy;
                        skb->data_len += copy;
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error:
        inet->cork.length -= length;
        IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        return err;
}
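
/*
 * Usage sketch (editor's note, hedged): the canonical caller is the UDP
 * sendmsg path, which appends and then pushes in one motion unless the
 * user holds the cork:
 *
 *      err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len,
 *                           sizeof(struct udphdr), &ipc, rt,
 *                           corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
 *      if (err)
 *              ip_flush_pending_frames(sk);
 *      else if (!corkreq)
 *              err = ip_push_pending_frames(sk);
 *
 * This mirrors the era's udp_sendmsg() and is shown for illustration
 * only; the udp-side names are not defined in this file.
 */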

ssize_t ip_append_page(struct sock *sk, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        rt = inet->cork.rt;
        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (!(rt->u.dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
        mtu = inet->cork.fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

        if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
                return -EMSGSIZE;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                return -EINVAL;

        inet->cork.length += size;
        if ((sk->sk_protocol == IPPROTO_UDP) &&
            (rt->u.dst.dev->features & NETIF_F_UFO))
                skb_shinfo(skb)->ufo_size = (mtu - fragheaderlen);

        while (size > 0) {
                int i;

                if (skb_shinfo(skb)->ufo_size)
                        len = size;
                else {
                        /* Check if the remaining data fits into the current packet. */
                        len = mtu - skb->len;
                        if (len < size)
                                len = maxfraglen - skb->len;
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        char *data;
                        struct iphdr *iph;
                        int alloclen;

                        skb_prev = skb;
                        fraggap = skb_prev->len - maxfraglen;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fragheaderlen + fraggap);
                        skb->nh.iph = iph = (struct iphdr *)data;
                        data += fragheaderlen;
                        skb->h.raw = data;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                skb_trim(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_shinfo(skb)->frags[i - 1].size += len;
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        unsigned int csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                offset += len;
                size -= len;
        }
        return 0;

error:
        inet->cork.length -= size;
        IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        return err;
}
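
/*
 * Editor's note (hedged): ip_append_page() is the zero-copy sibling of
 * ip_append_data(); the sendpage/sendfile path on a corked UDP socket
 * (udp_sendpage in the era's udp.c) is its expected caller.  It maps the
 * caller's page into skb frags instead of copying, which is why
 * NETIF_F_SG on the route's device is a hard requirement above.
 */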

/*
 *      Combine all pending IP fragments on the socket into one IP datagram
 *      and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options *opt = NULL;
        struct rtable *rt = inet->cork.rt;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;
        int err = 0;

        if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* move skb->data to the ip header from the ext header */
        if (skb->data < skb->nh.raw)
                __skb_pull(skb, skb->nh.raw - skb->data);
        while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
                __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                __sock_put(tmp_skb->sk);
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        /* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO),
         * we allow fragmenting of the frame generated here.  No matter how
         * transforms change the size of the packet, it will come out.
         */
        if (inet->pmtudisc != IP_PMTUDISC_DO)
                skb->local_df = 1;

        /* The DF bit is set when we want to see DF on outgoing frames.
         * If local_df is set too, we still allow this frame to be
         * fragmented locally. */
        if (inet->pmtudisc == IP_PMTUDISC_DO ||
            (skb->len <= dst_mtu(&rt->u.dst) &&
             ip_dont_fragment(sk, &rt->u.dst)))
                df = htons(IP_DF);

        if (inet->cork.flags & IPCORK_OPT)
                opt = inet->cork.opt;

        if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->u.dst);

        iph = (struct iphdr *)skb->data;
        iph->version = 4;
        iph->ihl = 5;
        if (opt) {
                iph->ihl += opt->optlen >> 2;
                ip_options_build(skb, opt, inet->cork.addr, rt, 0);
        }
        iph->tos = inet->tos;
        iph->tot_len = htons(skb->len);
        iph->frag_off = df;
        ip_select_ident(iph, &rt->u.dst, sk);
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        iph->saddr = rt->rt_src;
        iph->daddr = rt->rt_dst;
        ip_send_check(iph);

        skb->priority = sk->sk_priority;
        skb->dst = dst_clone(&rt->u.dst);

        /* Netfilter gets the whole, not-yet-fragmented skb. */
        err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
                      skb->dst->dev, dst_output);
        if (err) {
                if (err > 0)
                        err = inet->recverr ? net_xmit_errno(err) : 0;
                if (err)
                        goto error;
        }

out:
        inet->cork.flags &= ~IPCORK_OPT;
        kfree(inet->cork.opt);
        inet->cork.opt = NULL;
        if (inet->cork.rt) {
                ip_rt_put(inet->cork.rt);
                inet->cork.rt = NULL;
        }
        return err;

error:
        IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
        goto out;
}

/*
 *      Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
                kfree_skb(skb);

        inet->cork.flags &= ~IPCORK_OPT;
        kfree(inet->cork.opt);
        inet->cork.opt = NULL;
        if (inet->cork.rt) {
                ip_rt_put(inet->cork.rt);
                inet->cork.rt = NULL;
        }
}


/*
 *      Fetch data from kernel space and fill in the checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
                              int len, int odd, struct sk_buff *skb)
{
        unsigned int csum;

        csum = csum_partial_copy_nocheck(dptr + offset, to, len, 0);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        return 0;
}

/*
 *      Generic function to send a packet as a reply to another packet.
 *      Used to send TCP resets so far.  ICMP should use this function too.
 *
 *      Should run single threaded per socket because it uses the sock
 *      structure to pass arguments.
 *
 *      LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
                   unsigned int len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct {
                struct ip_options       opt;
                char                    data[40];
        } replyopts;
        struct ipcm_cookie ipc;
        u32 daddr;
        struct rtable *rt = (struct rtable *)skb->dst;

        if (ip_options_echo(&replyopts.opt, skb))
                return;

        daddr = ipc.addr = rt->rt_src;
        ipc.opt = NULL;

        if (replyopts.opt.optlen) {
                ipc.opt = &replyopts.opt;

                if (ipc.opt->srr)
                        daddr = replyopts.opt.faddr;
        }

        {
                struct flowi fl = { .nl_u = { .ip4_u =
                                              { .daddr = daddr,
                                                .saddr = rt->rt_spec_dst,
                                                .tos = RT_TOS(skb->nh.iph->tos) } },
                                    /* Not quite clean, but right. */
                                    .uli_u = { .ports =
                                               { .sport = skb->h.th->dest,
                                                 .dport = skb->h.th->source } },
                                    .proto = sk->sk_protocol };
                if (ip_route_output_key(&rt, &fl))
                        return;
        }

        /* And let IP do all the hard work.
         *
         * This chunk is not reentrant, hence the spinlock.  Note that it
         * relies on the fact that this function is called with BHs locally
         * disabled and that sk cannot already be spinlocked.
         */
        bh_lock_sock(sk);
        inet->tos = skb->nh.iph->tos;
        sk->sk_priority = skb->priority;
        sk->sk_protocol = skb->nh.iph->protocol;
        ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
                       &ipc, rt, MSG_DONTWAIT);
        if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
                if (arg->csumoffset >= 0)
                        *((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
                skb->ip_summed = CHECKSUM_NONE;
                ip_push_pending_frames(sk);
        }

        bh_unlock_sock(sk);

        ip_rt_put(rt);
}
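
/*
 * Usage sketch (editor's note, hedged): the known in-tree caller at this
 * point is the TCP reset path, where tcp_v4_send_reset() builds a
 * struct ip_reply_arg whose iov points at a prebuilt TCP header and whose
 * csum/csumoffset let the code above patch the final transport checksum
 * into the queued skb before pushing, roughly:
 *
 *      arg.iov[0].iov_base = (unsigned char *)&rth;
 *      arg.iov[0].iov_len  = sizeof(rth);
 *      arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 *      ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
 *
 * The rth/tcp_socket names mirror the era's tcp_ipv4.c and are shown for
 * illustration only.
 */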

void __init ip_init(void)
{
        ip_rt_init();
        inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
        igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);