[GRE]: Add net/gre_net argument to some functions.
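Several hunks in this blobdiff thread a struct net through the IPv4 input path: it is resolved with dev_net(skb->dev) or sock_net(sk), and local delivery is gated on per-namespace readiness. As a minimal sketch (the helper name is hypothetical; it mirrors the check added in ip_local_deliver_finish() below):

	/* Hypothetical helper, not in this commit: deliver to an L4
	 * protocol only if we are in the initial namespace or the
	 * handler declared itself namespace-aware via netns_ok. */
	static inline int example_proto_allowed(struct net *net,
						struct net_protocol *ipprot)
	{
		return net == &init_net || ipprot->netns_ok;
	}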
[safe/jmp/linux-2.6] / net / ipv4 / ip_input.c
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 473d0f2..7b4bad6 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -15,7 +15,7 @@
  *		Stefan Becker, <stefanb@yello.ping.de>
  *		Jorge Cwik, <jorge@laser.satlink.net>
  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *
+ *
  *
  * Fixes:
  *		Alan Cox	:	Commented a couple of minor bits of surplus code
@@ -98,13 +98,13 @@
  *		Jos Vos		:	Do accounting *before* call_in_firewall
  *		Willy Konynenberg	:	Transparent proxying support
  *
- *
+ *
  *
  * To Fix:
  *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
  *		and could be made very efficient with the addition of some virtual memory hacks to permit
  *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
- *		Output fragmentation wants updating along with the buffer management to use a single
+ *		Output fragmentation wants updating along with the buffer management to use a single
  *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
  *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
  *		fragmentation anyway.
@@ -121,13 +121,13 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
-#include <linux/config.h>
 #include <linux/net.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/in.h>
 #include <linux/inet.h>
+#include <linux/inetdevice.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -154,12 +154,13 @@
 DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;

 /*
  *	Process Router Attention IP option
- */
+ */
 int ip_call_ra_chain(struct sk_buff *skb)
 {
 	struct ip_ra_chain *ra;
-	u8 protocol = skb->nh.iph->protocol;
+	u8 protocol = ip_hdr(skb)->protocol;
 	struct sock *last = NULL;
+	struct net_device *dev = skb->dev;

 	read_lock(&ip_ra_lock);
 	for (ra = ip_ra_chain; ra; ra = ra->next) {
@@ -170,10 +171,10 @@ int ip_call_ra_chain(struct sk_buff *skb)
 		 */
 		if (sk && inet_sk(sk)->num == protocol &&
 		    (!sk->sk_bound_dev_if ||
-		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
-			if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
-				skb = ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN);
-				if (skb == NULL) {
+		     sk->sk_bound_dev_if == dev->ifindex) &&
+		    sock_net(sk) == dev_net(dev)) {
+			if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+				if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) {
 					read_unlock(&ip_ra_lock);
 					return 1;
 				}
@@ -184,7 +185,6 @@ int ip_call_ra_chain(struct sk_buff *skb)
 				raw_rcv(last, skb2);
 			}
 			last = sk;
-			nf_reset(skb);
 		}
 	}
@@ -197,44 +197,35 @@ int ip_call_ra_chain(struct sk_buff *skb)
 	return 0;
 }

-static inline int ip_local_deliver_finish(struct sk_buff *skb)
+static int ip_local_deliver_finish(struct sk_buff *skb)
 {
-	int ihl = skb->nh.iph->ihl*4;
-
-	__skb_pull(skb, ihl);
+	struct net *net = dev_net(skb->dev);

-	/* Free reference early: we don't need it any more, and it may
-	   hold ip_conntrack module loaded indefinitely. */
-	nf_reset(skb);
+	__skb_pull(skb, ip_hdrlen(skb));

 	/* Point into the IP datagram, just past the header. */
-	skb->h.raw = skb->data;
+	skb_reset_transport_header(skb);

 	rcu_read_lock();
 	{
-		/* Note: See raw.c and net/raw.h, RAWV4_HTABLE_SIZE==MAX_INET_PROTOS */
-		int protocol = skb->nh.iph->protocol;
-		int hash;
-		struct sock *raw_sk;
+		int protocol = ip_hdr(skb)->protocol;
+		int hash, raw;
 		struct net_protocol *ipprot;

 	resubmit:
-		hash = protocol & (MAX_INET_PROTOS - 1);
-		raw_sk = sk_head(&raw_v4_htable[hash]);
-
-		/* If there maybe a raw socket we must check - if not we
-		 * don't care less
-		 */
-		if (raw_sk && !raw_v4_input(skb, skb->nh.iph, hash))
-			raw_sk = NULL;
+		raw = raw_local_deliver(skb, protocol);

-		if ((ipprot = rcu_dereference(inet_protos[hash])) != NULL) {
+		hash = protocol & (MAX_INET_PROTOS - 1);
+		ipprot = rcu_dereference(inet_protos[hash]);
+		if (ipprot != NULL && (net == &init_net || ipprot->netns_ok)) {
 			int ret;

-			if (!ipprot->no_policy &&
-			    !xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-				kfree_skb(skb);
-				goto out;
+			if (!ipprot->no_policy) {
+				if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+					kfree_skb(skb);
+					goto out;
+				}
+				nf_reset(skb);
 			}
 			ret = ipprot->handler(skb);
 			if (ret < 0) {
@@ -243,7 +234,7 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
 			}
 			IP_INC_STATS_BH(IPSTATS_MIB_INDELIVERS);
 		} else {
-			if (!raw_sk) {
+			if (!raw) {
 				if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
 					IP_INC_STATS_BH(IPSTATS_MIB_INUNKNOWNPROTOS);
 					icmp_send(skb, ICMP_DEST_UNREACH,
@@ -262,20 +253,19 @@ static inline int ip_local_deliver_finish(struct sk_buff *skb)
 /*
  * 	Deliver IP Packets to the higher protocol layers.
- */
+ */
 int ip_local_deliver(struct sk_buff *skb)
 {
 	/*
 	 *	Reassemble IP fragments.
 	 */

-	if (skb->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
-		skb = ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER);
-		if (!skb)
+	if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
+		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
 			return 0;
 	}

-	return NF_HOOK(PF_INET, NF_IP_LOCAL_IN, skb, skb->dev, NULL,
+	return NF_HOOK(PF_INET, NF_INET_LOCAL_IN, skb, skb->dev, NULL,
 		       ip_local_deliver_finish);
 }
@@ -297,14 +287,15 @@ static inline int ip_rcv_options(struct sk_buff *skb)
 		goto drop;
 	}

-	iph = skb->nh.iph;
+	iph = ip_hdr(skb);
+	opt = &(IPCB(skb)->opt);
+	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

-	if (ip_options_compile(NULL, skb)) {
+	if (ip_options_compile(dev_net(dev), opt, skb)) {
 		IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
 		goto drop;
 	}

-	opt = &(IPCB(skb)->opt);
 	if (unlikely(opt->srr)) {
 		struct in_device *in_dev = in_dev_get(dev);
 		if (in_dev) {
@@ -312,7 +303,7 @@ static inline int ip_rcv_options(struct sk_buff *skb)
 			if (IN_DEV_LOG_MARTIANS(in_dev) &&
 			    net_ratelimit())
 				printk(KERN_INFO "source route option "
-				       "%u.%u.%u.%u -> %u.%u.%u.%u\n",
+				       NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
 				       NIPQUAD(iph->saddr),
 				       NIPQUAD(iph->daddr));
 			in_dev_put(in_dev);
@@ -331,27 +322,30 @@ drop:
 	return -1;
 }

-static inline int ip_rcv_finish(struct sk_buff *skb)
+static int ip_rcv_finish(struct sk_buff *skb)
 {
-	struct iphdr *iph = skb->nh.iph;
+	const struct iphdr *iph = ip_hdr(skb);
+	struct rtable *rt;

 	/*
 	 *	Initialise the virtual path cache for the packet. It describes
 	 *	how the packet travels inside Linux networking.
-	 */
-	if (likely(skb->dst == NULL)) {
+	 */
+	if (skb->dst == NULL) {
 		int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
 					 skb->dev);
 		if (unlikely(err)) {
 			if (err == -EHOSTUNREACH)
 				IP_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
-			goto drop;
+			else if (err == -ENETUNREACH)
+				IP_INC_STATS_BH(IPSTATS_MIB_INNOROUTES);
+			goto drop;
 		}
 	}

 #ifdef CONFIG_NET_CLS_ROUTE
 	if (unlikely(skb->dst->tclassid)) {
-		struct ip_rt_acct *st = ip_rt_acct + 256*smp_processor_id();
+		struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id());
 		u32 idx = skb->dst->tclassid;
 		st[idx&0xFF].o_packets++;
 		st[idx&0xFF].o_bytes+=skb->len;
@@ -363,16 +357,22 @@ static inline int ip_rcv_finish(struct sk_buff *skb)
 	if (iph->ihl > 5 && ip_rcv_options(skb))
 		goto drop;

+	rt = skb->rtable;
+	if (rt->rt_type == RTN_MULTICAST)
+		IP_INC_STATS_BH(IPSTATS_MIB_INMCASTPKTS);
+	else if (rt->rt_type == RTN_BROADCAST)
+		IP_INC_STATS_BH(IPSTATS_MIB_INBCASTPKTS);
+
 	return dst_input(skb);

 drop:
-	kfree_skb(skb);
-	return NET_RX_DROP;
+	kfree_skb(skb);
+	return NET_RX_DROP;
 }

 /*
  * 	Main IP Receive routine.
- */
+ */
 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct iphdr *iph;
@@ -394,7 +394,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 		goto inhdr_error;

-	iph = skb->nh.iph;
+	iph = ip_hdr(skb);

 	/*
 	 *	RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
@@ -413,13 +413,16 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 	if (!pskb_may_pull(skb, iph->ihl*4))
 		goto inhdr_error;

-	iph = skb->nh.iph;
+	iph = ip_hdr(skb);

 	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
 		goto inhdr_error;

 	len = ntohs(iph->tot_len);
-	if (skb->len < len || len < (iph->ihl*4))
+	if (skb->len < len) {
+		IP_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
+		goto drop;
+	} else if (len < (iph->ihl*4))
 		goto inhdr_error;

 	/* Our transport medium may have padded the buffer out. Now we know it
@@ -431,15 +434,18 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 		goto drop;
 	}

-	return NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, dev, NULL,
+	/* Remove any debris in the socket control block */
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
+	return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, dev, NULL,
 		       ip_rcv_finish);

 inhdr_error:
 	IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
 drop:
-	kfree_skb(skb);
+	kfree_skb(skb);
 out:
-	return NET_RX_DROP;
+	return NET_RX_DROP;
 }

 EXPORT_SYMBOL(ip_statistics);
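
The other recurring change above is the move from the old skb->nh/skb->h unions to typed header accessors (ip_hdr(), ip_hdrlen(), skb_reset_transport_header()). As a minimal sketch (assuming the post-conversion sk_buff API with skb_network_header()), ip_hdr() amounts to:

	/* Typed view of the IPv4 header; valid once the network header
	 * offset has been set by the receive path. */
	static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
	{
		return (struct iphdr *)skb_network_header(skb);
	}

so a read such as ip_hdr(skb)->protocol is the same access as the old skb->nh.iph->protocol, with the union hidden behind one accessor.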