X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=net%2Fdecnet%2Fdn_route.c;h=c1b5502f195be2ffd05ed3ada14c1ded000f0a4b;hb=923f4902fefdf4e89b0fb32c4e069d4f57d704f5;hp=5abf7057af00db705d4ec019a2c670312d6cdd5c;hpb=364c6badde0dd62a0a38e5ed67f85d87d6665780;p=safe%2Fjmp%2Flinux-2.6

diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 5abf705..c1b5502 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -43,7 +43,7 @@
 /******************************************************************************
     (c) 1995-1998 E.M. Serrat		emserrat@geocities.com
-    
+
     This program is free software; you can redistribute it and/or
     modify it under the terms of the GNU General Public License as
     published by the Free Software Foundation; either version 2 of the
     License, or
@@ -55,7 +55,6 @@
     GNU General Public License for more details.
 *******************************************************************************/
 
-#include 
 #include 
 #include 
 #include 
@@ -81,6 +80,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -167,11 +167,11 @@ static void dn_dst_check_expire(unsigned long dummy)
 		while((rt=*rtp) != NULL) {
 			if (atomic_read(&rt->u.dst.__refcnt) ||
 					(now - rt->u.dst.lastuse) < expire) {
-				rtp = &rt->u.rt_next;
+				rtp = &rt->u.dst.dn_next;
 				continue;
 			}
-			*rtp = rt->u.rt_next;
-			rt->u.rt_next = NULL;
+			*rtp = rt->u.dst.dn_next;
+			rt->u.dst.dn_next = NULL;
 			dnrt_free(rt);
 		}
 		spin_unlock(&dn_rt_hash_table[i].lock);
@@ -198,11 +198,11 @@ static int dn_dst_gc(void)
 		while((rt=*rtp) != NULL) {
 			if (atomic_read(&rt->u.dst.__refcnt) ||
 					(now - rt->u.dst.lastuse) < expire) {
-				rtp = &rt->u.rt_next;
+				rtp = &rt->u.dst.dn_next;
 				continue;
 			}
-			*rtp = rt->u.rt_next;
-			rt->u.rt_next = NULL;
+			*rtp = rt->u.dst.dn_next;
+			rt->u.dst.dn_next = NULL;
 			dnrt_drop(rt);
 			break;
 		}
@@ -246,7 +246,7 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 	}
 }
 
-/* 
+/*
  * When a route has been marked obsolete. (e.g. routing cache flush)
  */
 static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
@@ -267,9 +267,12 @@ static void dn_dst_link_failure(struct sk_buff *skb)
 {
 }
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-	return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 &&
-		fl1->oif == fl2->oif &&
-		fl1->iif == fl2->iif;
+	return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
+		(fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
+		(fl1->mark ^ fl2->mark) |
+		(fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
+		(fl1->oif ^ fl2->oif) |
+		(fl1->iif ^ fl2->iif)) == 0;
 }
 
 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
@@ -283,8 +286,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
 	while((rth = *rthp) != NULL) {
 		if (compare_keys(&rth->fl, &rt->fl)) {
 			/* Put it first */
-			*rthp = rth->u.rt_next;
-			rcu_assign_pointer(rth->u.rt_next,
+			*rthp = rth->u.dst.dn_next;
+			rcu_assign_pointer(rth->u.dst.dn_next,
 					   dn_rt_hash_table[hash].chain);
 			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
 
@@ -297,12 +300,12 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
 			*rp = rth;
 			return 0;
 		}
-		rthp = &rth->u.rt_next;
+		rthp = &rth->u.dst.dn_next;
 	}
 
-	rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
+	rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
 	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
-	
+
 	dst_hold(&rt->u.dst);
 	rt->u.dst.__use++;
 	rt->u.dst.lastuse = now;
@@ -323,8 +326,8 @@ void dn_run_flush(unsigned long dummy)
 			goto nothing_to_declare;
 
 		for(; rt; rt=next) {
-			next = rt->u.rt_next;
-			rt->u.rt_next = NULL;
+			next = rt->u.dst.dn_next;
+			rt->u.dst.dn_next = NULL;
 			dst_free((struct dst_entry *)rt);
 		}
 
@@ -503,23 +506,23 @@ static int dn_route_rx_long(struct sk_buff *skb)
 	skb_pull(skb, 20);
 	skb->h.raw = skb->data;
 
-        /* Destination info */
-        ptr += 2;
+	/* Destination info */
+	ptr += 2;
 	cb->dst = dn_eth2dn(ptr);
-        if (memcmp(ptr, dn_hiord_addr, 4) != 0)
-                goto drop_it;
-        ptr += 6;
+	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
+		goto drop_it;
+	ptr += 6;
 
-        /* Source info */
-        ptr += 2;
+	/* Source info */
+	ptr += 2;
 	cb->src = dn_eth2dn(ptr);
-        if (memcmp(ptr, dn_hiord_addr, 4) != 0)
-                goto drop_it;
-        ptr += 6;
-        /* Other junk */
-        ptr++;
-        cb->hops = *ptr++; /* Visit Count */
+	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
+		goto drop_it;
+	ptr += 6;
+	/* Other junk */
+	ptr++;
+	cb->hops = *ptr++; /* Visit Count */
 
 	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
 
@@ -542,16 +545,16 @@ static int dn_route_rx_short(struct sk_buff *skb)
 	skb->h.raw = skb->data;
 	cb->dst = *(__le16 *)ptr;
-        ptr += 2;
-        cb->src = *(__le16 *)ptr;
-        ptr += 2;
-        cb->hops = *ptr & 0x3f;
+	ptr += 2;
+	cb->src = *(__le16 *)ptr;
+	ptr += 2;
+	cb->hops = *ptr & 0x3f;
 
 	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);
 
 drop_it:
-        kfree_skb(skb);
-        return NET_RX_DROP;
+	kfree_skb(skb);
+	return NET_RX_DROP;
 }
 
 static int dn_route_discard(struct sk_buff *skb)
@@ -623,20 +626,20 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
 	cb->rt_flags = flags;
 
 	if (decnet_debug_level & 1)
-		printk(KERN_DEBUG 
+		printk(KERN_DEBUG
 			"dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
-			(int)flags, (dev) ? dev->name : "???", len, skb->len, 
+			(int)flags, (dev) ? dev->name : "???", len, skb->len,
 			padlen);
 
-        if (flags & DN_RT_PKT_CNTL) {
+	if (flags & DN_RT_PKT_CNTL) {
 		if (unlikely(skb_linearize(skb)))
 			goto dump_it;
 
-                switch(flags & DN_RT_CNTL_MSK) {
-                        case DN_RT_PKT_INIT:
+		switch(flags & DN_RT_CNTL_MSK) {
+		case DN_RT_PKT_INIT:
 			dn_dev_init_pkt(skb);
 			break;
-                        case DN_RT_PKT_VERI:
+		case DN_RT_PKT_VERI:
 			dn_dev_veri_pkt(skb);
 			break;
 		}
@@ -645,31 +648,31 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
 			goto dump_it;
 
 		switch(flags & DN_RT_CNTL_MSK) {
-                        case DN_RT_PKT_HELO:
+		case DN_RT_PKT_HELO:
 			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);
 
-                        case DN_RT_PKT_L1RT:
-                        case DN_RT_PKT_L2RT:
-                                return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
-                        case DN_RT_PKT_ERTH:
+		case DN_RT_PKT_L1RT:
+		case DN_RT_PKT_L2RT:
+			return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
+		case DN_RT_PKT_ERTH:
 			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);
 
-                        case DN_RT_PKT_EEDH:
+		case DN_RT_PKT_EEDH:
 			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
-                }
-        } else {
+		}
+	} else {
 		if (dn->parms.state != DN_DEV_S_RU)
 			goto dump_it;
 
 		skb_pull(skb, 1); /* Pull flags */
 
-                switch(flags & DN_RT_PKT_MSK) {
-                        case DN_RT_PKT_LONG:
-                                return dn_route_rx_long(skb);
-                        case DN_RT_PKT_SHORT:
-                                return dn_route_rx_short(skb);
+		switch(flags & DN_RT_PKT_MSK) {
+		case DN_RT_PKT_LONG:
+			return dn_route_rx_long(skb);
+		case DN_RT_PKT_SHORT:
+			return dn_route_rx_short(skb);
 		}
-        }
+	}
 
 dump_it:
 	kfree_skb(skb);
@@ -812,8 +815,8 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 		rt->u.dst.neighbour = n;
 	}
 
-	if (rt->u.dst.metrics[RTAX_MTU-1] == 0 || 
-	    rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
+	if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
+	    rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
 		rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
 	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
 	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
@@ -873,14 +876,12 @@ static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_re
 
 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
 {
-	struct flowi fl = { .nl_u = { .dn_u = 
+	struct flowi fl = { .nl_u = { .dn_u =
 			{ .daddr = oldflp->fld_dst,
 			  .saddr = oldflp->fld_src,
 			  .scope = RT_SCOPE_UNIVERSE,
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-			  .fwmark = oldflp->fld_fwmark
-#endif
 			} },
+			.mark = oldflp->mark,
 			.iif = loopback_dev.ifindex,
 			.oif = oldflp->oif };
 	struct dn_route *rt = NULL;
@@ -898,7 +899,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
 		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
 		       " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst),
 		       dn_ntohs(oldflp->fld_src),
-		       oldflp->fld_fwmark, loopback_dev.ifindex, oldflp->oif);
+		       oldflp->mark, loopback_dev.ifindex, oldflp->oif);
 
 	/* If we have an output interface, verify its a DECnet device */
 	if (oldflp->oif) {
@@ -926,8 +927,13 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *old
 		for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
 			if (!dev_out->dn_ptr)
 				continue;
-			if (dn_dev_islocal(dev_out, oldflp->fld_src))
-				break;
+			if (!dn_dev_islocal(dev_out, oldflp->fld_src))
+				continue;
+			if ((dev_out->flags & IFF_LOOPBACK) &&
+			    oldflp->fld_dst &&
+			    !dn_dev_islocal(dev_out, oldflp->fld_dst))
+				continue;
+			break;
 		}
 		read_unlock(&dev_base_lock);
 		if (dev_out == NULL)
@@ -976,19 +982,19 @@ source_ok:
 		if (err != -ESRCH)
 			goto out;
 		/*
-		 * Here the fallback is basically the standard algorithm for 
+		 * Here the fallback is basically the standard algorithm for
 		 * routing in endnodes which is described in the DECnet routing
 		 * docs
 		 *
 		 * If we are not trying hard, look in neighbour cache.
 		 * The result is tested to ensure that if a specific output
-		 * device/source address was requested, then we honour that 
+		 * device/source address was requested, then we honour that
 		 * here
 		 */
 		if (!try_hard) {
 			neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
 			if (neigh) {
-				if ((oldflp->oif && 
+				if ((oldflp->oif &&
 				    (neigh->dev->ifindex != oldflp->oif)) ||
 				    (oldflp->fld_src &&
 				    (!dn_dev_islocal(neigh->dev,
@@ -1038,7 +1044,7 @@ select_source:
 		if (fl.fld_src == 0) {
 			fl.fld_src = dnet_select_source(dev_out, gateway,
 						 res.type == RTN_LOCAL ?
-						 RT_SCOPE_HOST : 
+						 RT_SCOPE_HOST :
 						 RT_SCOPE_LINK);
 			if (fl.fld_src == 0 && res.type != RTN_LOCAL)
 				goto e_addr;
@@ -1068,14 +1074,14 @@ select_source:
 	if (res.fi->fib_nhs > 1 && fl.oif == 0)
 		dn_fib_select_multipath(&fl, &res);
 
-	/* 
+	/*
 	 * We could add some logic to deal with default routes here and
 	 * get rid of some of the special casing above.
 	 */
 
 	if (!fl.fld_src)
 		fl.fld_src = DN_FIB_RES_PREFSRC(res);
-	
+
 	if (dev_out)
 		dev_put(dev_out);
 	dev_out = DN_FIB_RES_DEV(res);
@@ -1098,9 +1104,7 @@ make_route:
 	rt->fl.fld_dst = oldflp->fld_dst;
 	rt->fl.oif = oldflp->oif;
 	rt->fl.iif = 0;
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-	rt->fl.fld_fwmark = oldflp->fld_fwmark;
-#endif
+	rt->fl.mark = oldflp->mark;
 
 	rt->rt_saddr = fl.fld_src;
 	rt->rt_daddr = fl.fld_dst;
@@ -1140,8 +1144,8 @@ out:
 	return err;
 
 e_addr:
-        err = -EADDRNOTAVAIL;
-        goto done;
+	err = -EADDRNOTAVAIL;
+	goto done;
 e_inval:
 	err = -EINVAL;
 	goto done;
@@ -1165,12 +1169,10 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
 	if (!(flags & MSG_TRYHARD)) {
 		rcu_read_lock_bh();
 		for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
-			rt = rcu_dereference(rt->u.rt_next)) {
+			rt = rcu_dereference(rt->u.dst.dn_next)) {
 			if ((flp->fld_dst == rt->fl.fld_dst) &&
 			    (flp->fld_src == rt->fl.fld_src) &&
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-			    (flp->fld_fwmark == rt->fl.fld_fwmark) &&
-#endif
+			    (flp->mark == rt->fl.mark) &&
 			    (rt->fl.iif == 0) &&
 			    (rt->fl.oif == flp->oif)) {
 				rt->u.dst.lastuse = jiffies;
@@ -1221,14 +1223,12 @@ static int dn_route_input_slow(struct sk_buff *skb)
 	int flags = 0;
 	__le16 gateway = 0;
 	__le16 local_src = 0;
-	struct flowi fl = { .nl_u = { .dn_u = 
+	struct flowi fl = { .nl_u = { .dn_u =
 			     { .daddr = cb->dst,
 			       .saddr = cb->src,
 			       .scope = RT_SCOPE_UNIVERSE,
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-			       .fwmark = skb->nfmark
-#endif
 			    } },
+			.mark = skb->mark,
 			.iif = skb->dev->ifindex };
 	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
 	int err = -EINVAL;
@@ -1265,7 +1265,6 @@ static int dn_route_input_slow(struct sk_buff *skb)
 			goto e_inval;
 
 		res.type = RTN_LOCAL;
-		flags |= RTCF_DIRECTSRC;
 	} else {
 		__le16 src_map = fl.fld_src;
 		free_res = 1;
@@ -1280,7 +1279,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 		dev_hold(out_dev);
 
 		if (res.r)
-			src_map = dn_fib_rules_policy(fl.fld_src, &res, &flags);
+			src_map = fl.fld_src; /* no NAT support for now */
 
 		gateway = DN_FIB_RES_GW(res);
 		if (res.type == RTN_NAT) {
@@ -1312,7 +1311,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 		if (res.fi->fib_nhs > 1 && fl.oif == 0)
 			dn_fib_select_multipath(&fl, &res);
 
-		/* 
+		/*
 		 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
 		 * flag as a hint to set the intra-ethernet bit when
 		 * forwarding. If we've got NAT in operation, we don't do
@@ -1336,7 +1335,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
 		goto make_route;
 
 	/* Packet was intra-ethernet, so we know its on-link */
-	if (cb->rt_flags | DN_RT_F_IE) {
+	if (cb->rt_flags & DN_RT_F_IE) {
 		gateway = cb->src;
 		flags |= RTCF_DIRECTSRC;
 		goto make_route;
@@ -1376,7 +1375,7 @@ make_route:
 	rt->fl.fld_dst = cb->dst;
 	rt->fl.oif = 0;
 	rt->fl.iif = in_dev->ifindex;
-	rt->fl.fld_fwmark = fl.fld_fwmark;
+	rt->fl.mark = fl.mark;
 
 	rt->u.dst.flags = DST_HOST;
 	rt->u.dst.neighbour = neigh;
@@ -1444,13 +1443,11 @@ int dn_route_input(struct sk_buff *skb)
 
 	rcu_read_lock();
 	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
-	    rt = rcu_dereference(rt->u.rt_next)) {
+	    rt = rcu_dereference(rt->u.dst.dn_next)) {
 		if ((rt->fl.fld_src == cb->src) &&
-		    (rt->fl.fld_dst == cb->dst) && 
+		    (rt->fl.fld_dst == cb->dst) &&
 		    (rt->fl.oif == 0) &&
-#ifdef CONFIG_DECNET_ROUTE_FWMARK
-		    (rt->fl.fld_fwmark == skb->nfmark) &&
-#endif
+		    (rt->fl.mark == skb->mark) &&
 		    (rt->fl.iif == cb->iif)) {
 			rt->u.dst.lastuse = jiffies;
 			dst_hold(&rt->u.dst);
@@ -1472,7 +1469,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
 	unsigned char *b = skb->tail;
-	struct rta_cacheinfo ci;
+	long expires;
 
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
 	r = NLMSG_DATA(nlh);
@@ -1481,6 +1478,7 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 	r->rtm_src_len = 0;
 	r->rtm_tos = 0;
 	r->rtm_table = RT_TABLE_MAIN;
+	RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
 	r->rtm_type = rt->rt_type;
 	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
 	r->rtm_scope = RT_SCOPE_UNIVERSE;
@@ -1504,16 +1502,10 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
 	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
 		goto rtattr_failure;
-	ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
-	ci.rta_used = rt->u.dst.__use;
-	ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
-	if (rt->u.dst.expires)
-		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
-	else
-		ci.rta_expires = 0;
-	ci.rta_error = rt->u.dst.error;
-	ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
-	RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
+	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires,
+			       rt->u.dst.error) < 0)
+		goto rtattr_failure;
 	if (rt->fl.iif)
 		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
@@ -1522,8 +1514,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
 
 nlmsg_failure:
 rtattr_failure:
-        skb_trim(skb, b - skb->data);
-        return -1;
+	skb_trim(skb, b - skb->data);
+	return -1;
 }
 
 /*
@@ -1594,8 +1586,6 @@ int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
 	if (rtm->rtm_flags & RTM_F_NOTIFY)
 		rt->rt_flags |= RTCF_NOTIFY;
 
-	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
-
 	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);
 	if (err == 0)
@@ -1605,9 +1595,7 @@ int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
 		goto out_free;
 	}
 
-	err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
-
-	return err;
+	return rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
 
 out_free:
 	kfree_skb(skb);
@@ -1639,12 +1627,12 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
 		rcu_read_lock_bh();
 		for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
 			rt;
-			rt = rcu_dereference(rt->u.rt_next), idx++) {
+			rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
 			if (idx < s_idx)
 				continue;
 			skb->dst = dst_clone(&rt->u.dst);
 			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
-					cb->nlh->nlmsg_seq, RTM_NEWROUTE, 
+					cb->nlh->nlmsg_seq, RTM_NEWROUTE,
 					1, NLM_F_MULTI) <= 0) {
 				dst_release(xchg(&skb->dst, NULL));
 				rcu_read_unlock_bh();
@@ -1685,7 +1673,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
 {
 	struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);
 
-	rt = rt->u.rt_next;
+	rt = rt->u.dst.dn_next;
 	while(!rt) {
 		rcu_read_unlock_bh();
 		if (--s->bucket < 0)
@@ -1733,7 +1721,7 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
 			rt->u.dst.__use,
 			(int) dst_metric(&rt->u.dst, RTAX_RTT));
 	return 0;
-} 
+}
 
 static struct seq_operations dn_rt_cache_seq_ops = {
 	.start = dn_rt_cache_seq_start,
@@ -1763,7 +1751,7 @@ out_kfree:
 	goto out;
 }
 
-static struct file_operations dn_rt_cache_seq_fops = {
+static const struct file_operations dn_rt_cache_seq_fops = {
 	.owner	 = THIS_MODULE,
 	.open	 = dn_rt_cache_seq_open,
 	.read	 = seq_read,
@@ -1777,14 +1765,9 @@ void __init dn_route_init(void)
 {
 	int i, goal, order;
 
-	dn_dst_ops.kmem_cachep = kmem_cache_create("dn_dst_cache",
-						   sizeof(struct dn_route),
-						   0, SLAB_HWCACHE_ALIGN,
-						   NULL, NULL);
-
-	if (!dn_dst_ops.kmem_cachep)
-		panic("DECnet: Failed to allocate dn_dst_cache\n");
-
+	dn_dst_ops.kmem_cachep =
+		kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
+				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
 	init_timer(&dn_route_timer);
 	dn_route_timer.function = dn_dst_check_expire;
 	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
@@ -1795,38 +1778,38 @@ void __init dn_route_init(void)
 	for(order = 0; (1UL << order) < goal; order++)
 		/* NOTHING */;
 
-        /*
-         * Only want 1024 entries max, since the table is very, very unlikely
-         * to be larger than that.
-         */
-        while(order && ((((1UL << order) * PAGE_SIZE) /
-                                sizeof(struct dn_rt_hash_bucket)) >= 2048))
-                order--;
-
-        do {
-                dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
-                        sizeof(struct dn_rt_hash_bucket);
-                while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
-                        dn_rt_hash_mask--;
-                dn_rt_hash_table = (struct dn_rt_hash_bucket *)
-                        __get_free_pages(GFP_ATOMIC, order);
-        } while (dn_rt_hash_table == NULL && --order > 0);
+	/*
+	 * Only want 1024 entries max, since the table is very, very unlikely
+	 * to be larger than that.
+	 */
+	while(order && ((((1UL << order) * PAGE_SIZE) /
+				sizeof(struct dn_rt_hash_bucket)) >= 2048))
+		order--;
+
+	do {
+		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
+			sizeof(struct dn_rt_hash_bucket);
+		while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
+			dn_rt_hash_mask--;
+		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
+			__get_free_pages(GFP_ATOMIC, order);
+	} while (dn_rt_hash_table == NULL && --order > 0);
 
 	if (!dn_rt_hash_table)
-                panic("Failed to allocate DECnet route cache hash table\n");
+		panic("Failed to allocate DECnet route cache hash table\n");
 
-	printk(KERN_INFO 
-		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n", 
-		dn_rt_hash_mask, 
+	printk(KERN_INFO
+		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
+		dn_rt_hash_mask,
 		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);
 
 	dn_rt_hash_mask--;
-        for(i = 0; i <= dn_rt_hash_mask; i++) {
-                spin_lock_init(&dn_rt_hash_table[i].lock);
-                dn_rt_hash_table[i].chain = NULL;
-        }
+	for(i = 0; i <= dn_rt_hash_mask; i++) {
+		spin_lock_init(&dn_rt_hash_table[i].lock);
+		dn_rt_hash_table[i].chain = NULL;
+	}
 
-        dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
+	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);
 
 	proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
 }