X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=net%2Fsched%2Fcls_tcindex.c;h=e806f2314b5e24281dd4bfbe3f442d1c1ee51d41;hb=e071041be037eca208b62b84469a06bdfc692bea;hp=471909e54807e1508db78e5e0115fedff3cb934d;hpb=aa767bfea4828936fffb7800204294ba4c8ba283;p=safe%2Fjmp%2Flinux-2.6

diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 471909e..e806f23 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -13,12 +13,6 @@
 #include <net/netlink.h>
 #include <net/pkt_cls.h>
 
-
-/*
- * Not quite sure if we need all the xchgs Alexey uses when accessing things.
- * Can always add them later ... :)
- */
-
 /*
  * Passing parameters to the root seems to be done more awkwardly than really
  * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
@@ -55,7 +49,7 @@ struct tcindex_data {
 	int fall_through;	/* 0: only classify if explicit match */
 };
 
-static struct tcf_ext_map tcindex_ext_map = {
+static const struct tcf_ext_map tcindex_ext_map = {
 	.police = TCA_TCINDEX_POLICE,
 	.action = TCA_TCINDEX_ACT
 };
@@ -193,10 +187,18 @@ valid_perfect_hash(struct tcindex_data *p)
 	return p->hash > (p->mask >> p->shift);
 }
 
+static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
+	[TCA_TCINDEX_HASH]		= { .type = NLA_U32 },
+	[TCA_TCINDEX_MASK]		= { .type = NLA_U16 },
+	[TCA_TCINDEX_SHIFT]		= { .type = NLA_U32 },
+	[TCA_TCINDEX_FALL_THROUGH]	= { .type = NLA_U32 },
+	[TCA_TCINDEX_CLASSID]		= { .type = NLA_U32 },
+};
+
 static int
 tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 		  struct tcindex_data *p, struct tcindex_filter_result *r,
-		  struct rtattr **tb, struct rtattr *est)
+		  struct nlattr **tb, struct nlattr *est)
 {
 	int err, balloc = 0;
 	struct tcindex_filter_result new_filter_result, *old_r = r;
@@ -217,24 +219,14 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 	else
 		memset(&cr, 0, sizeof(cr));
 
-	err = -EINVAL;
-	if (tb[TCA_TCINDEX_HASH-1]) {
-		if (RTA_PAYLOAD(tb[TCA_TCINDEX_HASH-1]) < sizeof(u32))
-			goto errout;
-		cp.hash = *(u32 *) RTA_DATA(tb[TCA_TCINDEX_HASH-1]);
-	}
+	if (tb[TCA_TCINDEX_HASH])
+		cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
 
-	if (tb[TCA_TCINDEX_MASK-1]) {
-		if (RTA_PAYLOAD(tb[TCA_TCINDEX_MASK-1]) < sizeof(u16))
-			goto errout;
-		cp.mask = *(u16 *) RTA_DATA(tb[TCA_TCINDEX_MASK-1]);
-	}
+	if (tb[TCA_TCINDEX_MASK])
+		cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
 
-	if (tb[TCA_TCINDEX_SHIFT-1]) {
-		if (RTA_PAYLOAD(tb[TCA_TCINDEX_SHIFT-1]) < sizeof(int))
-			goto errout;
-		cp.shift = *(int *) RTA_DATA(tb[TCA_TCINDEX_SHIFT-1]);
-	}
+	if (tb[TCA_TCINDEX_SHIFT])
+		cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
 
 	err = -EBUSY;
 	/* Hash already allocated, make sure that we still meet the
@@ -248,12 +240,8 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 		goto errout;
 
 	err = -EINVAL;
-	if (tb[TCA_TCINDEX_FALL_THROUGH-1]) {
-		if (RTA_PAYLOAD(tb[TCA_TCINDEX_FALL_THROUGH-1]) < sizeof(u32))
-			goto errout;
-		cp.fall_through =
-			*(u32 *) RTA_DATA(tb[TCA_TCINDEX_FALL_THROUGH-1]);
-	}
+	if (tb[TCA_TCINDEX_FALL_THROUGH])
+		cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
 
 	if (!cp.hash) {
 		/* Hash not specified, use perfect hash if the upper limit
@@ -304,8 +292,8 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
 			goto errout_alloc;
 	}
 
-	if (tb[TCA_TCINDEX_CLASSID-1]) {
-		cr.res.classid = *(u32 *) RTA_DATA(tb[TCA_TCINDEX_CLASSID-1]);
+	if (tb[TCA_TCINDEX_CLASSID]) {
+		cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
 		tcf_bind_filter(tp, &cr.res, base);
 	}
 
@@ -344,12 +332,13 @@ errout:
 
 static int
 tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
-	       struct rtattr **tca, unsigned long *arg)
+	       struct nlattr **tca, unsigned long *arg)
 {
-	struct rtattr *opt = tca[TCA_OPTIONS-1];
-	struct rtattr *tb[TCA_TCINDEX_MAX];
+	struct nlattr *opt = tca[TCA_OPTIONS];
+	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
 	struct tcindex_data *p = PRIV(tp);
 	struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
+	int err;
 
 	pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
 		"p %p,r %p,*arg 0x%lx\n",
@@ -358,10 +347,11 @@ tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
 	if (!opt)
 		return 0;
 
-	if (rtattr_parse_nested(tb, TCA_TCINDEX_MAX, opt) < 0)
-		return -EINVAL;
+	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
+	if (err < 0)
+		return err;
 
-	return tcindex_set_parms(tp, base, handle, p, r, tb, tca[TCA_RATE-1]);
+	return tcindex_set_parms(tp, base, handle, p, r, tb, tca[TCA_RATE]);
 }
 
 
@@ -435,21 +425,23 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
 	struct tcindex_data *p = PRIV(tp);
 	struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
 	unsigned char *b = skb_tail_pointer(skb);
-	struct rtattr *rta;
+	struct nlattr *nest;
 
 	pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
 		 tp, fh, skb, t, p, r, b);
 	pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
-	rta = (struct rtattr *) b;
-	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+
+	nest = nla_nest_start(skb, TCA_OPTIONS);
+	if (nest == NULL)
+		goto nla_put_failure;
+
 	if (!fh) {
 		t->tcm_handle = ~0; /* whatever ... */
-		RTA_PUT(skb, TCA_TCINDEX_HASH, sizeof(p->hash), &p->hash);
-		RTA_PUT(skb, TCA_TCINDEX_MASK, sizeof(p->mask), &p->mask);
-		RTA_PUT(skb, TCA_TCINDEX_SHIFT, sizeof(p->shift), &p->shift);
-		RTA_PUT(skb, TCA_TCINDEX_FALL_THROUGH, sizeof(p->fall_through),
-			&p->fall_through);
-		rta->rta_len = skb_tail_pointer(skb) - b;
+		NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash);
+		NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask);
+		NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift);
+		NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through);
+		nla_nest_end(skb, nest);
 	} else {
 		if (p->perfect) {
 			t->tcm_handle = r-p->perfect;
@@ -468,25 +460,24 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
 		}
 		pr_debug("handle = %d\n", t->tcm_handle);
 		if (r->res.class)
-			RTA_PUT(skb, TCA_TCINDEX_CLASSID, 4, &r->res.classid);
+			NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid);
 
 		if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
-			goto rtattr_failure;
-		rta->rta_len = skb_tail_pointer(skb) - b;
+			goto nla_put_failure;
+		nla_nest_end(skb, nest);
 
 		if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0)
-			goto rtattr_failure;
+			goto nla_put_failure;
 	}
 
 	return skb->len;
 
-rtattr_failure:
+nla_put_failure:
 	nlmsg_trim(skb, b);
 	return -1;
 }
 
-static struct tcf_proto_ops cls_tcindex_ops = {
-	.next		= NULL,
+static struct tcf_proto_ops cls_tcindex_ops __read_mostly = {
 	.kind		= "tcindex",
 	.classify	= tcindex_classify,
 	.init		= tcindex_init,
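
For context (not part of the patch itself): the hunks above replace the open-coded rtattr parsing and RTA_PUT dumping with the typed netlink attribute helpers from <net/netlink.h>. nla_parse_nested() validates each attribute's type and length against a struct nla_policy, which is why the per-attribute RTA_PAYLOAD size checks disappear. A minimal sketch of that parse pattern follows; example_policy and example_parse are hypothetical names used only for illustration, while the nla_* helpers, the NLA_U32/NLA_U16 policy types and the TCA_TCINDEX_* constants are the same ones the diff uses.

#include <linux/pkt_cls.h>
#include <net/netlink.h>

/* Illustrative only: accept the nested TCA_TCINDEX_* attributes and read
 * two of them, relying on the policy for length/type validation. */
static const struct nla_policy example_policy[TCA_TCINDEX_MAX + 1] = {
	[TCA_TCINDEX_HASH] = { .type = NLA_U32 },
	[TCA_TCINDEX_MASK] = { .type = NLA_U16 },
};

static int example_parse(struct nlattr *opt, u32 *hash, u16 *mask)
{
	struct nlattr *tb[TCA_TCINDEX_MAX + 1];
	int err;

	/* Split the nested attribute stream into tb[] and check it against
	 * example_policy; a malformed attribute yields a negative errno,
	 * which the caller propagates (as tcindex_change() now does). */
	err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, example_policy);
	if (err < 0)
		return err;

	if (tb[TCA_TCINDEX_HASH])
		*hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
	if (tb[TCA_TCINDEX_MASK])
		*mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
	return 0;
}

On the dump side the same conversion brackets the attributes with nla_nest_start()/nla_nest_end() and emits them with the NLA_PUT_U32()/NLA_PUT_U16() macros, which jump to the nla_put_failure label when the skb runs out of room, so the manual rta_len fixup against the saved tail pointer is no longer needed.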