sh: convert /proc/cpu/alignment, /proc/cpu/kernel_alignment to seq_file
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index cc49c93..bc45039 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
 #include <linux/tc_ematch/tc_em_cmp.h>
+#include <asm/unaligned.h>
 #include <net/pkt_cls.h>
 
 static inline int cmp_needs_transformation(struct tcf_em_cmp *cmp)
@@ -37,8 +38,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
                        break;
 
                case TCF_EM_ALIGN_U16:
-                       val = *ptr << 8;
-                       val |= *(ptr+1);
+                       val = get_unaligned_be16(ptr);
 
                        if (cmp_needs_transformation(cmp))
                                val = be16_to_cpu(val);
@@ -47,10 +47,7 @@ static int em_cmp_match(struct sk_buff *skb, struct tcf_ematch *em,
                case TCF_EM_ALIGN_U32:
                        /* Worth checking boundaries? The branching seems
                         * to get worse. Visit again. */
-                       val = *ptr << 24;
-                       val |= *(ptr+1) << 16;
-                       val |= *(ptr+2) << 8;
-                       val |= *(ptr+3);
+                       val = get_unaligned_be32(ptr);
 
                        if (cmp_needs_transformation(cmp))
                                val = be32_to_cpu(val);
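
Note on the helpers used above: get_unaligned_be16() and get_unaligned_be32() (pulled in via <asm/unaligned.h>) read a 16- or 32-bit big-endian value from a pointer that may not be naturally aligned, which is what the removed open-coded shift-and-or sequences did by hand. As a rough illustration only (not the kernel implementation; the helper names and the userspace main() below are made up for the sketch), the semantics look like this:

/*
 * Illustrative userspace sketch only, not kernel code: the names below
 * are hypothetical stand-ins for get_unaligned_be16()/get_unaligned_be32().
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t sketch_get_unaligned_be16(const void *p)
{
	const uint8_t *b = p;

	/* Assemble the value byte by byte, most significant byte first,
	 * so no 16-bit load from a possibly misaligned address is issued. */
	return (uint16_t)((b[0] << 8) | b[1]);
}

static uint32_t sketch_get_unaligned_be32(const void *p)
{
	const uint8_t *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
	/* Packet-style buffer; reading at offset 1 is an unaligned access. */
	uint8_t buf[] = { 0x00, 0x12, 0x34, 0x56, 0x78 };

	printf("be16 at +1: 0x%04x\n", (unsigned)sketch_get_unaligned_be16(buf + 1));
	printf("be32 at +1: 0x%08x\n", (unsigned)sketch_get_unaligned_be32(buf + 1));
	return 0;
}

Centralizing this in the common helpers avoids plain word-sized loads from unaligned packet data, which fault or trap on some architectures, and keeps the byte-order handling in one place instead of repeating the shifts at every call site.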