+/*
+ * Per-CPU spinlock associated with per-cpu table entries, and
+ * with a counter for the "reading" side that allows a recursive
+ * reader to avoid taking the lock and deadlocking.
+ *
+ * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu.
+ * It needs to ensure that the rules are not being changed while the packet
+ * is being processed. In some cases, the read lock will be acquired
+ * twice on the same CPU; this is okay because of the count.
+ *
+ * "writing" is used when reading counters.
+ * During replace, any readers that are using the old tables have to complete
+ * before freeing the old table. This is handled by the write locking
+ * necessary for reading the counters.
+ */
+struct xt_info_lock {
+        spinlock_t lock;
+        unsigned char readers;
+};
+DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
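
The declaration above only makes the per-cpu locks visible to the inline helpers below; the variable still has to be defined and each CPU's lock initialised (in x_tables.c) before any rules are evaluated. A minimal sketch of that counterpart, using the standard per-cpu macros; the init-function name is illustrative:

DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);

static int __init example_xt_init(void)        /* illustrative name */
{
        unsigned int i;

        for_each_possible_cpu(i) {
                struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);

                spin_lock_init(&lock->lock);
                lock->readers = 0;
        }
        return 0;
}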
+
+/*
+ * Note: we need to ensure that preemption is disabled before acquiring
+ * the per-cpu-variable, so we do it as a two step process rather than
+ * using "spin_lock_bh()".
+ *
+ * We _also_ need to disable bottom half processing before updating our
+ * nesting count, to make sure that the only kind of re-entrancy is this
+ * code being called by itself: since the count+lock is not an atomic
+ * operation, we can allow no races.
+ *
+ * _Only_ that special combination of being per-cpu and never getting
+ * re-entered asynchronously means that the count is safe.
+ */
+static inline void xt_info_rdlock_bh(void)
+{
+        struct xt_info_lock *lock;
+
+        local_bh_disable();
+        lock = &__get_cpu_var(xt_info_locks);
+        if (likely(!lock->readers++))
+                spin_lock(&lock->lock);
+}
+
+static inline void xt_info_rdunlock_bh(void)
+{
+        struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
+
+        if (likely(!--lock->readers))
+                spin_unlock(&lock->lock);
+        local_bh_enable();
+}
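
Taken together, the read-side pair brackets one rule-set traversal on the local CPU. A rough sketch of how a packet path would use it; the structure loosely follows ipt_do_table, and the names example_do_table/verdict are illustrative:

unsigned int example_do_table(struct sk_buff *skb, struct xt_table *table)
{
        const struct xt_table_info *private;
        unsigned int verdict = NF_DROP;         /* illustrative default */

        xt_info_rdlock_bh();                    /* BH off, nesting count bumped */
        private = table->private;
        /* ... walk private->entries[smp_processor_id()] and update the
         * per-cpu counters; if a target re-enters table traversal on this
         * CPU, only ->readers is incremented, so there is no deadlock ... */
        xt_info_rdunlock_bh();

        return verdict;
}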
+
+/*
+ * The "writer" side needs to get exclusive access to the lock,
+ * regardless of readers. This must be called with bottom half
+ * processing (and thus also preemption) disabled.
+ */
+static inline void xt_info_wrlock(unsigned int cpu)
+{
+        spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+}
+
+static inline void xt_info_wrunlock(unsigned int cpu)
+{
+        spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+}
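
The write side is what the counter-read and table-replace paths use: with bottom halves already off, take each CPU's lock in turn so no reader is mid-traversal on that CPU while its counters are sampled or its table pointer is swapped. A sketch loosely modelled on get_counters(); the function name and the elided entry walk are illustrative:

static void example_sum_counters(const struct xt_table_info *t)
{
        unsigned int cpu;

        local_bh_disable();                     /* also pins us to this CPU */
        for_each_possible_cpu(cpu) {
                xt_info_wrlock(cpu);            /* wait out that CPU's reader */
                /* ... accumulate the pcnt/bcnt values in t->entries[cpu] ... */
                xt_info_wrunlock(cpu);
        }
        local_bh_enable();
}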
+
+/*
+ * This helper is performance critical and must be inlined
+ */
+static inline unsigned long ifname_compare_aligned(const char *_a,
+                                                   const char *_b,
+                                                   const char *_mask)
+{
+        const unsigned long *a = (const unsigned long *)_a;
+        const unsigned long *b = (const unsigned long *)_b;
+        const unsigned long *mask = (const unsigned long *)_mask;
+        unsigned long ret;
+
+        ret = (a[0] ^ b[0]) & mask[0];
+        if (IFNAMSIZ > sizeof(unsigned long))
+                ret |= (a[1] ^ b[1]) & mask[1];
+        if (IFNAMSIZ > 2 * sizeof(unsigned long))
+                ret |= (a[2] ^ b[2]) & mask[2];
+        if (IFNAMSIZ > 3 * sizeof(unsigned long))
+                ret |= (a[3] ^ b[3]) & mask[3];
+        BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
+        return ret;
+}
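
As a concrete illustration of what the masked XOR computes: iptables stores a wildcard such as "eth+" as the name "eth" plus a mask whose bytes are 0xff only for the literal characters, so any device whose name starts with "eth" compares equal. The buffers below are illustrative; the real ones live inside the rule blob and are already IFNAMSIZ bytes long and long-aligned:

static bool example_iface_matches(void)
{
        /* roughly what "-i eth+" looks like inside a rule */
        static const char want[IFNAMSIZ] __aligned(sizeof(unsigned long)) = "eth";
        static const char mask[IFNAMSIZ] __aligned(sizeof(unsigned long)) = "\xff\xff\xff";
        /* incoming device name, padded to IFNAMSIZ by the caller */
        char dev[IFNAMSIZ] __aligned(sizeof(unsigned long)) = "eth3";

        /* bytes past the mask are ignored, so "eth3" matches "eth+" */
        return ifname_compare_aligned(dev, want, mask) == 0;
}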
+
+#ifdef CONFIG_COMPAT
+#include <net/compat.h>
+
+struct compat_xt_entry_match {
+        union {
+                struct {
+                        u_int16_t match_size;
+                        char name[XT_FUNCTION_MAXNAMELEN - 1];
+                        u_int8_t revision;
+                } user;
+                struct {
+                        u_int16_t match_size;
+                        compat_uptr_t match;
+                } kernel;
+                u_int16_t match_size;
+        } u;
+        unsigned char data[0];
+};
+
+struct compat_xt_entry_target {
+        union {
+                struct {
+                        u_int16_t target_size;
+                        char name[XT_FUNCTION_MAXNAMELEN - 1];
+                        u_int8_t revision;
+                } user;
+                struct {
+                        u_int16_t target_size;
+                        compat_uptr_t target;
+                } kernel;
+                u_int16_t target_size;
+        } u;
+        unsigned char data[0];
+};
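
These mirror struct xt_entry_match and struct xt_entry_target for 32-bit callers: userspace fills in u.user.name and u.user.revision, the kernel overwrites the same bytes with u.kernel.match/u.kernel.target after lookup, and the bare u.match_size/u.target_size aliases the size field in both views. A hedged sketch of the kind of bounds check that relies on that overlap; the helper itself is illustrative, not an existing function:

static bool example_compat_match_fits(const struct compat_xt_entry_match *m,
                                      unsigned int bytes_left)
{
        /* u.match_size is valid whichever view userspace filled in */
        return m->u.match_size >= sizeof(*m) &&
               m->u.match_size <= bytes_left;
}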
+
+/* FIXME: this works only on 32 bit tasks
+ * need to change whole approach in order to calculate align as function of
+ * current task alignment */
+
+struct compat_xt_counters {
+        compat_u64 pcnt, bcnt;                  /* Packet and byte counters */
+};
+
+struct compat_xt_counters_info {
+        char name[XT_TABLE_MAXNAMELEN];
+        compat_uint_t num_counters;
+        struct compat_xt_counters counters[0];
+};
+
+#define COMPAT_XT_ALIGN(s) (((s) + (__alignof__(struct compat_xt_counters)-1)) \
+                            & ~(__alignof__(struct compat_xt_counters)-1))
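
COMPAT_XT_ALIGN plays the role XT_ALIGN plays for native rules, but rounds to the alignment a 32-bit task uses for its counters. On x86-64, compat_u64 is only 4-byte aligned, so the compat blob packs tighter than the native one (whose xt_counters carry full 8-byte u64 alignment); that difference is what the size and offset conversion helpers below have to account for. A small illustration under that assumption; the helper name and payload value are made up:

/* assuming __alignof__(struct compat_xt_counters) == 4, as on x86-64 compat */
static unsigned int example_compat_match_room(unsigned int payload)
{
        /* e.g. payload == 13: 32 + 13 = 45, rounded up to 48 */
        return COMPAT_XT_ALIGN(sizeof(struct compat_xt_entry_match) + payload);
}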
+
+extern void xt_compat_lock(u_int8_t af);
+extern void xt_compat_unlock(u_int8_t af);
+
+extern int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta);
+extern void xt_compat_flush_offsets(u_int8_t af);
+extern short xt_compat_calc_jump(u_int8_t af, unsigned int offset);
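
The offset bookkeeping above is what lets 32-bit rule offsets be translated once entries have been expanded to their native size: while the per-family compat lock is held, each entry records how much larger its native form is, and a verdict that jumps by a compat offset is later corrected by the accumulated delta. A hedged sketch of that ordering; error handling is omitted and the offsets, delta and function name are illustrative:

static unsigned int example_compat_jump_fixup(unsigned int entry_off, short delta,
                                              unsigned int jump_off)
{
        xt_compat_lock(AF_INET);

        /* the entry at 'entry_off' in the 32-bit blob grows by 'delta' bytes */
        if (xt_compat_add_offset(AF_INET, entry_off, delta) == 0)
                /* a verdict jumping to 'jump_off' moves by the summed deltas */
                jump_off += xt_compat_calc_jump(AF_INET, jump_off);

        xt_compat_flush_offsets(AF_INET);
        xt_compat_unlock(AF_INET);
        return jump_off;
}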
+
+extern int xt_compat_match_offset(const struct xt_match *match);
+extern int xt_compat_match_from_user(struct xt_entry_match *m,
+                                     void **dstptr, unsigned int *size);
+extern int xt_compat_match_to_user(struct xt_entry_match *m,
+                                   void __user **dstptr, unsigned int *size);
+
+extern int xt_compat_target_offset(const struct xt_target *target);
+extern void xt_compat_target_from_user(struct xt_entry_target *t,
+                                       void **dstptr, unsigned int *size);
+extern int xt_compat_target_to_user(struct xt_entry_target *t,
+                                    void __user **dstptr, unsigned int *size);
+
+#endif /* CONFIG_COMPAT */