Diffstat (limited to 'net/ipv6/netfilter/ip6_tables.c')
 net/ipv6/netfilter/ip6_tables.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index dc41d6d3c6c6..5359ef4daac5 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -387,9 +387,7 @@ ip6t_do_table(struct sk_buff *skb,
 				goto no_match;
 		}
 
-		ADD_COUNTER(e->counters,
-			    ntohs(ipv6_hdr(skb)->payload_len) +
-			    sizeof(struct ipv6hdr), 1);
+		ADD_COUNTER(e->counters, skb->len, 1);
 
 		t = ip6t_get_target_c(e);
 		IP_NF_ASSERT(t->u.kernel.target);
@@ -899,7 +897,7 @@ get_counters(const struct xt_table_info *t,
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu;
+	unsigned int curcpu = get_cpu();
 
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
@@ -909,14 +907,16 @@ get_counters(const struct xt_table_info *t,
 	 * if new softirq were to run and call ipt_do_table
 	 */
 	local_bh_disable();
-	curcpu = smp_processor_id();
-
 	i = 0;
 	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
 		SET_COUNTER(counters[i], iter->counters.bcnt,
 			    iter->counters.pcnt);
 		++i;
 	}
+	local_bh_enable();
+	/* Processing counters from other cpus, we can let bottom half enabled,
+	 * (preemption is disabled)
+	 */
 
 	for_each_possible_cpu(cpu) {
 		if (cpu == curcpu)
@@ -930,7 +930,7 @@ get_counters(const struct xt_table_info *t,
 		}
 		xt_info_wrunlock(cpu);
 	}
-	local_bh_enable();
+	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
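
The first hunk swaps the per-rule byte accounting from "payload length read out of the IPv6 header plus sizeof(struct ipv6hdr)" to the length the stack already tracks in skb->len. The user-space sketch below only illustrates that arithmetic; fake_counters and the local ADD_COUNTER macro are simplified stand-ins for the kernel's xt_counters/ADD_COUNTER, and the packet sizes are made up for illustration.

/* Simplified stand-ins for the kernel's xt_counters and ADD_COUNTER(). */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct fake_counters {
	uint64_t pcnt;	/* packets matched by the rule */
	uint64_t bcnt;	/* bytes matched by the rule */
};

#define ADD_COUNTER(c, bytes, pkts) \
	do { (c).bcnt += (bytes); (c).pcnt += (pkts); } while (0)

int main(void)
{
	struct fake_counters before = {0, 0}, after = {0, 0};
	uint16_t payload_len = htons(1280);	/* as stored in the IPv6 header */
	size_t ipv6hdr_len = 40;		/* sizeof(struct ipv6hdr) */
	size_t skb_len = 40 + 1280;		/* skb->len as seen by ip6t_do_table() */

	/* pre-patch formula: header field converted to host order + fixed header size */
	ADD_COUNTER(before, ntohs(payload_len) + ipv6hdr_len, 1);
	/* post-patch formula: just use skb->len */
	ADD_COUNTER(after, skb_len, 1);

	printf("pre-patch bytes: %llu, post-patch bytes: %llu\n",
	       (unsigned long long)before.bcnt, (unsigned long long)after.bcnt);
	return 0;
}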
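
The remaining hunks rework get_counters(): get_cpu() pins the task to one CPU (preemption disabled) for the whole walk, bottom halves are blocked only while snapshotting the local CPU's counters, and the other CPUs' counters are then accumulated with softirqs re-enabled. For orientation, here is roughly how the function reads with the patch applied; the accumulation loop over the other CPUs is not visible in this diff and is reconstructed from context, so treat it as an approximation rather than a verbatim copy of the tree.

static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ip6t_entry *iter;
	unsigned int cpu;
	unsigned int i;
	unsigned int curcpu = get_cpu();	/* pins us to one CPU: preemption off */

	/* Snapshot this CPU's counters with bottom halves disabled, so a
	 * softirq calling ip6t_do_table() on the same CPU cannot race us.
	 */
	local_bh_disable();
	i = 0;
	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
		SET_COUNTER(counters[i], iter->counters.bcnt,
			    iter->counters.pcnt);
		++i;
	}
	local_bh_enable();
	/* Processing counters from other cpus, we can let bottom half enabled,
	 * (preemption is disabled)
	 */

	for_each_possible_cpu(cpu) {
		if (cpu == curcpu)
			continue;
		i = 0;
		xt_info_wrlock(cpu);	/* per-cpu xt counter lock of that era */
		xt_entry_foreach(iter, t->entries[cpu], t->size) {
			ADD_COUNTER(counters[i], iter->counters.bcnt,
				    iter->counters.pcnt);
			++i;
		}
		xt_info_wrunlock(cpu);
	}
	put_cpu();	/* re-enable preemption; pairs with get_cpu() above */
}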