author     Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>  2007-07-07 22:25:51 -0700
committer  David S. Miller <davem@sunset.davemloft.net>       2007-07-10 22:17:35 -0700
commit     dacd2a1a5cf621288833aa3c6e815b86a1536538 (patch)
tree       c615b95aa1518c6518df35acd481ed4af19acd61 /net/netfilter
parent     ff09b7493c8f433d3ffd6a31ad58d190f82ef0c5 (diff)
[NETFILTER]: nf_conntrack: remove old memory allocator of conntrack
Now memory space for help and NAT is allocated by the extension infrastructure.

Signed-off-by: Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/netfilter')
-rw-r--r--  net/netfilter/nf_conntrack_core.c             222
-rw-r--r--  net/netfilter/nf_conntrack_l3proto_generic.c    7
2 files changed, 15 insertions(+), 214 deletions(-)
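
For context, the extension infrastructure this patch relies on replaces the removed per-feature slab caches: instead of sizing struct nf_conn by feature bits at allocation time, helper and NAT data are attached to a conntrack on demand and looked up later. The fragment below is a minimal sketch of that consumer pattern; nf_ct_ext_add(), nf_ct_ext_find() and NF_CT_EXT_HELPER are assumed from the extension-infrastructure patches earlier in this series (they are not part of this diff), the two wrapper functions are illustrative only, and error handling is abbreviated.

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_helper.h>

/* Sketch: attach helper storage to an unconfirmed conntrack via the
 * extension area instead of a feature-specific slab cache. */
static struct nf_conn_help *attach_helper_area(struct nf_conn *ct)
{
	struct nf_conn_help *help;

	/* Grows (or creates) the per-conntrack extension area; returns
	 * a pointer to the new helper slot, or NULL on allocation failure. */
	help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, GFP_ATOMIC);
	return help;
}

/* Later code retrieves the same storage without knowing how struct
 * nf_conn was sized; NULL means no helper area was ever attached. */
static struct nf_conn_help *find_helper_area(struct nf_conn *ct)
{
	return nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
}

Because every conntrack now comes from the single nf_conntrack slab cache, freeing one reduces to nf_ct_ext_free() plus kmem_cache_free(), as the nf_conntrack_free() hunk in the diff below shows.
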
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 914506e6c787..a71366652938 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -71,39 +71,12 @@ EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
unsigned int nf_ct_log_invalid __read_mostly;
LIST_HEAD(unconfirmed);
static int nf_conntrack_vmalloc __read_mostly;
-
+static struct kmem_cache *nf_conntrack_cachep __read_mostly;
static unsigned int nf_conntrack_next_id;
DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
-/*
- * This scheme offers various size of "struct nf_conn" dependent on
- * features(helper, nat, ...)
- */
-
-#define NF_CT_FEATURES_NAMELEN 256
-static struct {
- /* name of slab cache. printed in /proc/slabinfo */
- char *name;
-
- /* size of slab cache */
- size_t size;
-
- /* slab cache pointer */
- struct kmem_cache *cachep;
-
- /* allocated slab cache + modules which uses this slab cache */
- int use;
-
-} nf_ct_cache[NF_CT_F_NUM];
-
-/* protect members of nf_ct_cache except of "use" */
-DEFINE_RWLOCK(nf_ct_cache_lock);
-
-/* This avoids calling kmem_cache_create() with same name simultaneously */
-static DEFINE_MUTEX(nf_ct_cache_mutex);
-
static int nf_conntrack_hash_rnd_initted;
static unsigned int nf_conntrack_hash_rnd;
@@ -126,122 +99,6 @@ static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
nf_conntrack_hash_rnd);
}
-int nf_conntrack_register_cache(u_int32_t features, const char *name,
- size_t size)
-{
- int ret = 0;
- char *cache_name;
- struct kmem_cache *cachep;
-
- DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
- features, name, size);
-
- if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
- DEBUGP("nf_conntrack_register_cache: invalid features.: 0x%x\n",
- features);
- return -EINVAL;
- }
-
- mutex_lock(&nf_ct_cache_mutex);
-
- write_lock_bh(&nf_ct_cache_lock);
- /* e.g: multiple helpers are loaded */
- if (nf_ct_cache[features].use > 0) {
- DEBUGP("nf_conntrack_register_cache: already resisterd.\n");
- if ((!strncmp(nf_ct_cache[features].name, name,
- NF_CT_FEATURES_NAMELEN))
- && nf_ct_cache[features].size == size) {
- DEBUGP("nf_conntrack_register_cache: reusing.\n");
- nf_ct_cache[features].use++;
- ret = 0;
- } else
- ret = -EBUSY;
-
- write_unlock_bh(&nf_ct_cache_lock);
- mutex_unlock(&nf_ct_cache_mutex);
- return ret;
- }
- write_unlock_bh(&nf_ct_cache_lock);
-
- /*
- * The memory space for name of slab cache must be alive until
- * cache is destroyed.
- */
- cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
- if (cache_name == NULL) {
- DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
- ret = -ENOMEM;
- goto out_up_mutex;
- }
-
- if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
- >= NF_CT_FEATURES_NAMELEN) {
- printk("nf_conntrack_register_cache: name too long\n");
- ret = -EINVAL;
- goto out_free_name;
- }
-
- cachep = kmem_cache_create(cache_name, size, 0, 0,
- NULL, NULL);
- if (!cachep) {
- printk("nf_conntrack_register_cache: Can't create slab cache "
- "for the features = 0x%x\n", features);
- ret = -ENOMEM;
- goto out_free_name;
- }
-
- write_lock_bh(&nf_ct_cache_lock);
- nf_ct_cache[features].use = 1;
- nf_ct_cache[features].size = size;
- nf_ct_cache[features].cachep = cachep;
- nf_ct_cache[features].name = cache_name;
- write_unlock_bh(&nf_ct_cache_lock);
-
- goto out_up_mutex;
-
-out_free_name:
- kfree(cache_name);
-out_up_mutex:
- mutex_unlock(&nf_ct_cache_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_register_cache);
-
-/* FIXME: In the current, only nf_conntrack_cleanup() can call this function. */
-void nf_conntrack_unregister_cache(u_int32_t features)
-{
- struct kmem_cache *cachep;
- char *name;
-
- /*
- * This assures that kmem_cache_create() isn't called before destroying
- * slab cache.
- */
- DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
- mutex_lock(&nf_ct_cache_mutex);
-
- write_lock_bh(&nf_ct_cache_lock);
- if (--nf_ct_cache[features].use > 0) {
- write_unlock_bh(&nf_ct_cache_lock);
- mutex_unlock(&nf_ct_cache_mutex);
- return;
- }
- cachep = nf_ct_cache[features].cachep;
- name = nf_ct_cache[features].name;
- nf_ct_cache[features].cachep = NULL;
- nf_ct_cache[features].name = NULL;
- nf_ct_cache[features].size = 0;
- write_unlock_bh(&nf_ct_cache_lock);
-
- synchronize_net();
-
- kmem_cache_destroy(cachep);
- kfree(name);
-
- mutex_unlock(&nf_ct_cache_mutex);
-}
-EXPORT_SYMBOL_GPL(nf_conntrack_unregister_cache);
-
int
nf_ct_get_tuple(const struct sk_buff *skb,
unsigned int nhoff,
@@ -559,11 +416,8 @@ static int early_drop(struct list_head *chain)
return dropped;
}
-static struct nf_conn *
-__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_tuple *repl,
- const struct nf_conntrack_l3proto *l3proto,
- u_int32_t features)
+struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_tuple *repl)
{
struct nf_conn *conntrack = NULL;
@@ -589,65 +443,28 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
}
}
- /* find features needed by this conntrack. */
- features |= l3proto->get_features(orig);
-
- DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);
-
- read_lock_bh(&nf_ct_cache_lock);
-
- if (unlikely(!nf_ct_cache[features].use)) {
- DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
- features);
- goto out;
- }
-
- conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
+ conntrack = kmem_cache_zalloc(nf_conntrack_cachep, GFP_ATOMIC);
if (conntrack == NULL) {
- DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n");
- goto out;
+ DEBUGP("nf_conntrack_alloc: Can't alloc conntrack.\n");
+ atomic_dec(&nf_conntrack_count);
+ return ERR_PTR(-ENOMEM);
}
- memset(conntrack, 0, nf_ct_cache[features].size);
- conntrack->features = features;
atomic_set(&conntrack->ct_general.use, 1);
conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
/* Don't set timer yet: wait for confirmation */
setup_timer(&conntrack->timeout, death_by_timeout,
(unsigned long)conntrack);
- read_unlock_bh(&nf_ct_cache_lock);
return conntrack;
-out:
- read_unlock_bh(&nf_ct_cache_lock);
- atomic_dec(&nf_conntrack_count);
- return conntrack;
-}
-
-struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_tuple *repl)
-{
- struct nf_conntrack_l3proto *l3proto;
- struct nf_conn *ct;
-
- rcu_read_lock();
- l3proto = __nf_ct_l3proto_find(orig->src.l3num);
- ct = __nf_conntrack_alloc(orig, repl, l3proto, 0);
- rcu_read_unlock();
-
- return ct;
}
EXPORT_SYMBOL_GPL(nf_conntrack_alloc);
void nf_conntrack_free(struct nf_conn *conntrack)
{
- u_int32_t features = conntrack->features;
- NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM);
nf_ct_ext_free(conntrack);
- DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
- conntrack);
- kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
+ kmem_cache_free(nf_conntrack_cachep, conntrack);
atomic_dec(&nf_conntrack_count);
}
EXPORT_SYMBOL_GPL(nf_conntrack_free);
@@ -665,14 +482,13 @@ init_conntrack(const struct nf_conntrack_tuple *tuple,
struct nf_conn_help *help;
struct nf_conntrack_tuple repl_tuple;
struct nf_conntrack_expect *exp;
- u_int32_t features = 0;
if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto)) {
DEBUGP("Can't invert tuple.\n");
return NULL;
}
- conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto, features);
+ conntrack = nf_conntrack_alloc(tuple, &repl_tuple);
if (conntrack == NULL || IS_ERR(conntrack)) {
DEBUGP("Can't allocate conntrack.\n");
return (struct nf_conntrack_tuple_hash *)conntrack;
@@ -1128,8 +944,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_flush);
supposed to kill the mall. */
void nf_conntrack_cleanup(void)
{
- int i;
-
rcu_assign_pointer(ip_ct_attach, NULL);
/* This makes sure all current packets have passed through
@@ -1150,14 +964,7 @@ void nf_conntrack_cleanup(void)
rcu_assign_pointer(nf_ct_destroy, NULL);
- for (i = 0; i < NF_CT_F_NUM; i++) {
- if (nf_ct_cache[i].use == 0)
- continue;
-
- NF_CT_ASSERT(nf_ct_cache[i].use == 1);
- nf_ct_cache[i].use = 1;
- nf_conntrack_unregister_cache(i);
- }
+ kmem_cache_destroy(nf_conntrack_cachep);
kmem_cache_destroy(nf_conntrack_expect_cachep);
free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
nf_conntrack_htable_size);
@@ -1267,9 +1074,10 @@ int __init nf_conntrack_init(void)
goto err_out;
}
- ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
- sizeof(struct nf_conn));
- if (ret < 0) {
+ nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+ sizeof(struct nf_conn),
+ 0, 0, NULL, NULL);
+ if (!nf_conntrack_cachep) {
printk(KERN_ERR "Unable to create nf_conn slab cache\n");
goto err_free_hash;
}
@@ -1307,7 +1115,7 @@ out_fini_proto:
out_free_expect_slab:
kmem_cache_destroy(nf_conntrack_expect_cachep);
err_free_conntrack_slab:
- nf_conntrack_unregister_cache(NF_CT_F_BASIC);
+ kmem_cache_destroy(nf_conntrack_cachep);
err_free_hash:
free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
nf_conntrack_htable_size);
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
index cbd96f3c1b89..2fd0f11b8fb2 100644
--- a/net/netfilter/nf_conntrack_l3proto_generic.c
+++ b/net/netfilter/nf_conntrack_l3proto_generic.c
@@ -76,12 +76,6 @@ generic_prepare(struct sk_buff **pskb, unsigned int hooknum,
}
-static u_int32_t generic_get_features(const struct nf_conntrack_tuple *tuple)
-
-{
- return NF_CT_F_BASIC;
-}
-
struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = {
.l3proto = PF_UNSPEC,
.name = "unknown",
@@ -90,6 +84,5 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_generic = {
.print_tuple = generic_print_tuple,
.print_conntrack = generic_print_conntrack,
.prepare = generic_prepare,
- .get_features = generic_get_features,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_generic);