author    David Teigland <teigland@redhat.com>  2012-05-10 10:18:07 -0500
committer David Teigland <teigland@redhat.com>  2012-07-16 14:16:19 -0500
commit    c04fecb4d9f7753e0cbff7edd03ec68f8721cdce (patch)
tree      ecd82017d49c7bb03b96a8ad1eb4e9a5bb84409a /fs/dlm/lockspace.c
parent    ecc728467fb0c3e350b57fc66ed7585c15be50f5 (diff)
dlm: use rsbtbl as resource directory
Remove the dir hash table (dirtbl), and use the rsb hash table (rsbtbl) as the resource directory. It has always been an unnecessary duplication of information.

This improves efficiency by using a single rsbtbl lookup in many cases where both rsbtbl and dirtbl lookups were needed previously. This eliminates the need to handle cases of rsbtbl and dirtbl being out of sync.

In many cases there will be memory savings because the dir hash table no longer exists.

Signed-off-by: David Teigland <teigland@redhat.com>
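To make the structural idea concrete, the following is a minimal user-space C sketch, not the kernel code and not DLM's API: a single hash table whose entries carry both the resource and its master node, so one lookup does the work that previously required separate rsbtbl and dirtbl lookups. Every identifier in the sketch (res_entry, res_table, res_hash, res_lookup, res_add) is a hypothetical illustration, not a DLM symbol.

/*
 * Sketch only: one hash table maps a resource name to an entry that also
 * records the master node id, so the "directory" information is found by
 * the same lookup that finds the resource itself.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TABLE_SIZE 1024

struct res_entry {
	char name[64];           /* resource name */
	int master_nodeid;       /* directory info lives on the same entry */
	struct res_entry *next;  /* hash chain */
};

static struct res_entry *res_table[TABLE_SIZE];

static unsigned int res_hash(const char *name)
{
	unsigned int h = 5381;

	while (*name)
		h = h * 33 + (unsigned char)*name++;
	return h % TABLE_SIZE;
}

/* One lookup yields both the resource and its master node. */
static struct res_entry *res_lookup(const char *name)
{
	struct res_entry *e;

	for (e = res_table[res_hash(name)]; e; e = e->next)
		if (!strcmp(e->name, name))
			return e;
	return NULL;
}

static struct res_entry *res_add(const char *name, int master_nodeid)
{
	unsigned int b = res_hash(name);
	struct res_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	snprintf(e->name, sizeof(e->name), "%s", name);
	e->master_nodeid = master_nodeid;
	e->next = res_table[b];
	res_table[b] = e;
	return e;
}

int main(void)
{
	struct res_entry *e;

	res_add("lockA", 3);
	e = res_lookup("lockA");
	if (e)
		printf("resource %s mastered on node %d\n",
		       e->name, e->master_nodeid);
	return 0;
}

The sketch shows only that the directory information (the master node id) can live on the same entry the resource lookup already returns; that is what lets the patch below drop ls_dirtbl, its per-bucket locks, and the associated error-path cleanup.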
Diffstat (limited to 'fs/dlm/lockspace.c')
-rw-r--r--  fs/dlm/lockspace.c  23
1 file changed, 1 insertion(+), 22 deletions(-)
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index ca506abbdd3b..065bb75ed609 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -509,17 +509,6 @@ static int new_lockspace(const char *name, const char *cluster,
idr_init(&ls->ls_lkbidr);
spin_lock_init(&ls->ls_lkbidr_spin);
- size = dlm_config.ci_dirtbl_size;
- ls->ls_dirtbl_size = size;
-
- ls->ls_dirtbl = vmalloc(sizeof(struct dlm_dirtable) * size);
- if (!ls->ls_dirtbl)
- goto out_lkbfree;
- for (i = 0; i < size; i++) {
- INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
- spin_lock_init(&ls->ls_dirtbl[i].lock);
- }
-
INIT_LIST_HEAD(&ls->ls_waiters);
mutex_init(&ls->ls_waiters_mutex);
INIT_LIST_HEAD(&ls->ls_orphans);
@@ -567,7 +556,7 @@ static int new_lockspace(const char *name, const char *cluster,
ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
if (!ls->ls_recover_buf)
- goto out_dirfree;
+ goto out_lkbfree;
ls->ls_slot = 0;
ls->ls_num_slots = 0;
@@ -648,8 +637,6 @@ static int new_lockspace(const char *name, const char *cluster,
list_del(&ls->ls_list);
spin_unlock(&lslist_lock);
kfree(ls->ls_recover_buf);
- out_dirfree:
- vfree(ls->ls_dirtbl);
out_lkbfree:
idr_destroy(&ls->ls_lkbidr);
vfree(ls->ls_rsbtbl);
@@ -779,13 +766,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
kfree(ls->ls_recover_buf);
/*
- * Free direntry structs.
- */
-
- dlm_dir_clear(ls);
- vfree(ls->ls_dirtbl);
-
- /*
* Free all lkb's in idr
*/
@@ -826,7 +806,6 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_purge_requestqueue(ls);
kfree(ls->ls_recover_args);
- dlm_clear_free_entries(ls);
dlm_clear_members(ls);
dlm_clear_members_gone(ls);
kfree(ls->ls_node_array);