Diffstat (limited to 'fs/gfs2/quota.c')
-rw-r--r--  fs/gfs2/quota.c   342
1 file changed, 170 insertions(+), 172 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 98236d0df3ca..8bec0e3192dd 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -52,6 +52,11 @@
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist_bl.h>
+#include <linux/bit_spinlock.h>
+#include <linux/jhash.h>
+#include <linux/vmalloc.h>
#include "gfs2.h"
#include "incore.h"
@@ -67,16 +72,44 @@
#include "inode.h"
#include "util.h"
-struct gfs2_quota_change_host {
- u64 qc_change;
- u32 qc_flags; /* GFS2_QCF_... */
- struct kqid qc_id;
-};
+#define GFS2_QD_HASH_SHIFT 12
+#define GFS2_QD_HASH_SIZE (1 << GFS2_QD_HASH_SHIFT)
+#define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
-/* Lock order: qd_lock -> qd->lockref.lock -> lru lock */
+/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
+/* -> sd_bitmap_lock */
static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;
+static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];
+
+static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
+ const struct kqid qid)
+{
+ unsigned int h;
+
+ h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
+ h = jhash(&qid, sizeof(struct kqid), h);
+
+ return h & GFS2_QD_HASH_MASK;
+}
+
+static inline void spin_lock_bucket(unsigned int hash)
+{
+ hlist_bl_lock(&qd_hash_table[hash]);
+}
+
+static inline void spin_unlock_bucket(unsigned int hash)
+{
+ hlist_bl_unlock(&qd_hash_table[hash]);
+}
+
+static void gfs2_qd_dealloc(struct rcu_head *rcu)
+{
+ struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
+ kmem_cache_free(gfs2_quotad_cachep, qd);
+}
+
static void gfs2_qd_dispose(struct list_head *list)
{
struct gfs2_quota_data *qd;
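
Note: the per-bucket locks above cost no extra memory. An hlist_bl_head is a single pointer whose low bit doubles as a bit spinlock, so hlist_bl_lock() amounts to (roughly, per <linux/list_bl.h>):

	static inline void hlist_bl_lock(struct hlist_bl_head *b)
	{
		bit_spin_lock(0, (unsigned long *)b);
	}

which is why the 4096-entry qd_hash_table can be locked at bucket granularity while remaining a bare array of pointers. The hash itself chains two jhash() calls, folding the superblock pointer into the seed for the kqid hash, so identical quota IDs on different filesystems land in (usually) different buckets.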
@@ -93,6 +126,10 @@ static void gfs2_qd_dispose(struct list_head *list)
list_del(&qd->qd_list);
spin_unlock(&qd_lock);
+ spin_lock_bucket(qd->qd_hash);
+ hlist_bl_del_rcu(&qd->qd_hlist);
+ spin_unlock_bucket(qd->qd_hash);
+
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_count);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
@@ -101,7 +138,7 @@ static void gfs2_qd_dispose(struct list_head *list)
atomic_dec(&sdp->sd_quota_count);
/* Delete it from the common reclaim list */
- kmem_cache_free(gfs2_quotad_cachep, qd);
+ call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
}
}
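
Note: switching from kmem_cache_free() to call_rcu() here is what makes the lockless bucket walk in gfs2_qd_search_bucket() (below) safe. Informally, the lifetime rule being relied on is:

	writer                              reader
	------                              ------
	hlist_bl_del_rcu(&qd->qd_hlist)     rcu_read_lock()
	call_rcu(&qd->qd_rcu,               ... may still dereference qd ...
	         gfs2_qd_dealloc)           rcu_read_unlock()
	<grace period elapses>
	gfs2_qd_dealloc() frees qd          (no reader can still see it)

A reader that raced with the unlink may still be walking over the qd, but gfs2_qd_dealloc() only runs after every such reader has left its RCU read-side critical section, and lockref_get_not_dead() keeps a dying qd from being resurrected in the meantime.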
@@ -171,83 +208,95 @@ static u64 qd2offset(struct gfs2_quota_data *qd)
return offset;
}
-static int qd_alloc(struct gfs2_sbd *sdp, struct kqid qid,
- struct gfs2_quota_data **qdp)
+static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
struct gfs2_quota_data *qd;
int error;
qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
if (!qd)
- return -ENOMEM;
+ return NULL;
+ qd->qd_sbd = sdp;
qd->qd_lockref.count = 1;
spin_lock_init(&qd->qd_lockref.lock);
qd->qd_id = qid;
qd->qd_slot = -1;
INIT_LIST_HEAD(&qd->qd_lru);
+ qd->qd_hash = hash;
error = gfs2_glock_get(sdp, qd2index(qd),
&gfs2_quota_glops, CREATE, &qd->qd_gl);
if (error)
goto fail;
- *qdp = qd;
-
- return 0;
+ return qd;
fail:
kmem_cache_free(gfs2_quotad_cachep, qd);
- return error;
+ return NULL;
}
-static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
- struct gfs2_quota_data **qdp)
+static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
+ const struct gfs2_sbd *sdp,
+ struct kqid qid)
{
- struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
- int error, found;
-
- *qdp = NULL;
+ struct gfs2_quota_data *qd;
+ struct hlist_bl_node *h;
- for (;;) {
- found = 0;
- spin_lock(&qd_lock);
- list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
- if (qid_eq(qd->qd_id, qid) &&
- lockref_get_not_dead(&qd->qd_lockref)) {
- list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
- found = 1;
- break;
- }
+ hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
+ if (!qid_eq(qd->qd_id, qid))
+ continue;
+ if (qd->qd_sbd != sdp)
+ continue;
+ if (lockref_get_not_dead(&qd->qd_lockref)) {
+ list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
+ return qd;
}
+ }
- if (!found)
- qd = NULL;
+ return NULL;
+}
- if (!qd && new_qd) {
- qd = new_qd;
- list_add(&qd->qd_list, &sdp->sd_quota_list);
- atomic_inc(&sdp->sd_quota_count);
- new_qd = NULL;
- }
- spin_unlock(&qd_lock);
+static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
+ struct gfs2_quota_data **qdp)
+{
+ struct gfs2_quota_data *qd, *new_qd;
+ unsigned int hash = gfs2_qd_hash(sdp, qid);
- if (qd) {
- if (new_qd) {
- gfs2_glock_put(new_qd->qd_gl);
- kmem_cache_free(gfs2_quotad_cachep, new_qd);
- }
- *qdp = qd;
- return 0;
- }
+ rcu_read_lock();
+ *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
+ rcu_read_unlock();
- error = qd_alloc(sdp, qid, &new_qd);
- if (error)
- return error;
+ if (qd)
+ return 0;
+
+ new_qd = qd_alloc(hash, sdp, qid);
+ if (!new_qd)
+ return -ENOMEM;
+
+ spin_lock(&qd_lock);
+ spin_lock_bucket(hash);
+ *qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
+ if (qd == NULL) {
+ *qdp = new_qd;
+ list_add(&new_qd->qd_list, &sdp->sd_quota_list);
+ hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
+ atomic_inc(&sdp->sd_quota_count);
}
+ spin_unlock_bucket(hash);
+ spin_unlock(&qd_lock);
+
+ if (qd) {
+ gfs2_glock_put(new_qd->qd_gl);
+ kmem_cache_free(gfs2_quotad_cachep, new_qd);
+ }
+
+ return 0;
}
+
static void qd_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
@@ -268,88 +317,48 @@ static void qd_put(struct gfs2_quota_data *qd)
static int slot_get(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- unsigned int c, o = 0, b;
- unsigned char byte = 0;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+ unsigned int bit;
+ int error = 0;
- spin_lock(&qd_lock);
+ spin_lock(&sdp->sd_bitmap_lock);
+ if (qd->qd_slot_count != 0)
+ goto out;
- if (qd->qd_slot_count++) {
- spin_unlock(&qd_lock);
- return 0;
+ error = -ENOSPC;
+ bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
+ if (bit < sdp->sd_quota_slots) {
+ set_bit(bit, sdp->sd_quota_bitmap);
+ qd->qd_slot = bit;
+out:
+ qd->qd_slot_count++;
}
+ spin_unlock(&sdp->sd_bitmap_lock);
- for (c = 0; c < sdp->sd_quota_chunks; c++)
- for (o = 0; o < PAGE_SIZE; o++) {
- byte = sdp->sd_quota_bitmap[c][o];
- if (byte != 0xFF)
- goto found;
- }
-
- goto fail;
-
-found:
- for (b = 0; b < 8; b++)
- if (!(byte & (1 << b)))
- break;
- qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;
-
- if (qd->qd_slot >= sdp->sd_quota_slots)
- goto fail;
-
- sdp->sd_quota_bitmap[c][o] |= 1 << b;
-
- spin_unlock(&qd_lock);
-
- return 0;
-
-fail:
- qd->qd_slot_count--;
- spin_unlock(&qd_lock);
- return -ENOSPC;
+ return error;
}
static void slot_hold(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
- spin_lock(&qd_lock);
+ spin_lock(&sdp->sd_bitmap_lock);
gfs2_assert(sdp, qd->qd_slot_count);
qd->qd_slot_count++;
- spin_unlock(&qd_lock);
-}
-
-static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
- unsigned int bit, int new_value)
-{
- unsigned int c, o, b = bit;
- int old_value;
-
- c = b / (8 * PAGE_SIZE);
- b %= 8 * PAGE_SIZE;
- o = b / 8;
- b %= 8;
-
- old_value = (bitmap[c][o] & (1 << b));
- gfs2_assert_withdraw(sdp, !old_value != !new_value);
-
- if (new_value)
- bitmap[c][o] |= 1 << b;
- else
- bitmap[c][o] &= ~(1 << b);
+ spin_unlock(&sdp->sd_bitmap_lock);
}
static void slot_put(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
- spin_lock(&qd_lock);
+ spin_lock(&sdp->sd_bitmap_lock);
gfs2_assert(sdp, qd->qd_slot_count);
if (!--qd->qd_slot_count) {
- gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
+ BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
qd->qd_slot = -1;
}
- spin_unlock(&qd_lock);
+ spin_unlock(&sdp->sd_bitmap_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
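
Note: the three slot functions above replace the old per-chunk byte scan with the generic bitmap API on a single flat bitmap, all serialized by the new sd_bitmap_lock. A minimal sketch of the calling convention (bitmap and slots stand in for sdp->sd_quota_bitmap and sdp->sd_quota_slots):

	bit = find_first_zero_bit(bitmap, slots);
	if (bit < slots)                        /* returns 'slots' when full */
		set_bit(bit, bitmap);           /* claim the slot */
	...
	if (!test_and_clear_bit(bit, bitmap))   /* release on the last put */
		BUG();                          /* double-free of a slot */

find_first_zero_bit() returns its size argument when no zero bit exists, which is exactly what the bit < sdp->sd_quota_slots test in slot_get() checks before claiming the slot.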
@@ -427,8 +436,7 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
set_bit(QDF_LOCKED, &qd->qd_flags);
qd->qd_change_sync = qd->qd_change;
- gfs2_assert_warn(sdp, qd->qd_slot_count);
- qd->qd_slot_count++;
+ slot_hold(qd);
return 1;
}
@@ -1214,17 +1222,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
return error;
}
-static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
-{
- const struct gfs2_quota_change *str = buf;
-
- qc->qc_change = be64_to_cpu(str->qc_change);
- qc->qc_flags = be32_to_cpu(str->qc_flags);
- qc->qc_id = make_kqid(&init_user_ns,
- (qc->qc_flags & GFS2_QCF_USER)?USRQUOTA:GRPQUOTA,
- be32_to_cpu(str->qc_id));
-}
-
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
@@ -1232,6 +1229,8 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
unsigned int x, slot = 0;
unsigned int found = 0;
+ unsigned int hash;
+ unsigned int bm_size;
u64 dblock;
u32 extlen = 0;
int error;
@@ -1240,23 +1239,20 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
return -EIO;
sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
- sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
-
+ bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
+ bm_size *= sizeof(unsigned long);
error = -ENOMEM;
-
- sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
- sizeof(unsigned char *), GFP_NOFS);
+ sdp->sd_quota_bitmap = kmalloc(bm_size, GFP_NOFS|__GFP_NOWARN);
+ if (sdp->sd_quota_bitmap == NULL)
+ sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS, PAGE_KERNEL);
if (!sdp->sd_quota_bitmap)
return error;
- for (x = 0; x < sdp->sd_quota_chunks; x++) {
- sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
- if (!sdp->sd_quota_bitmap[x])
- goto fail;
- }
+ memset(sdp->sd_quota_bitmap, 0, bm_size);
for (x = 0; x < blocks; x++) {
struct buffer_head *bh;
+ const struct gfs2_quota_change *qc;
unsigned int y;
if (!extlen) {
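
Note: the bitmap is now sized in unsigned longs (as the bitmap API requires) and allocated with a common fallback idiom: try kmalloc() with __GFP_NOWARN so a large-order failure stays quiet, fall back to __vmalloc() on big filesystems, and free via an is_vmalloc_addr() check (see gfs2_quota_cleanup() below). As a generic helper pair this would look like (hypothetical names; modern kernels provide kvmalloc()/kvfree() for exactly this pattern):

	static void *quota_bitmap_alloc(size_t size)
	{
		void *p = kmalloc(size, GFP_NOFS | __GFP_NOWARN);

		if (!p)
			p = __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
		return p;
	}

	static void quota_bitmap_free(void *p)
	{
		if (is_vmalloc_addr(p))
			vfree(p);
		else
			kfree(p);
	}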
@@ -1274,34 +1270,42 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
goto fail;
}
+ qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
y++, slot++) {
- struct gfs2_quota_change_host qc;
struct gfs2_quota_data *qd;
-
- gfs2_quota_change_in(&qc, bh->b_data +
- sizeof(struct gfs2_meta_header) +
- y * sizeof(struct gfs2_quota_change));
- if (!qc.qc_change)
+ s64 qc_change = be64_to_cpu(qc->qc_change);
+ u32 qc_flags = be32_to_cpu(qc->qc_flags);
+ enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
+ USRQUOTA : GRPQUOTA;
+ struct kqid qc_id = make_kqid(&init_user_ns, qtype,
+ be32_to_cpu(qc->qc_id));
+ qc++;
+ if (!qc_change)
continue;
- error = qd_alloc(sdp, qc.qc_id, &qd);
- if (error) {
+ hash = gfs2_qd_hash(sdp, qc_id);
+ qd = qd_alloc(hash, sdp, qc_id);
+ if (qd == NULL) {
brelse(bh);
goto fail;
}
set_bit(QDF_CHANGE, &qd->qd_flags);
- qd->qd_change = qc.qc_change;
+ qd->qd_change = qc_change;
qd->qd_slot = slot;
qd->qd_slot_count = 1;
spin_lock(&qd_lock);
- gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
+ BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
list_add(&qd->qd_list, &sdp->sd_quota_list);
atomic_inc(&sdp->sd_quota_count);
spin_unlock(&qd_lock);
+ spin_lock_bucket(hash);
+ hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
+ spin_unlock_bucket(hash);
+
found++;
}
@@ -1324,44 +1328,28 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
struct list_head *head = &sdp->sd_quota_list;
struct gfs2_quota_data *qd;
- unsigned int x;
spin_lock(&qd_lock);
while (!list_empty(head)) {
qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
- /*
- * To be removed in due course... we should be able to
- * ensure that all refs to the qd have done by this point
- * so that this rather odd test is not required
- */
- spin_lock(&qd->qd_lockref.lock);
- if (qd->qd_lockref.count > 1 ||
- (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
- spin_unlock(&qd->qd_lockref.lock);
- list_move(&qd->qd_list, head);
- spin_unlock(&qd_lock);
- schedule();
- spin_lock(&qd_lock);
- continue;
- }
- spin_unlock(&qd->qd_lockref.lock);
-
list_del(&qd->qd_list);
+
/* Also remove if this qd exists in the reclaim list */
list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
atomic_dec(&sdp->sd_quota_count);
spin_unlock(&qd_lock);
- if (!qd->qd_lockref.count) {
- gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
- } else
- gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
+ spin_lock_bucket(qd->qd_hash);
+ hlist_bl_del_rcu(&qd->qd_hlist);
+ spin_unlock_bucket(qd->qd_hash);
+
+ gfs2_assert_warn(sdp, !qd->qd_change);
+ gfs2_assert_warn(sdp, !qd->qd_slot_count);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
gfs2_glock_put(qd->qd_gl);
- kmem_cache_free(gfs2_quotad_cachep, qd);
+ call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
spin_lock(&qd_lock);
}
@@ -1370,9 +1358,11 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
if (sdp->sd_quota_bitmap) {
- for (x = 0; x < sdp->sd_quota_chunks; x++)
- kfree(sdp->sd_quota_bitmap[x]);
- kfree(sdp->sd_quota_bitmap);
+ if (is_vmalloc_addr(sdp->sd_quota_bitmap))
+ vfree(sdp->sd_quota_bitmap);
+ else
+ kfree(sdp->sd_quota_bitmap);
+ sdp->sd_quota_bitmap = NULL;
}
}
@@ -1656,3 +1646,11 @@ const struct quotactl_ops gfs2_quotactl_ops = {
.get_dqblk = gfs2_get_dqblk,
.set_dqblk = gfs2_set_dqblk,
};
+
+void __init gfs2_quota_hash_init(void)
+{
+ unsigned i;
+
+ for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
+ INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
+}
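
Note: gfs2_quota_hash_init() only initializes the static bucket heads, so it must run once before any quota lookups; presumably it is declared in quota.h and called from the module init path (not shown in this diff), along the lines of:

	static int __init init_gfs2_fs(void)
	{
		...
		gfs2_quota_hash_init();
		...
	}

That call site is an assumption from context here, not part of this hunk.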