author     Dave Chinner <dchinner@redhat.com>    2011-07-08 14:14:44 +1000
committer  Al Viro <viro@zeniv.linux.org.uk>     2011-07-20 20:47:41 -0400
commit     0e1fdafd93980eac62e778798549ce0f6073905c (patch)
tree       a8b21525d195d436fa738e9a470e64eaa21e6736 /fs/super.c
parent     4f8c19fdf3f97402b68f058b1c72a6c7166c9e59 (diff)
superblock: add filesystem shrinker operations
Now that we have a per-superblock shrinker implementation, we can add a filesystem-specific callout to it to allow filesystem internal caches to be shrunk by the superblock shrinker.

Rather than perpetuate the multipurpose shrinker callback API (i.e. nr_to_scan == 0 meaning "tell me how many objects are freeable in the cache"), two operations will be added. The first will return the number of objects that are freeable, the second is the actual shrinker call.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
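The two callouts land in struct super_operations. As a rough illustration of how a filesystem might wire them up, here is a minimal sketch for a hypothetical "foofs"; the member names come from this patch, but the prototypes are only inferred from the calls in prune_super() below, and foofs_count_reclaimable()/foofs_prune_caches() are invented placeholders, not real helpers:

/* Sketch only: foofs and its helpers are hypothetical. */
static int foofs_nr_cached_objects(struct super_block *sb)
{
        /* report how many fs-internal objects could be freed right now */
        return foofs_count_reclaimable(sb);
}

static void foofs_free_cached_objects(struct super_block *sb, int nr)
{
        /* try to free up to nr objects from the fs-internal caches */
        foofs_prune_caches(sb, nr);
}

static const struct super_operations foofs_super_ops = {
        /* ... existing operations ... */
        .nr_cached_objects      = foofs_nr_cached_objects,
        .free_cached_objects    = foofs_free_cached_objects,
};

Filesystems that leave both operations NULL are unaffected: as the diff below shows, prune_super() only calls them when they are set.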
Diffstat (limited to 'fs/super.c')
-rw-r--r--  fs/super.c  45
1 file changed, 33 insertions, 12 deletions
diff --git a/fs/super.c b/fs/super.c
index 37a75410079e..5101f0544960 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -48,7 +48,8 @@ DEFINE_SPINLOCK(sb_lock);
 static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
 {
         struct super_block *sb;
-        int     count;
+        int     fs_objects = 0;
+        int     total_objects;
 
         sb = container_of(shrink, struct super_block, s_shrink);
 
@@ -62,22 +63,42 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
         if (!grab_super_passive(sb))
                 return -1;
 
-        if (sc->nr_to_scan) {
-                /* proportion the scan between the two caches */
-                int     total;
+        if (sb->s_op && sb->s_op->nr_cached_objects)
+                fs_objects = sb->s_op->nr_cached_objects(sb);
+
+        total_objects = sb->s_nr_dentry_unused +
+                        sb->s_nr_inodes_unused + fs_objects + 1;
 
-                total = sb->s_nr_dentry_unused + sb->s_nr_inodes_unused + 1;
-                count = (sc->nr_to_scan * sb->s_nr_dentry_unused) / total;
+        if (sc->nr_to_scan) {
+                int     dentries;
+                int     inodes;
+
+                /* proportion the scan between the caches */
+                dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
+                                                        total_objects;
+                inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
+                                                        total_objects;
+                if (fs_objects)
+                        fs_objects = (sc->nr_to_scan * fs_objects) /
+                                                        total_objects;
+                /*
+                 * prune the dcache first as the icache is pinned by it, then
+                 * prune the icache, followed by the filesystem specific caches
+                 */
+                prune_dcache_sb(sb, dentries);
+                prune_icache_sb(sb, inodes);
 
-                /* prune dcache first as icache is pinned by it */
-                prune_dcache_sb(sb, count);
-                prune_icache_sb(sb, sc->nr_to_scan - count);
+                if (fs_objects && sb->s_op->free_cached_objects) {
+                        sb->s_op->free_cached_objects(sb, fs_objects);
+                        fs_objects = sb->s_op->nr_cached_objects(sb);
+                }
+                total_objects = sb->s_nr_dentry_unused +
+                                sb->s_nr_inodes_unused + fs_objects;
         }
 
-        count = ((sb->s_nr_dentry_unused + sb->s_nr_inodes_unused) / 100)
-                                        * sysctl_vfs_cache_pressure;
+        total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
         drop_super(sb);
-        return count;
+        return total_objects;
 }
 
 /**
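To make the proportioning concrete, take an invented example: sc->nr_to_scan = 128 with 300 unused dentries, 100 unused inodes and 100 filesystem-internal objects. total_objects is then 501 (the "+ 1" only guards against dividing by zero when every count is zero), so the pass prunes 76 dentries, 25 inodes and 25 filesystem objects. The standalone sketch below merely replays that arithmetic; the variable names mirror the patch, the numbers are made up.

/* Userspace illustration of the split done in prune_super(); not kernel code. */
#include <stdio.h>

int main(void)
{
        int nr_to_scan = 128;           /* sc->nr_to_scan */
        int nr_dentry_unused = 300;     /* sb->s_nr_dentry_unused */
        int nr_inodes_unused = 100;     /* sb->s_nr_inodes_unused */
        int fs_objects = 100;           /* sb->s_op->nr_cached_objects(sb) */

        /* "+ 1" keeps the divisor non-zero even when all caches are empty */
        int total_objects = nr_dentry_unused + nr_inodes_unused +
                            fs_objects + 1;

        int dentries = (nr_to_scan * nr_dentry_unused) / total_objects;
        int inodes = (nr_to_scan * nr_inodes_unused) / total_objects;
        int fs = (nr_to_scan * fs_objects) / total_objects;

        printf("prune %d dentries, %d inodes, %d fs objects\n",
               dentries, inodes, fs);   /* prints 76, 25, 25 */
        return 0;
}

On the counting path (sc->nr_to_scan == 0) the same total feeds the return value instead: with the default vfs_cache_pressure of 100, (501 / 100) * 100 = 500 objects would be reported to the shrinker as freeable.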