author		Al Viro <viro@zeniv.linux.org.uk>	2016-07-03 23:15:21 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2016-07-03 23:15:21 -0400
commit		2864f301424227d9d3bde6d550bc224a83535b46
tree		7d6535793143e2d542fa69bfdf6ba776b8846ccb /fs
parent		1a695a905c18548062509178b98bc91e67510864
iget_locked et al.: make sure we don't return bad inodes
If one thread does iget_locked(), proceeds to try and set the new inode up, and fails, the inode will be unhashed and dropped. However, another thread doing ilookup()/iget_locked() in the middle of that would end up finding a half-set-up inode, grabbing a reference, waiting for it to come unlocked and getting the resulting bad inode. It's a race (if that ilookup() had been called just after the failure of the setup attempt it wouldn't have found the sucker at all), and it is particularly unpleasant in cases where the failure is transient/caller-dependent/etc.

While it can be dealt with in the callers, there's no reason not to handle it in the fs/inode.c primitives, especially since the cost is trivial.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
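To see where the half-set-up window comes from, here is a minimal caller-side sketch of the usual iget_locked() pattern. The foofs_iget()/foofs_read_inode() names are hypothetical; iget_locked(), unlock_new_inode() and iget_failed() are the fs/inode.c primitives the message above refers to.

/*
 * Sketch of a typical filesystem lookup built on iget_locked().
 * foofs_read_inode() is a made-up helper standing in for "read the
 * on-disk inode and initialise the VFS inode from it".
 */
struct inode *foofs_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* already cached and fully set up */

	err = foofs_read_inode(inode);
	if (err) {
		/*
		 * Setup failed: tear the new inode down.  Per the message
		 * above, it gets unhashed and dropped, but a concurrent
		 * iget_locked()/ilookup() may already have found it and
		 * taken a reference; without the retry this patch adds it
		 * would wait for I_NEW to clear and come back with a bad
		 * inode.
		 */
		iget_failed(inode);
		return ERR_PTR(err);
	}

	unlock_new_inode(inode);
	return inode;
}

With the retry added below, the racing lookup notices that the inode it waited on has been unhashed, drops its reference and goes around again: an iget_locked() caller then sets up a fresh inode, and an ilookup()/ilookup5() caller simply sees a miss, instead of being handed the bad inode.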
Diffstat (limited to 'fs')
-rw-r--r--	fs/inode.c | 41
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 4ccbc21b30ce..d123fe4b6f7d 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1019,13 +1019,17 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
 	struct inode *inode;
-
+again:
 	spin_lock(&inode_hash_lock);
 	inode = find_inode(sb, head, test, data);
 	spin_unlock(&inode_hash_lock);
 
 	if (inode) {
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
 		return inode;
 	}
@@ -1062,6 +1066,10 @@ struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
 	}
 	return inode;
@@ -1089,12 +1097,16 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 	struct inode *inode;
-
+again:
 	spin_lock(&inode_hash_lock);
 	inode = find_inode_fast(sb, head, ino);
 	spin_unlock(&inode_hash_lock);
 	if (inode) {
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
 		return inode;
 	}
@@ -1129,6 +1141,10 @@ struct inode *iget_locked(struct super_block *sb, unsigned long ino)
 		destroy_inode(inode);
 		inode = old;
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
 	}
 	return inode;
 }
@@ -1264,10 +1280,16 @@ EXPORT_SYMBOL(ilookup5_nowait);
 struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
 		int (*test)(struct inode *, void *), void *data)
 {
-	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);
-
-	if (inode)
+	struct inode *inode;
+again:
+	inode = ilookup5_nowait(sb, hashval, test, data);
+	if (inode) {
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
+	}
 	return inode;
 }
 EXPORT_SYMBOL(ilookup5);
@@ -1284,13 +1306,18 @@ struct inode *ilookup(struct super_block *sb, unsigned long ino)
 {
 	struct hlist_head *head = inode_hashtable + hash(sb, ino);
 	struct inode *inode;
-
+again:
 	spin_lock(&inode_hash_lock);
 	inode = find_inode_fast(sb, head, ino);
 	spin_unlock(&inode_hash_lock);
 
-	if (inode)
+	if (inode) {
 		wait_on_inode(inode);
+		if (unlikely(inode_unhashed(inode))) {
+			iput(inode);
+			goto again;
+		}
+	}
 	return inode;
 }
 EXPORT_SYMBOL(ilookup);
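For comparison, the commit message's "it can be dealt with in the callers" amounts to a defensive wrapper roughly like the one below, which this patch makes unnecessary. foofs_ilookup_safe() is a hypothetical name; is_bad_inode() and inode_unhashed() are existing helpers.

/*
 * Hypothetical lookup-side workaround: never hand back the bad inode
 * that a racing, failed setup may have left behind.  With the retry now
 * done inside ilookup()/iget_locked() themselves, callers no longer
 * need checks like this.
 */
static struct inode *foofs_ilookup_safe(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = ilookup(sb, ino);

	if (inode && (is_bad_inode(inode) || inode_unhashed(inode))) {
		iput(inode);
		inode = NULL;		/* treat it as a cache miss */
	}
	return inode;
}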