Diffstat (limited to 'fs')
-rw-r--r--  fs/autofs4/autofs_i.h  |  1
-rw-r--r--  fs/autofs4/expire.c    |  2
-rw-r--r--  fs/buffer.c            | 21
-rw-r--r--  fs/dcache.c            |  7
-rw-r--r--  fs/eventpoll.c         |  4
-rw-r--r--  fs/exec.c              |  2
-rw-r--r--  fs/file.c              |  6
-rw-r--r--  fs/namespace.c         |  8
-rw-r--r--  fs/ntfs/aops.c         | 14
-rw-r--r--  fs/timerfd.c           |  2
10 files changed, 32 insertions(+), 35 deletions(-)
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index eb1cc92cd67d..c5a1ce74ff6b 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <asm/current.h>
#include <asm/uaccess.h>
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 1feb68ecef95..859badd23da6 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -171,7 +171,7 @@ again:
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
- cpu_relax();
+ cpu_chill();
goto relock;
}
spin_unlock(&p->d_lock);
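
Note: the cpu_relax() -> cpu_chill() substitutions here (and in the dcache.c and namespace.c hunks below) replace a busy-wait in a trylock retry loop with a short sleep, which is why those files also gain #include <linux/delay.h>. On PREEMPT_RT the spinner may be blocking the very task that holds the lock, so spinning can live-lock; sleeping briefly lets the owner run. A minimal sketch of the definition the RT tree is assumed to add to <linux/delay.h> (not part of this diff):

#ifdef CONFIG_PREEMPT_RT_FULL
# define cpu_chill()	msleep(1)	/* yield long enough for the lock holder to run */
#else
# define cpu_chill()	cpu_relax()
#endif
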
diff --git a/fs/buffer.c b/fs/buffer.c
index 36d66653b931..e4fa8d8e5aa5 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -281,8 +281,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
* decide that the page is now completely done.
*/
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -295,8 +294,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
/*
* If none of the buffers had errors and they are all
@@ -308,9 +306,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
/*
@@ -344,8 +340,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_write(bh);
unlock_buffer(bh);
@@ -357,15 +352,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
}
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
end_page_writeback(page);
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
EXPORT_SYMBOL(end_buffer_async_write);
@@ -3186,6 +3178,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
+ buffer_head_init_locks(ret);
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
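
Note: bh_uptodate_lock_irqsave()/bh_uptodate_unlock_irqrestore() fold the open-coded local_irq_save() + bit_spin_lock(BH_Uptodate_Lock, ...) pairs into helpers that can be given an RT-friendly implementation, and buffer_head_init_locks() initializes whatever lock that implementation uses. A sketch of what the helpers are assumed to look like in <linux/buffer_head.h>; the b_uptodate_lock spinlock is an RT-patch addition to struct buffer_head and is not shown in this diff:

static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
{
	unsigned long flags;

#ifndef CONFIG_PREEMPT_RT_BASE
	/* mainline: interrupts off + bit spinlock in b_state */
	local_irq_save(flags);
	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
#else
	/* RT: a real (sleeping) spinlock added to the buffer_head */
	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
#endif
	return flags;
}

static inline void
bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
	local_irq_restore(flags);
#else
	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
#endif
}

Folding unlock and irq-restore into one call is also why the still_busy: paths above lose their trailing return and shrink by two lines each.
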
diff --git a/fs/dcache.c b/fs/dcache.c
index b60ddc41d783..04d701684fb7 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -37,6 +37,7 @@
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
+#include <linux/delay.h>
#include "internal.h"
#include "mount.h"
@@ -454,7 +455,7 @@ static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
if (inode && !spin_trylock(&inode->i_lock)) {
relock:
spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
return dentry; /* try again with same dentry */
}
if (IS_ROOT(dentry))
@@ -840,7 +841,7 @@ relock:
if (!spin_trylock(&dentry->d_lock)) {
spin_unlock(&dcache_lru_lock);
- cpu_relax();
+ cpu_chill();
goto relock;
}
@@ -2022,7 +2023,7 @@ again:
if (dentry->d_count == 1) {
if (inode && !spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
- cpu_relax();
+ cpu_chill();
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 739b0985b398..95518729a3b2 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -488,12 +488,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
{
- int this_cpu = get_cpu();
+ int this_cpu = get_cpu_light();
ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
- put_cpu();
+ put_cpu_light();
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
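
Note: get_cpu_light()/put_cpu_light() keep the task on its current CPU without disabling preemption, so the nested-call accounting stays per-CPU while remaining preemptible on RT. A sketch of the assumed definitions (migrate_disable()/migrate_enable() are RT-patch primitives):

#ifndef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	get_cpu()	/* preempt_disable() + CPU id */
# define put_cpu_light()	put_cpu()
#else
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#endif
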
diff --git a/fs/exec.c b/fs/exec.c
index b1fd2025e59a..db02d76e4977 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -840,10 +840,12 @@ static int exec_mmap(struct mm_struct *mm)
}
}
task_lock(tsk);
+ local_irq_disable_rt();
active_mm = tsk->active_mm;
tsk->mm = mm;
tsk->active_mm = mm;
activate_mm(active_mm, mm);
+ local_irq_enable_rt();
task_unlock(tsk);
arch_pick_mmap_layout(mm);
if (old_mm) {
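
Note: local_irq_disable_rt()/local_irq_enable_rt() are assumed to disable interrupts only on an RT kernel and to compile away on mainline, so exec_mmap() behaves exactly as before outside RT. A sketch of the assumed definitions:

#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_disable_rt()	local_irq_disable()
# define local_irq_enable_rt()	local_irq_enable()
#else
# define local_irq_disable_rt()	do { } while (0)
# define local_irq_enable_rt()	do { } while (0)
#endif
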
diff --git a/fs/file.c b/fs/file.c
index ba3f6053025c..9f5343d36f37 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -105,14 +105,14 @@ void free_fdtable_rcu(struct rcu_head *rcu)
kfree(fdt->open_fds);
kfree(fdt);
} else {
- fddef = &get_cpu_var(fdtable_defer_list);
+ fddef = &per_cpu(fdtable_defer_list, get_cpu_light());
spin_lock(&fddef->lock);
fdt->next = fddef->next;
fddef->next = fdt;
/* vmallocs are handled from the workqueue context */
schedule_work(&fddef->wq);
spin_unlock(&fddef->lock);
- put_cpu_var(fdtable_defer_list);
+ put_cpu_light();
}
}
@@ -421,7 +421,7 @@ struct files_struct init_files = {
.close_on_exec = init_files.close_on_exec_init,
.open_fds = init_files.open_fds_init,
},
- .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
+ .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
};
/*
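
Note: in free_fdtable_rcu() the per-CPU deferral list is now reached via per_cpu(..., get_cpu_light()) instead of get_cpu_var(), because get_cpu_var() disables preemption and fddef->lock becomes a sleeping lock on RT. Roughly what the mainline accessors expand to, for comparison (a sketch, not copied from <linux/percpu.h>):

#define get_cpu_var(var)	(*({ preempt_disable(); this_cpu_ptr(&(var)); }))
#define put_cpu_var(var)	do { (void)&(var); preempt_enable(); } while (0)

The init_files hunk is a small correctness cleanup on top: the lock-class name passed to __SPIN_LOCK_UNLOCKED() now refers to init_files, the structure actually being initialized, rather than init_task.
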
diff --git a/fs/namespace.c b/fs/namespace.c
index e6081996c9a2..4d20e76c0be6 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -20,6 +20,7 @@
#include <linux/fs_struct.h> /* get_fs_root et.al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include "pnode.h"
#include "internal.h"
@@ -313,8 +314,11 @@ int mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
- while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
- cpu_relax();
+ while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) {
+ preempt_enable();
+ cpu_chill();
+ preempt_disable();
+ }
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index fa9c05f97af4..f5d45653bd27 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+ flags = bh_uptodate_lock_irqsave(first);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
+ bh_uptodate_unlock_irqrestore(first, flags);
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
@@ -146,13 +144,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
recs = PAGE_CACHE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
- local_irq_save(flags);
+ local_irq_save_nort(flags);
kaddr = kmap_atomic(page);
for (i = 0; i < recs; i++)
post_read_mst_fixup((NTFS_RECORD*)(kaddr +
i * rec_size), rec_size);
kunmap_atomic(kaddr);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
flush_dcache_page(page);
if (likely(page_uptodate && !PageError(page)))
SetPageUptodate(page);
@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
unlock_page(page);
return;
still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
+ bh_uptodate_unlock_irqrestore(first, flags);
}
/**
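
Note: the _nort ("no RT") variants keep interrupts disabled around the MST fixup loop on mainline, but on RT are assumed to degrade to saving the flags only, since a hard-IRQ-off section of that length is undesirable there and no longer required. A sketch of the assumed definitions:

#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
#else
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif
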
diff --git a/fs/timerfd.c b/fs/timerfd.c
index dffeb3795af1..57f0e4e86f0a 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -313,7 +313,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
break;
spin_unlock_irq(&ctx->wqh.lock);
- cpu_relax();
+ hrtimer_wait_for_timer(&ctx->tmr);
}
/*
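
Note: timerfd_settime() used to spin with cpu_relax() until a concurrently running hrtimer callback finished; on RT the callback runs in a preemptible softirq thread, so spinning can invert priorities. hrtimer_wait_for_timer() sleeps until the callback completes instead. A sketch of the assumed helper; the wait queue and the irqsafe flag are RT-patch additions to the hrtimer code, so the field names here are assumptions:

void hrtimer_wait_for_timer(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;

	/* irqsafe timers still run in hard irq context; nothing to wait for */
	if (base && base->cpu_base && !timer->irqsafe)
		wait_event(base->cpu_base->wait,
			   !(timer->state & HRTIMER_STATE_CALLBACK));
}
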