author		Michal Hocko <mhocko@suse.com>		2016-04-07 17:12:31 +0200
committer	Ingo Molnar <mingo@kernel.org>		2016-04-22 08:58:33 +0200
commit		916633a403702549d37ea353e63a68e5b0dc27ad
tree		2e36d9a73b0b8781aac44e2b99fa749b7122d2ce	/arch/x86/include/asm/rwsem.h
parent		664b4e24c6145830885e854195376351b0eb3eee
locking/rwsem: Provide down_write_killable()
Now that all the architectures implement the necessary glue code we can introduce down_write_killable(). The only difference wrt. regular down_write() is that the slow path waits in TASK_KILLABLE state and the interruption by the fatal signal is reported as -EINTR to the caller.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Zankel <chris@zankel.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Signed-off-by: Jason Low <jason.low2@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-alpha@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-ia64@vger.kernel.org
Cc: linux-s390@vger.kernel.org
Cc: linux-sh@vger.kernel.org
Cc: linux-xtensa@linux-xtensa.org
Cc: sparclinux@vger.kernel.org
Link: http://lkml.kernel.org/r/1460041951-22347-12-git-send-email-mhocko@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
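A usage sketch (not part of this patch, shown only to illustrate the new API): a writer that must not sleep unkillably checks the return value and backs out when a fatal signal arrives. The helper name frob_address_space() below is hypothetical; down_write_killable(), up_write() and the mm->mmap_sem rw_semaphore are the real interfaces this series targets.

#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>

/*
 * Hypothetical caller: take mmap_sem for writing, but let a fatal
 * signal abort the wait instead of sleeping uninterruptibly.
 */
static int frob_address_space(struct mm_struct *mm)
{
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;	/* killed while waiting in the slow path */

	/* ... modify the address space under the write lock ... */

	up_write(&mm->mmap_sem);
	return 0;
}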
Diffstat (limited to 'arch/x86/include/asm/rwsem.h')
-rw-r--r--	arch/x86/include/asm/rwsem.h	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
index d759c5f70f49..453744c1d347 100644
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -102,9 +102,9 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
#define ____down_write(sem, slow_path) \
({ \
long tmp; \
- struct rw_semaphore* ret = sem; \
+ struct rw_semaphore* ret; \
asm volatile("# beginning down_write\n\t" \
- LOCK_PREFIX " xadd %1,(%2)\n\t" \
+ LOCK_PREFIX " xadd %1,(%3)\n\t" \
/* adds 0xffff0001, returns the old value */ \
" test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
/* was the active mask 0 before? */\
@@ -112,7 +112,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
" call " slow_path "\n" \
"1:\n" \
"# ending down_write" \
- : "+m" (sem->count), "=d" (tmp), "+a" (ret) \
+ : "+m" (sem->count), "=d" (tmp), "=a" (ret) \
: "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
: "memory", "cc"); \
ret; \
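With ret now a write-only "=a" output instead of a pre-initialized "+a" in/out operand, the xadd has to address the semaphore through the "a" (sem) input, which is operand %3 (operands are numbered outputs first: %0 is sem->count, %1 is tmp, %2 is ret, then %3 is sem). As a minimal stand-alone sketch of that extended-asm pattern, and not kernel code, here is a userspace (x86-64 only) lock xadd whose old value comes back through a register operand while the counter address stays a plain input; all names here are illustrative.

#include <stdio.h>

/* Atomically do: old = *count; *count += bias; return old. */
static long xadd_bias(long *count, long bias)
{
	long old = bias;

	asm volatile("lock xaddq %0, (%1)"
		     : "+r" (old)	/* in: bias, out: previous *count */
		     : "r" (count)	/* address of the counter */
		     : "memory", "cc");
	return old;
}

int main(void)
{
	long count = 0;
	long old = xadd_bias(&count, 1);

	printf("old=%ld new=%ld\n", old, count);	/* old=0 new=1 */
	return 0;
}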