author     Vincent Whitchurch <vincent.whitchurch@axis.com>  2018-11-09 10:12:30 +0100
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2019-12-17 20:37:38 +0100
commit     e82dd54ec0a259c510528ad2ac90f133cd9b9c5d (patch)
tree       0ccecc710aa0f1687ffda186f83513c867e0de0f /arch/arm/include
parent     1830e1a276bd6c5d4355293195037e4e814f9ce0 (diff)
ARM: 8813/1: Make aligned 2-byte getuser()/putuser() atomic on ARMv6+
[ Upstream commit 344eb5539abf3e0b6ce22568c03e86450073e097 ]

getuser() and putuser() (and their underscored variants) use two
strb[t]/ldrb[t] instructions when they are asked to get/put 16 bits.
This means that the read/write is not atomic even when performed to a
16-bit-aligned address.

This leads to problems with vhost: vhost uses __getuser() to read the
vring's 16-bit avail.index field, and if it happens to observe a partial
update of the index, wrong descriptors will be used, which will lead to
a breakdown of the virtio communication.  A similar problem exists for
__putuser(), which is used to write to the vring's used.index field.

These functions use strb[t]/ldrb[t] because the strht/ldrht instructions
did not exist until ARMv6T2/ARMv7, so ARMv7 can be fixed easily.  ARMv6
can be fixed as well: since CONFIG_CPU_USE_DOMAINS is not used, ARMv6
processors no longer use the unprivileged instructions for uaccess, so
they can use the halfword instructions too.

Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
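To make the race concrete, here is a minimal user-space sketch of the
failure mode (illustrative only: the names, the pthread setup and the
iteration count are not from the kernel; build with -pthread).  A writer
updates a 16-bit "index" with single aligned 16-bit stores, while a
reader assembles the value from two separate byte loads the way the
pre-patch __get_user_asm_half() did, and so can observe a value that was
never written:

/* Hedged sketch of the tearing problem described above; not kernel code. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint16_t index_field;	/* stand-in for the vring avail.index */

static void *writer(void *arg)
{
	(void)arg;
	for (;;) {
		/* each store is a single aligned 16-bit write */
		index_field = 0x0000;
		index_field = 0xffff;
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned long i;

	pthread_create(&t, NULL, writer, NULL);

	for (i = 0; i < 100000000UL; i++) {
		const volatile uint8_t *p = (const volatile uint8_t *)&index_field;
		uint16_t lo = p[0];		/* first byte load (old ldrb)  */
		uint16_t hi = p[1];		/* second byte load (old ldrb) */
		uint16_t v = lo | (hi << 8);	/* assembled like the !__ARMEB__ case */

		if (v != 0x0000 && v != 0xffff) {
			printf("torn read: 0x%04x after %lu iterations\n", v, i);
			return 0;
		}
	}
	printf("no tearing observed (timing dependent)\n");
	return 0;
}

With the patch applied, the kernel's halfword get/put goes through a
single ldrh/strh on ARMv6+, so an aligned 16-bit read can only observe
one of the values actually stored.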
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/uaccess.h  18
1 file changed, 18 insertions, 0 deletions
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index a5807b67ca8a..fe47d24955ea 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -349,6 +349,13 @@ do { \
 #define __get_user_asm_byte(x, addr, err) \
 	__get_user_asm(x, addr, err, ldrb)
 
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __get_user_asm_half(x, addr, err) \
+	__get_user_asm(x, addr, err, ldrh)
+
+#else
+
 #ifndef __ARMEB__
 #define __get_user_asm_half(x, __gu_addr, err) \
 ({ \
@@ -367,6 +374,8 @@ do { \
 })
 #endif
 
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
 #define __get_user_asm_word(x, addr, err) \
 	__get_user_asm(x, addr, err, ldr)
 #endif
@@ -442,6 +451,13 @@ do { \
 #define __put_user_asm_byte(x, __pu_addr, err) \
 	__put_user_asm(x, __pu_addr, err, strb)
 
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __put_user_asm_half(x, __pu_addr, err) \
+	__put_user_asm(x, __pu_addr, err, strh)
+
+#else
+
 #ifndef __ARMEB__
 #define __put_user_asm_half(x, __pu_addr, err) \
 ({ \
@@ -458,6 +474,8 @@ do { \
 })
 #endif
 
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
 #define __put_user_asm_word(x, __pu_addr, err) \
 	__put_user_asm(x, __pu_addr, err, str)
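For comparison, the two access patterns the hunks above switch between
reduce to roughly the following (a sketch assuming an ARMv6+
little-endian target and GCC-style inline asm; load16_bytewise() and
load16_half() are illustrative names, not kernel helpers, and the real
macros additionally handle user-space faults and the T-variant
instructions):

#include <stdint.h>

/* Old pattern: two byte loads; another CPU can update *p between them. */
static inline uint16_t load16_bytewise(const volatile uint16_t *p)
{
	const volatile uint8_t *b = (const volatile uint8_t *)p;
	uint16_t lo, hi;

	__asm__ __volatile__("ldrb %0, [%1]"     : "=r" (lo) : "r" (b) : "memory");
	__asm__ __volatile__("ldrb %0, [%1, #1]" : "=r" (hi) : "r" (b) : "memory");
	return lo | (hi << 8);		/* little-endian; the __ARMEB__ case swaps */
}

/* New pattern: one halfword load; single-copy atomic when p is 2-byte aligned. */
static inline uint16_t load16_half(const volatile uint16_t *p)
{
	uint16_t val;

	__asm__ __volatile__("ldrh %0, [%1]" : "=r" (val) : "r" (p) : "memory");
	return val;
}

The put side is symmetric: two strb stores versus a single strh store to
the aligned used.index field.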