author    Helge Deller <deller@gmx.de>    2007-01-24 22:36:32 +0100
committer Kyle McMartin <kyle@athena.road.mcmartin.ca>    2007-02-17 01:16:12 -0500
commit    8e9e9844b44dd9f855d824d035b3097b199e44ed (patch)
tree      dbf1189bae2b13ab9e7f670971b960bda00280a2    /arch/parisc/kernel/syscall.S
parent    b288a8f79ac6028940ba60fb6cc61ed134632770 (diff)
[PARISC] more ENTRY(), ENDPROC(), END() conversions
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
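For context, ENTRY(), ENDPROC() and END() are the generic assembler-linkage helpers from <linux/linkage.h>, which is why the patch adds that include. A simplified sketch of their assembler-side definitions (the real macros of that era also handle alignment via ALIGN and allow per-arch overrides):

    #ifndef ENTRY
    #define ENTRY(name)                 \
            .globl name;                \
            name:
    #endif

    #ifndef END
    #define END(name)                   \
            .size name, .-name
    #endif

    #ifndef ENDPROC
    #define ENDPROC(name)               \
            .type name, @function;      \
            END(name)
    #endif

The net effect is the same global labels as the removed .export/label pairs, plus .size/.type information on the symbols for tools like objdump and kallsyms.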
Diffstat (limited to 'arch/parisc/kernel/syscall.S')
-rw-r--r--    arch/parisc/kernel/syscall.S    71
1 file changed, 27 insertions, 44 deletions
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index a05800429304..de1812de5183 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -12,10 +12,11 @@
#include <asm/errno.h>
#include <asm/psw.h>
#include <asm/thread_info.h>
-
#include <asm/assembly.h>
#include <asm/processor.h>
+#include <linux/linkage.h>
+
/* We fill the empty parts of the gateway page with
* something that will kill the kernel or a
* userspace application.
@@ -28,11 +29,18 @@
.level 1.1
#endif
+/* on 64bit pad to 64bit values */
+#ifdef CONFIG_64BIT
+#define ULONG_WORD(x) .word 0, x
+#else
+#define ULONG_WORD(x) .word x
+#endif
+
+
.text
.import syscall_exit,code
.import syscall_exit_rfi,code
- .export linux_gateway_page
/* Linux gateway page is aliased to virtual page 0 in the kernel
* address space. Since it is a gateway page it cannot be
@@ -43,7 +51,7 @@
*/
.align ASM_PAGE_SIZE
-linux_gateway_page:
+ENTRY(linux_gateway_page)
/* ADDRESS 0x00 to 0xb0 = 176 bytes / 4 bytes per insn = 44 insns */
.rept 44
@@ -595,73 +603,49 @@ cas_action:
the other for the store. Either return -EFAULT.
Each of the entries must be relocated. */
.section __ex_table,"aw"
-#ifdef CONFIG_64BIT
- /* Pad the address calculation */
- .word 0,(2b - linux_gateway_page)
- .word 0,(3b - linux_gateway_page)
-#else
- .word (2b - linux_gateway_page)
- .word (3b - linux_gateway_page)
-#endif
+ ULONG_WORD(2b - linux_gateway_page)
+ ULONG_WORD(3b - linux_gateway_page)
.previous
.section __ex_table,"aw"
-#ifdef CONFIG_64BIT
- /* Pad the address calculation */
- .word 0,(1b - linux_gateway_page)
- .word 0,(3b - linux_gateway_page)
-#else
- .word (1b - linux_gateway_page)
- .word (3b - linux_gateway_page)
-#endif
+ ULONG_WORD(1b - linux_gateway_page)
+ ULONG_WORD(3b - linux_gateway_page)
.previous
end_compare_and_swap:
/* Make sure nothing else is placed on this page */
.align ASM_PAGE_SIZE
- .export end_linux_gateway_page
-end_linux_gateway_page:
+END(linux_gateway_page)
+ENTRY(end_linux_gateway_page)
/* Relocate symbols assuming linux_gateway_page is mapped
to virtual address 0x0 */
-#ifdef CONFIG_64BIT
- /* FIXME: The code will always be on the gateay page
- and thus it will be on the first 4k, the
- assembler seems to think that the final
- subtraction result is only a word in
- length, so we pad the value.
- */
-#define LWS_ENTRY(_name_) .word 0,(lws_##_name_ - linux_gateway_page)
-#else
-#define LWS_ENTRY(_name_) .word (lws_##_name_ - linux_gateway_page)
-#endif
+
+#define LWS_ENTRY(_name_) ULONG_WORD(lws_##_name_ - linux_gateway_page)
.section .rodata,"a"
.align ASM_PAGE_SIZE
/* Light-weight-syscall table */
/* Start of lws table. */
- .export lws_table
-.Llws_table:
-lws_table:
+ENTRY(lws_table)
LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */
LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */
+END(lws_table)
/* End of lws table */
.align ASM_PAGE_SIZE
- .export sys_call_table
-.Lsys_call_table:
-sys_call_table:
+ENTRY(sys_call_table)
#include "syscall_table.S"
+END(sys_call_table)
#ifdef CONFIG_64BIT
.align ASM_PAGE_SIZE
- .export sys_call_table64
-.Lsys_call_table64:
-sys_call_table64:
+ENTRY(sys_call_table64)
#define SYSCALL_TABLE_64BIT
#include "syscall_table.S"
+END(sys_call_table64)
#endif
#ifdef CONFIG_SMP
@@ -671,9 +655,7 @@ sys_call_table64:
*/
.section .data
.align 4096
- .export lws_lock_start
-.Llws_lock_start:
-lws_lock_start:
+ENTRY(lws_lock_start)
/* lws locks */
.align 16
.rept 16
@@ -683,6 +665,7 @@ lws_lock_start:
.word 0
.word 0
.endr
+END(lws_lock_start)
.previous
#endif
/* CONFIG_SMP for lws_lock_start */
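As a side note, the ULONG_WORD() helper introduced above replaces the repeated #ifdef CONFIG_64BIT blocks: on a 64-bit kernel each __ex_table and lws_table slot must occupy a full unsigned long, so the 32-bit offset is padded with a leading zero word. A minimal illustration of how one entry expands (hypothetical, for clarity only):

    /* ULONG_WORD(2b - linux_gateway_page) expands to:
     *   32-bit kernel:  .word (2b - linux_gateway_page)        -> 4 bytes
     *   64-bit kernel:  .word 0, (2b - linux_gateway_page)     -> 8 bytes
     * so every table slot matches sizeof(unsigned long) on both.
     */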