author     Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-09-28 16:56:43 +0200
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-09-28 16:56:43 +0200
commit     94c12cc7d196bab34aaa98d38521549fa1e5ef76 (patch)
tree       8e0cec0ed44445d74a2cb5160303d6b4dfb1bc31 /arch/s390/mm
parent     25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc (diff)
[S390] Inline assembly cleanup.
Major cleanup of all s390 inline assemblies. They now have a common
coding style. Quite a few have been shortened, mainly by using register
asm variables. Use of the EX_TABLE macro helps as well. The atomic ops,
bit ops and locking inlines now use the Q-constraint if a newer gcc
is used. That results in slightly better code.

Thanks to Christian Borntraeger for proof reading the changes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
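As a quick illustration of the register asm idiom the message refers to, here is a minimal sketch (not code from this commit; the function name is hypothetical and the diagnose call only mirrors the shape of dcss_diag below). Binding a C variable to a fixed general register lets the assembly body shrink to the instructions that matter, with no explicit register shuffling:

/* Sketch only: reg1 is pinned to general register 1, so the compiler
 * places the operand where the (illustrative) instruction expects it. */
static inline int diag_example(unsigned long addr)
{
	register unsigned long reg1 asm("1") = addr;
	int rc;

	asm volatile(
		"	diag	%1,0,0x64\n"	/* illustrative diagnose call */
		"	ipm	%0\n"		/* insert program mask */
		"	srl	%0,28\n"	/* condition code -> rc */
		: "=d" (rc)
		: "d" (reg1)
		: "cc");
	return rc;
}

The Q-constraint mentioned above works in a similar spirit: with a newer gcc it lets memory operands be emitted as short base+displacement references instead of forcing an address into a register first.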
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/extmem.c  16
-rw-r--r--  arch/s390/mm/fault.c   34
-rw-r--r--  arch/s390/mm/init.c    41
3 files changed, 33 insertions, 58 deletions
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 9b11e3e20903..226275d5c4f6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -142,17 +142,17 @@ dcss_diag (__u8 func, void *parameter,
rx = (unsigned long) parameter;
ry = (unsigned long) func;
- __asm__ __volatile__(
+ asm volatile(
#ifdef CONFIG_64BIT
- " sam31\n" // switch to 31 bit
- " diag %0,%1,0x64\n"
- " sam64\n" // switch back to 64 bit
+ " sam31\n"
+ " diag %0,%1,0x64\n"
+ " sam64\n"
#else
- " diag %0,%1,0x64\n"
+ " diag %0,%1,0x64\n"
#endif
- " ipm %2\n"
- " srl %2,28\n"
- : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc" );
+ " ipm %2\n"
+ " srl %2,28\n"
+ : "+d" (rx), "+d" (ry), "=d" (rc) : : "cc");
*ret1 = rx;
*ret2 = ry;
return rc;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a393c308bb29..f2b9a84dc2bf 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -424,20 +424,13 @@ int pfault_init(void)
if (pfault_disable)
return -1;
- __asm__ __volatile__(
- " diag %1,%0,0x258\n"
- "0: j 2f\n"
- "1: la %0,8\n"
+ asm volatile(
+ " diag %1,%0,0x258\n"
+ "0: j 2f\n"
+ "1: la %0,8\n"
"2:\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
-#ifndef CONFIG_64BIT
- " .long 0b,1b\n"
-#else /* CONFIG_64BIT */
- " .quad 0b,1b\n"
-#endif /* CONFIG_64BIT */
- ".previous"
- : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc" );
+ EX_TABLE(0b,1b)
+ : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
__ctl_set_bit(0, 9);
return rc;
}
@@ -450,18 +443,11 @@ void pfault_fini(void)
if (pfault_disable)
return;
__ctl_clear_bit(0,9);
- __asm__ __volatile__(
- " diag %0,0,0x258\n"
+ asm volatile(
+ " diag %0,0,0x258\n"
"0:\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
-#ifndef CONFIG_64BIT
- " .long 0b,0b\n"
-#else /* CONFIG_64BIT */
- " .quad 0b,0b\n"
-#endif /* CONFIG_64BIT */
- ".previous"
- : : "a" (&refbk), "m" (refbk) : "cc" );
+ EX_TABLE(0b,0b)
+ : : "a" (&refbk), "m" (refbk) : "cc");
}
asmlinkage void
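For reference, the EX_TABLE macro used in the two hunks above stands in for exactly the open-coded __ex_table fragments being removed. A reconstruction from those removed lines (a sketch only; the authoritative definition lives in the s390 headers touched elsewhere in this commit):

/* Reconstructed sketch, not copied from the headers: emit one
 * exception-table entry mapping the faulting address (_fault) to
 * the fixup address (_target), pointer-sized per CONFIG_64BIT. */
#ifndef CONFIG_64BIT
#define EX_TABLE(_fault, _target)			\
	".section __ex_table,\"a\"\n"			\
	"	.align 4\n"				\
	"	.long " #_fault "," #_target "\n"	\
	".previous\n"
#else
#define EX_TABLE(_fault, _target)			\
	".section __ex_table,\"a\"\n"			\
	"	.align 8\n"				\
	"	.quad " #_fault "," #_target "\n"	\
	".previous\n"
#endif

With EX_TABLE(0b,1b), the stringized labels reproduce the removed ".long 0b,1b" / ".quad 0b,1b" lines, so each call site carries only the fault and fixup labels.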
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index cfd9b8f7a523..127044e1707c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -45,26 +45,17 @@ void diag10(unsigned long addr)
{
if (addr >= 0x7ff00000)
return;
+ asm volatile(
#ifdef CONFIG_64BIT
- asm volatile (
- " sam31\n"
- " diag %0,%0,0x10\n"
- "0: sam64\n"
- ".section __ex_table,\"a\"\n"
- " .align 8\n"
- " .quad 0b, 0b\n"
- ".previous\n"
- : : "a" (addr));
+ " sam31\n"
+ " diag %0,%0,0x10\n"
+ "0: sam64\n"
#else
- asm volatile (
- " diag %0,%0,0x10\n"
+ " diag %0,%0,0x10\n"
"0:\n"
- ".section __ex_table,\"a\"\n"
- " .align 4\n"
- " .long 0b, 0b\n"
- ".previous\n"
- : : "a" (addr));
#endif
+ EX_TABLE(0b,0b)
+ : : "a" (addr));
}
void show_mem(void)
@@ -156,11 +147,10 @@ void __init paging_init(void)
S390_lowcore.kernel_asce = pgdir_k;
/* enable virtual mapping in kernel mode */
- __asm__ __volatile__(" LCTL 1,1,%0\n"
- " LCTL 7,7,%0\n"
- " LCTL 13,13,%0\n"
- " SSM %1"
- : : "m" (pgdir_k), "m" (ssm_mask));
+ __ctl_load(pgdir_k, 1, 1);
+ __ctl_load(pgdir_k, 7, 7);
+ __ctl_load(pgdir_k, 13, 13);
+ __raw_local_irq_ssm(ssm_mask);
local_flush_tlb();
return;
@@ -241,11 +231,10 @@ void __init paging_init(void)
S390_lowcore.kernel_asce = pgdir_k;
/* enable virtual mapping in kernel mode */
- __asm__ __volatile__("lctlg 1,1,%0\n\t"
- "lctlg 7,7,%0\n\t"
- "lctlg 13,13,%0\n\t"
- "ssm %1"
- : :"m" (pgdir_k), "m" (ssm_mask));
+ __ctl_load(pgdir_k, 1, 1);
+ __ctl_load(pgdir_k, 7, 7);
+ __ctl_load(pgdir_k, 13, 13);
+ __raw_local_irq_ssm(ssm_mask);
local_flush_tlb();
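The __ctl_load and __raw_local_irq_ssm helpers used in both paging_init variants wrap the control-register load and set-system-mask instructions that the removed asm issued directly. A minimal sketch of the 31-bit flavour, modelled on the removed LCTL/SSM sequence (an assumption for illustration; the 64-bit build would use lctlg, matching the second hunk, and the real definitions live in the s390 system headers, which this diff does not show):

/* Sketch only: load control registers low..high from memory, and
 * load the PSW system mask; both modelled on the asm removed above. */
#define __ctl_load(cr, low, high)				\
	asm volatile("	lctl	" #low "," #high ",%0"		\
		     : : "m" (cr))

#define __raw_local_irq_ssm(mask)				\
	asm volatile("	ssm	%0" : : "m" (mask) : "memory")

Centralizing these sequences in helpers is what lets the two paging_init variants shed their near-duplicate inline assemblies and differ only in what the headers expand to.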