author    Zachary Amsden <zach@vmware.com>          2005-09-03 15:56:36 -0700
committer Linus Torvalds <torvalds@evo.osdl.org>    2005-09-05 00:06:11 -0700
commit    4bb0d3ec3e5b1e9e2399cdc641b3b6521ac9cdaa (patch)
tree      5e8d7646f5c6a2cec990b6d591f230d496b20664 /include
parent    2a0694d15d55d0deed928786a6393d5e45e37d76 (diff)
[PATCH] i386: inline asm cleanup
i386 Inline asm cleanup.  Use cr/dr accessor functions.

Also, a potential bugfix.  Also, some CR accessors really should be volatile.
Reads from CR0 (numeric state may change in an exception handler), writes to
CR4 (flipping CR4.TSD) and reads from CR2 (page fault) prevent instruction
re-ordering.  I did not add memory clobber to CR3 / CR4 / CR0 updates, as it
was not there to begin with, and in no case should kernel memory be clobbered,
except when doing a TLB flush, which already has memory clobber.  I noticed
that page invalidation does not have a memory clobber.  I can't find a bug as
a result, but there is definitely a potential for a bug here:

#define __flush_tlb_single(addr) \
	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
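The patch itself leaves __flush_tlb_single alone. Purely as illustration, a hedged sketch (not part of this diff) of what adding the missing clobber could look like, mirroring the "memory" clobber the full TLB flush already carries:

/* Hypothetical variant, not in this patch: a "memory" clobber would keep
 * the compiler from reordering memory accesses across the invalidation,
 * as the full TLB flush asm already does. */
#define __flush_tlb_single(addr) \
	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr) : "memory")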
Diffstat (limited to 'include')
-rw-r--r--   include/asm-i386/agp.h        |  2
-rw-r--r--   include/asm-i386/bugs.h       |  5
-rw-r--r--   include/asm-i386/processor.h  | 22
-rw-r--r--   include/asm-i386/system.h     | 28
-rw-r--r--   include/asm-i386/xor.h        | 26
5 files changed, 52 insertions, 31 deletions
diff --git a/include/asm-i386/agp.h b/include/asm-i386/agp.h
index b82f5f3ab887..9075083bab76 100644
--- a/include/asm-i386/agp.h
+++ b/include/asm-i386/agp.h
@@ -19,7 +19,7 @@ int unmap_page_from_agp(struct page *page);
/* Could use CLFLUSH here if the cpu supports it. But then it would
need to be called for each cacheline of the whole page so it may not be
worth it. Would need a page for it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+#define flush_agp_cache() wbinvd()
/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 6789fc275da3..ea54540638d2 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -118,7 +118,10 @@ static void __init check_hlt(void)
printk("disabled\n");
return;
}
- __asm__ __volatile__("hlt ; hlt ; hlt ; hlt");
+ halt();
+ halt();
+ halt();
+ halt();
printk("OK.\n");
}
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index d0d8b0160090..7e17d3b4f65a 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -203,9 +203,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
return edx;
}
-#define load_cr3(pgdir) \
- asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
-
+#define load_cr3(pgdir) write_cr3(__pa(pgdir))
/*
* Intel CPU features in CR4
@@ -232,22 +230,20 @@ extern unsigned long mmu_cr4_features;
static inline void set_in_cr4 (unsigned long mask)
{
+ unsigned cr4;
mmu_cr4_features |= mask;
- __asm__("movl %%cr4,%%eax\n\t"
- "orl %0,%%eax\n\t"
- "movl %%eax,%%cr4\n"
- : : "irg" (mask)
- :"ax");
+ cr4 = read_cr4();
+ cr4 |= mask;
+ write_cr4(cr4);
}
static inline void clear_in_cr4 (unsigned long mask)
{
+ unsigned cr4;
mmu_cr4_features &= ~mask;
- __asm__("movl %%cr4,%%eax\n\t"
- "andl %0,%%eax\n\t"
- "movl %%eax,%%cr4\n"
- : : "irg" (~mask)
- :"ax");
+ cr4 = read_cr4();
+ cr4 &= ~mask;
+ write_cr4(cr4);
}
/*
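As a usage note, a hedged sketch of how the reworked set_in_cr4() is typically called; the PGE feature bit and cpu_has_pge test are only an illustration, not something this diff touches:

	/* Illustration only: record the bit in mmu_cr4_features and set it
	 * in the live register via the new read_cr4()/write_cr4() accessors. */
	if (cpu_has_pge)
		set_in_cr4(X86_CR4_PGE);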
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 3db717a244f0..8048a5e018cd 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -107,13 +107,33 @@ static inline unsigned long _get_base(char * addr)
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
unsigned int __dummy; \
- __asm__( \
+ __asm__ __volatile__( \
"movl %%cr0,%0\n\t" \
:"=r" (__dummy)); \
__dummy; \
})
#define write_cr0(x) \
- __asm__("movl %0,%%cr0": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+
+#define read_cr2() ({ \
+ unsigned int __dummy; \
+ __asm__ __volatile__( \
+ "movl %%cr2,%0\n\t" \
+ :"=r" (__dummy)); \
+ __dummy; \
+})
+#define write_cr2(x) \
+ __asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+
+#define read_cr3() ({ \
+ unsigned int __dummy; \
+ __asm__ ( \
+ "movl %%cr3,%0\n\t" \
+ :"=r" (__dummy)); \
+ __dummy; \
+})
+#define write_cr3(x) \
+ __asm__ __volatile__("movl %0,%%cr3": :"r" (x));
#define read_cr4() ({ \
unsigned int __dummy; \
@@ -123,7 +143,7 @@ static inline unsigned long _get_base(char * addr)
__dummy; \
})
#define write_cr4(x) \
- __asm__("movl %0,%%cr4": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())
#endif /* __KERNEL__ */
@@ -447,6 +467,8 @@ struct alt_instr {
#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")
#define irqs_disabled() \
({ \
diff --git a/include/asm-i386/xor.h b/include/asm-i386/xor.h
index f80e2dbe1b56..23c86cef3b25 100644
--- a/include/asm-i386/xor.h
+++ b/include/asm-i386/xor.h
@@ -535,14 +535,14 @@ static struct xor_block_template xor_block_p5_mmx = {
#define XMMS_SAVE do { \
preempt_disable(); \
+ cr0 = read_cr0(); \
+ clts(); \
__asm__ __volatile__ ( \
- "movl %%cr0,%0 ;\n\t" \
- "clts ;\n\t" \
- "movups %%xmm0,(%1) ;\n\t" \
- "movups %%xmm1,0x10(%1) ;\n\t" \
- "movups %%xmm2,0x20(%1) ;\n\t" \
- "movups %%xmm3,0x30(%1) ;\n\t" \
- : "=&r" (cr0) \
+ "movups %%xmm0,(%0) ;\n\t" \
+ "movups %%xmm1,0x10(%0) ;\n\t" \
+ "movups %%xmm2,0x20(%0) ;\n\t" \
+ "movups %%xmm3,0x30(%0) ;\n\t" \
+ : \
: "r" (xmm_save) \
: "memory"); \
} while(0)
@@ -550,14 +550,14 @@ static struct xor_block_template xor_block_p5_mmx = {
#define XMMS_RESTORE do { \
__asm__ __volatile__ ( \
"sfence ;\n\t" \
- "movups (%1),%%xmm0 ;\n\t" \
- "movups 0x10(%1),%%xmm1 ;\n\t" \
- "movups 0x20(%1),%%xmm2 ;\n\t" \
- "movups 0x30(%1),%%xmm3 ;\n\t" \
- "movl %0,%%cr0 ;\n\t" \
+ "movups (%0),%%xmm0 ;\n\t" \
+ "movups 0x10(%0),%%xmm1 ;\n\t" \
+ "movups 0x20(%0),%%xmm2 ;\n\t" \
+ "movups 0x30(%0),%%xmm3 ;\n\t" \
: \
- : "r" (cr0), "r" (xmm_save) \
+ : "r" (xmm_save) \
: "memory"); \
+ write_cr0(cr0); \
preempt_enable(); \
} while(0)
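For context, a hedged, abbreviated sketch of how these macros bracket one of the SSE xor routines in this header; the local declarations are assumptions and the unrolled xorps loop is elided:

static void
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
	unsigned long cr0;		/* saved by XMMS_SAVE, restored by XMMS_RESTORE */
	char xmm_save[16*4] ALIGN16;	/* scratch space for %xmm0-%xmm3 */

	XMMS_SAVE;	/* disable preemption, read_cr0(), clts(), spill xmm regs */
	/* ... unrolled movaps/xorps loop over p1 and p2 ... */
	XMMS_RESTORE;	/* reload xmm regs, write_cr0(cr0), re-enable preemption */
}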