Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/a.out.h  1
-rw-r--r--  include/asm-i386/acpi.h  23
-rw-r--r--  include/asm-i386/alternative.h  2
-rw-r--r--  include/asm-i386/apic.h  2
-rw-r--r--  include/asm-i386/boot.h  6
-rw-r--r--  include/asm-i386/cmpxchg.h  16
-rw-r--r--  include/asm-i386/cpufeature.h  28
-rw-r--r--  include/asm-i386/e820.h  22
-rw-r--r--  include/asm-i386/fixmap.h  2
-rw-r--r--  include/asm-i386/hpet.h  126
-rw-r--r--  include/asm-i386/i8253.h  16
-rw-r--r--  include/asm-i386/ide.h  4
-rw-r--r--  include/asm-i386/io.h  4
-rw-r--r--  include/asm-i386/io_apic.h  1
-rw-r--r--  include/asm-i386/irq.h  1
-rw-r--r--  include/asm-i386/ist.h  10
-rw-r--r--  include/asm-i386/kprobes.h  1
-rw-r--r--  include/asm-i386/mach-default/do_timer.h  2
-rw-r--r--  include/asm-i386/mach-default/io_ports.h  5
-rw-r--r--  include/asm-i386/mach-default/irq_vectors_limits.h  2
-rw-r--r--  include/asm-i386/mach-default/mach_reboot.h  25
-rw-r--r--  include/asm-i386/mach-default/mach_wakecpu.h  3
-rw-r--r--  include/asm-i386/mach-es7000/mach_wakecpu.h  3
-rw-r--r--  include/asm-i386/mach-voyager/do_timer.h  2
-rw-r--r--  include/asm-i386/mc146818rtc.h  5
-rw-r--r--  include/asm-i386/mce.h  4
-rw-r--r--  include/asm-i386/mmu_context.h  2
-rw-r--r--  include/asm-i386/nmi.h  5
-rw-r--r--  include/asm-i386/page.h  4
-rw-r--r--  include/asm-i386/paravirt.h  38
-rw-r--r--  include/asm-i386/pci.h  45
-rw-r--r--  include/asm-i386/percpu.h  5
-rw-r--r--  include/asm-i386/pgalloc.h  6
-rw-r--r--  include/asm-i386/pgtable-2level.h  8
-rw-r--r--  include/asm-i386/pgtable-3level.h  17
-rw-r--r--  include/asm-i386/pgtable.h  40
-rw-r--r--  include/asm-i386/processor.h  17
-rw-r--r--  include/asm-i386/required-features.h  41
-rw-r--r--  include/asm-i386/setup.h  14
-rw-r--r--  include/asm-i386/smp.h  5
-rw-r--r--  include/asm-i386/string.h  243
-rw-r--r--  include/asm-i386/suspend.h  2
-rw-r--r--  include/asm-i386/system.h  14
-rw-r--r--  include/asm-i386/thread_info.h  23
-rw-r--r--  include/asm-i386/timer.h  34
-rw-r--r--  include/asm-i386/tlbflush.h  6
-rw-r--r--  include/asm-i386/topology.h  2
-rw-r--r--  include/asm-i386/tsc.h  1
-rw-r--r--  include/asm-i386/uaccess.h  2
-rw-r--r--  include/asm-i386/unistd.h  3
-rw-r--r--  include/asm-i386/vmi_time.h  2
51 files changed, 339 insertions, 556 deletions
diff --git a/include/asm-i386/a.out.h b/include/asm-i386/a.out.h
index ab17bb8e5465..851a60f8258c 100644
--- a/include/asm-i386/a.out.h
+++ b/include/asm-i386/a.out.h
@@ -20,6 +20,7 @@ struct exec
#ifdef __KERNEL__
#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
#endif
diff --git a/include/asm-i386/acpi.h b/include/asm-i386/acpi.h
index 449f3f272e07..125179adf044 100644
--- a/include/asm-i386/acpi.h
+++ b/include/asm-i386/acpi.h
@@ -121,19 +121,6 @@ static inline void acpi_disable_pci(void)
}
extern int acpi_irq_balance_set(char *str);
-#else /* !CONFIG_ACPI */
-
-#define acpi_lapic 0
-#define acpi_ioapic 0
-static inline void acpi_noirq_set(void) { }
-static inline void acpi_disable_pci(void) { }
-static inline void disable_acpi(void) { }
-
-#endif /* !CONFIG_ACPI */
-
-
-#ifdef CONFIG_ACPI_SLEEP
-
/* routines for saving/restoring kernel state */
extern int acpi_save_state_mem(void);
extern void acpi_restore_state_mem(void);
@@ -143,7 +130,15 @@ extern unsigned long acpi_wakeup_address;
/* early initialization routine */
extern void acpi_reserve_bootmem(void);
-#endif /*CONFIG_ACPI_SLEEP*/
+#else /* !CONFIG_ACPI */
+
+#define acpi_lapic 0
+#define acpi_ioapic 0
+static inline void acpi_noirq_set(void) { }
+static inline void acpi_disable_pci(void) { }
+static inline void disable_acpi(void) { }
+
+#endif /* !CONFIG_ACPI */
#define ARCH_HAS_POWER_INIT 1
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index eb7da5402bfa..bda6c810c0f4 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -149,4 +149,6 @@ apply_paravirt(struct paravirt_patch_site *start,
#define __parainstructions_end NULL
#endif
+extern void text_poke(void *addr, unsigned char *opcode, int len);
+
#endif /* _I386_ALTERNATIVE_H */
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index 1e8f6f252dd3..4091b33dcb10 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -116,6 +116,8 @@ extern void enable_NMI_through_LVT0 (void * dummy);
extern int timer_over_8254;
extern int local_apic_timer_c2_ok;
+extern int local_apic_timer_disabled;
+
#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
index bd024ab4fe53..ed8affbf96cb 100644
--- a/include/asm-i386/boot.h
+++ b/include/asm-i386/boot.h
@@ -1,5 +1,5 @@
-#ifndef _LINUX_BOOT_H
-#define _LINUX_BOOT_H
+#ifndef _ASM_BOOT_H
+#define _ASM_BOOT_H
/* Don't touch these, unless you really know what you're doing. */
#define DEF_INITSEG 0x9000
@@ -17,4 +17,4 @@
+ (CONFIG_PHYSICAL_ALIGN - 1)) \
& ~(CONFIG_PHYSICAL_ALIGN - 1))
-#endif /* _LINUX_BOOT_H */
+#endif /* _ASM_BOOT_H */
diff --git a/include/asm-i386/cmpxchg.h b/include/asm-i386/cmpxchg.h
index 7adcef0cd53b..f86ede28f6dc 100644
--- a/include/asm-i386/cmpxchg.h
+++ b/include/asm-i386/cmpxchg.h
@@ -3,14 +3,16 @@
#include <linux/bitops.h> /* for LOCK_PREFIX */
+/*
+ * Note: if you use set64_bit(), __cmpxchg64(), or their variants, you
+ * need to test for the feature in boot_cpu_data.
+ */
+
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
-
-#ifdef CONFIG_X86_CMPXCHG64
-
/*
* The semantics of XCHGCMP8B are a bit strange, this is why
* there is a loop and the loading of %%eax and %%edx has to
@@ -32,7 +34,7 @@ static inline void __set_64bit (unsigned long long * ptr,
"\n1:\t"
"movl (%0), %%eax\n\t"
"movl 4(%0), %%edx\n\t"
- "lock cmpxchg8b (%0)\n\t"
+ LOCK_PREFIX "cmpxchg8b (%0)\n\t"
"jnz 1b"
: /* no outputs */
: "D"(ptr),
@@ -65,8 +67,6 @@ static inline void __set_64bit_var (unsigned long long *ptr,
__set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
__set_64bit(ptr, ll_low(value), ll_high(value)) )
-#endif
-
/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
@@ -252,8 +252,6 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
})
#endif
-#ifdef CONFIG_X86_CMPXCHG64
-
static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
unsigned long long new)
{
@@ -289,5 +287,3 @@ static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
((__typeof__(*(ptr)))__cmpxchg64_local((ptr),(unsigned long long)(o),\
(unsigned long long)(n)))
#endif
-
-#endif
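With the CONFIG_X86_CMPXCHG64 guards gone, the 64-bit helpers above are always compiled in, so callers have to follow the new comment and gate them on the CX8 feature bit at run time. A minimal sketch of such a caller, assuming the usual kernel context (<asm/cpufeature.h> and <asm/cmpxchg.h> already included); my_set_counter64() is a made-up name, not a kernel API:

static void my_set_counter64(unsigned long long *p, unsigned long long v)
{
	if (boot_cpu_has(X86_FEATURE_CX8))
		set_64bit(p, v);	/* atomic: lock cmpxchg8b loop */
	else
		*p = v;			/* pre-CX8 CPUs: plain store, not atomic */
}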
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index f514e906643a..7b3aa28ebc6e 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -12,7 +12,7 @@
#endif
#include <asm/required-features.h>
-#define NCAPINTS 7 /* N 32-bit words worth of info */
+#define NCAPINTS 8 /* N 32-bit words worth of info */
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
@@ -79,8 +79,9 @@
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_LAPIC_TIMER_BROKEN (3*32+ 14) /* lapic timer broken in C1 */
+/* 14 free */
#define X86_FEATURE_SYNC_RDTSC (3*32+15) /* RDTSC synchronizes the CPU */
+#define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
@@ -108,11 +109,24 @@
#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
-#define cpu_has(c, bit) \
- ((__builtin_constant_p(bit) && (bit) < 32 && \
- (1UL << (bit)) & REQUIRED_MASK1) ? \
- 1 : \
- test_bit(bit, (c)->x86_capability))
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc
+ */
+#define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */
+
+#define cpu_has(c, bit) \
+ (__builtin_constant_p(bit) && \
+ ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
+ (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
+ (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
+ (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
+ (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
+ (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
+ (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
+ (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) ) \
+ ? 1 : \
+ test_bit(bit, (c)->x86_capability))
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
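The reworked cpu_has() above folds to a constant 1 whenever the requested bit is a compile-time constant that lies in one of the REQUIRED_MASKn words, and only falls back to the run-time test_bit() otherwise. A stand-alone sketch of the same idea, simplified to an array lookup; required_mask[] and has_feature() are hypothetical names used for illustration, not the kernel macro itself:

#include <stdint.h>

#define NCAPWORDS 8				/* mirrors NCAPINTS above */

static const uint32_t required_mask[NCAPWORDS] = {
	[0] = 1u << 0,				/* pretend FPU is a required feature */
};

static inline int has_feature(const uint32_t *caps, unsigned int bit)
{
	/* constant bit in the required set: the test folds away at build time */
	if (__builtin_constant_p(bit) &&
	    (required_mask[bit >> 5] & (1u << (bit & 31))))
		return 1;
	/* otherwise look it up in the CPU's capability words at run time */
	return (caps[bit >> 5] >> (bit & 31)) & 1;
}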
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index 096a2a8eb1da..cf67dbb1db79 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -25,13 +25,15 @@
#ifndef __ASSEMBLY__
+struct e820entry {
+ u64 addr; /* start of memory segment */
+ u64 size; /* size of memory segment */
+ u32 type; /* type of memory segment */
+} __attribute__((packed));
+
struct e820map {
- int nr_map;
- struct e820entry {
- unsigned long long addr; /* start of memory segment */
- unsigned long long size; /* size of memory segment */
- unsigned long type; /* type of memory segment */
- } map[E820MAX];
+ u32 nr_map;
+ struct e820entry map[E820MAX];
};
extern struct e820map e820;
@@ -45,6 +47,14 @@ extern void e820_register_memory(void);
extern void limit_regions(unsigned long long size);
extern void print_memory_map(char *who);
+#if defined(CONFIG_PM) && defined(CONFIG_HIBERNATION)
+extern void e820_mark_nosave_regions(void);
+#else
+static inline void e820_mark_nosave_regions(void)
+{
+}
+#endif
+
#endif/*!__ASSEMBLY__*/
#endif/*__E820_HEADER*/
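Since the map entries now carry fixed-width u64/u32 fields in a packed layout, walking the firmware memory map is straightforward. An illustrative helper only, assuming the E820_RAM type constant defined elsewhere in this header; count_usable_ram() is a hypothetical name:

static u64 count_usable_ram(const struct e820map *map)
{
	u64 total = 0;
	u32 i;

	for (i = 0; i < map->nr_map; i++)
		if (map->map[i].type == E820_RAM)	/* usable RAM segments only */
			total += map->map[i].size;
	return total;
}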
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index 80ea052ee3a4..249e753ac805 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -54,6 +54,8 @@ extern unsigned long __FIXADDR_TOP;
enum fixed_addresses {
FIX_HOLE,
FIX_VDSO,
+ FIX_DBGP_BASE,
+ FIX_EARLYCON_MEM_BASE,
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h
index dddeedf504b7..c82dc7ed96b3 100644
--- a/include/asm-i386/hpet.h
+++ b/include/asm-i386/hpet.h
@@ -4,112 +4,82 @@
#ifdef CONFIG_HPET_TIMER
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/param.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/irq.h>
-#include <asm/msr.h>
-#include <asm/delay.h>
-#include <asm/mpspec.h>
-#include <asm/uaccess.h>
-#include <asm/processor.h>
-
-#include <linux/timex.h>
-
/*
* Documentation on HPET can be found at:
* http://www.intel.com/ial/home/sp/pcmmspec.htm
* ftp://download.intel.com/ial/home/sp/mmts098.pdf
*/
-#define HPET_MMAP_SIZE 1024
-
-#define HPET_ID 0x000
-#define HPET_PERIOD 0x004
-#define HPET_CFG 0x010
-#define HPET_STATUS 0x020
-#define HPET_COUNTER 0x0f0
-#define HPET_T0_CFG 0x100
-#define HPET_T0_CMP 0x108
-#define HPET_T0_ROUTE 0x110
-#define HPET_T1_CFG 0x120
-#define HPET_T1_CMP 0x128
-#define HPET_T1_ROUTE 0x130
-#define HPET_T2_CFG 0x140
-#define HPET_T2_CMP 0x148
-#define HPET_T2_ROUTE 0x150
-
-#define HPET_ID_LEGSUP 0x00008000
-#define HPET_ID_NUMBER 0x00001f00
-#define HPET_ID_REV 0x000000ff
+#define HPET_MMAP_SIZE 1024
+
+#define HPET_ID 0x000
+#define HPET_PERIOD 0x004
+#define HPET_CFG 0x010
+#define HPET_STATUS 0x020
+#define HPET_COUNTER 0x0f0
+#define HPET_T0_CFG 0x100
+#define HPET_T0_CMP 0x108
+#define HPET_T0_ROUTE 0x110
+#define HPET_T1_CFG 0x120
+#define HPET_T1_CMP 0x128
+#define HPET_T1_ROUTE 0x130
+#define HPET_T2_CFG 0x140
+#define HPET_T2_CMP 0x148
+#define HPET_T2_ROUTE 0x150
+
+#define HPET_ID_REV 0x000000ff
+#define HPET_ID_NUMBER 0x00001f00
+#define HPET_ID_64BIT 0x00002000
+#define HPET_ID_LEGSUP 0x00008000
+#define HPET_ID_VENDOR 0xffff0000
#define HPET_ID_NUMBER_SHIFT 8
+#define HPET_ID_VENDOR_SHIFT 16
-#define HPET_CFG_ENABLE 0x001
-#define HPET_CFG_LEGACY 0x002
+#define HPET_ID_VENDOR_8086 0x8086
+
+#define HPET_CFG_ENABLE 0x001
+#define HPET_CFG_LEGACY 0x002
#define HPET_LEGACY_8254 2
#define HPET_LEGACY_RTC 8
-#define HPET_TN_ENABLE 0x004
-#define HPET_TN_PERIODIC 0x008
-#define HPET_TN_PERIODIC_CAP 0x010
-#define HPET_TN_SETVAL 0x040
-#define HPET_TN_32BIT 0x100
-
-/* Use our own asm for 64 bit multiply/divide */
-#define ASM_MUL64_REG(eax_out,edx_out,reg_in,eax_in) \
- __asm__ __volatile__("mull %2" \
- :"=a" (eax_out), "=d" (edx_out) \
- :"r" (reg_in), "0" (eax_in))
+#define HPET_TN_LEVEL 0x0002
+#define HPET_TN_ENABLE 0x0004
+#define HPET_TN_PERIODIC 0x0008
+#define HPET_TN_PERIODIC_CAP 0x0010
+#define HPET_TN_64BIT_CAP 0x0020
+#define HPET_TN_SETVAL 0x0040
+#define HPET_TN_32BIT 0x0100
+#define HPET_TN_ROUTE 0x3e00
+#define HPET_TN_FSB 0x4000
+#define HPET_TN_FSB_CAP 0x8000
+#define HPET_TN_ROUTE_SHIFT 9
-#define ASM_DIV64_REG(eax_out,edx_out,reg_in,eax_in,edx_in) \
- __asm__ __volatile__("divl %2" \
- :"=a" (eax_out), "=d" (edx_out) \
- :"r" (reg_in), "0" (eax_in), "1" (edx_in))
-
-#define KERNEL_TICK_USEC (1000000UL/HZ) /* tick value in microsec */
/* Max HPET Period is 10^8 femto sec as in HPET spec */
-#define HPET_MAX_PERIOD (100000000UL)
+#define HPET_MAX_PERIOD 100000000UL
/*
* Min HPET period is 10^5 femto sec just for safety. If it is less than this,
 * then 32 bit HPET counter wraps around in less than 0.5 sec.
*/
-#define HPET_MIN_PERIOD (100000UL)
-#define HPET_TICK_RATE (HZ * 100000UL)
+#define HPET_MIN_PERIOD 100000UL
-extern unsigned long hpet_address; /* hpet memory map physical address */
+/* hpet memory map physical address */
+extern unsigned long hpet_address;
extern int is_hpet_enabled(void);
-
-#ifdef CONFIG_X86_64
-extern unsigned long hpet_tick; /* hpet clks count per tick */
-extern int hpet_use_timer;
-extern int hpet_rtc_timer_init(void);
extern int hpet_enable(void);
-extern int is_hpet_capable(void);
-extern int hpet_readl(unsigned long a);
-#else
-extern int hpet_enable(void);
-#endif
#ifdef CONFIG_HPET_EMULATE_RTC
+
+#include <linux/interrupt.h>
+
extern int hpet_mask_rtc_irq_bit(unsigned long bit_mask);
extern int hpet_set_rtc_irq_bit(unsigned long bit_mask);
-extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec);
+extern int hpet_set_alarm_time(unsigned char hrs, unsigned char min,
+ unsigned char sec);
extern int hpet_set_periodic_freq(unsigned long freq);
extern int hpet_rtc_dropped_irq(void);
extern int hpet_rtc_timer_init(void);
extern irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id);
+
#endif /* CONFIG_HPET_EMULATE_RTC */
#else
diff --git a/include/asm-i386/i8253.h b/include/asm-i386/i8253.h
index 6cb0dd4dcdde..7577d058d86e 100644
--- a/include/asm-i386/i8253.h
+++ b/include/asm-i386/i8253.h
@@ -3,19 +3,15 @@
#include <linux/clockchips.h>
+/* i8253A PIT registers */
+#define PIT_MODE 0x43
+#define PIT_CH0 0x40
+#define PIT_CH2 0x42
+
extern spinlock_t i8253_lock;
extern struct clock_event_device *global_clock_event;
-/**
- * pit_interrupt_hook - hook into timer tick
- * @regs: standard registers from interrupt
- *
- * Call the global clock event handler.
- **/
-static inline void pit_interrupt_hook(void)
-{
- global_clock_event->event_handler(global_clock_event);
-}
+extern void setup_pit_timer(void);
#endif /* __ASM_I8253_H__ */
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
index 0fc240c80f49..e7817a3d6578 100644
--- a/include/asm-i386/ide.h
+++ b/include/asm-i386/ide.h
@@ -40,14 +40,13 @@ static __inline__ int ide_default_irq(unsigned long base)
static __inline__ unsigned long ide_default_io_base(int index)
{
- struct pci_dev *pdev;
/*
* If PCI is present then it is not safe to poke around
* the other legacy IDE ports. Only 0x1f0 and 0x170 are
* defined compatibility mode ports for PCI. A user can
* override this using ide= but we must default safe.
*/
- if ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL)) == NULL) {
+ if (no_pci_devices()) {
switch(index) {
case 2: return 0x1e8;
case 3: return 0x168;
@@ -55,7 +54,6 @@ static __inline__ unsigned long ide_default_io_base(int index)
case 5: return 0x160;
}
}
- pci_dev_put(pdev);
switch (index) {
case 0: return 0x1f0;
case 1: return 0x170;
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index e797586a5bfc..e8e0bd641120 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -112,6 +112,9 @@ extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsign
* writew/writel functions and the other mmio helpers. The returned
* address is not guaranteed to be usable directly as a virtual
* address.
+ *
+ * If the area you are trying to map is a PCI BAR you should have a
+ * look at pci_iomap().
*/
static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
@@ -129,6 +132,7 @@ extern void iounmap(volatile void __iomem *addr);
*/
extern void *bt_ioremap(unsigned long offset, unsigned long size);
extern void bt_iounmap(void *addr, unsigned long size);
+extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
/* Use early IO mappings for DMI because it's initialized early */
#define dmi_ioremap bt_ioremap
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 340764076d5f..dbe734ddf2af 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -150,7 +150,6 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
#else /* !CONFIG_X86_IO_APIC */
#define io_apic_assign_pci_irqs 0
-static inline void disable_ioapic_setup(void) { }
#endif
#endif
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 9e15ce0006eb..36f310632c49 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -41,6 +41,7 @@ extern int irqbalance_disable(char *str);
extern void fixup_irqs(cpumask_t map);
#endif
+unsigned int do_IRQ(struct pt_regs *regs);
void init_IRQ(void);
void __init native_init_IRQ(void);
diff --git a/include/asm-i386/ist.h b/include/asm-i386/ist.h
index d13d1e68afa9..ef2003ebc6f9 100644
--- a/include/asm-i386/ist.h
+++ b/include/asm-i386/ist.h
@@ -19,11 +19,13 @@
#ifdef __KERNEL__
+#include <linux/types.h>
+
struct ist_info {
- unsigned long signature;
- unsigned long command;
- unsigned long event;
- unsigned long perf_level;
+ u32 signature;
+ u32 command;
+ u32 event;
+ u32 perf_level;
};
extern struct ist_info ist_info;
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 8774d06689da..06f7303c30ca 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -42,7 +42,6 @@ typedef u8 kprobe_opcode_t;
? (MAX_STACK_SIZE) \
: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
-#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
#define ARCH_SUPPORTS_KRETPROBES
#define ARCH_INACTIVE_KPROBE_COUNT 0
#define flush_insn_slot(p) do { } while (0)
diff --git a/include/asm-i386/mach-default/do_timer.h b/include/asm-i386/mach-default/do_timer.h
index 56e5689863ae..23ecda0b28a0 100644
--- a/include/asm-i386/mach-default/do_timer.h
+++ b/include/asm-i386/mach-default/do_timer.h
@@ -12,5 +12,5 @@
static inline void do_timer_interrupt_hook(void)
{
- pit_interrupt_hook();
+ global_clock_event->event_handler(global_clock_event);
}
diff --git a/include/asm-i386/mach-default/io_ports.h b/include/asm-i386/mach-default/io_ports.h
index a96d9f6604ee..48540ba97166 100644
--- a/include/asm-i386/mach-default/io_ports.h
+++ b/include/asm-i386/mach-default/io_ports.h
@@ -7,11 +7,6 @@
#ifndef _MACH_IO_PORTS_H
#define _MACH_IO_PORTS_H
-/* i8253A PIT registers */
-#define PIT_MODE 0x43
-#define PIT_CH0 0x40
-#define PIT_CH2 0x42
-
/* i8259A PIC registers */
#define PIC_MASTER_CMD 0x20
#define PIC_MASTER_IMR 0x21
diff --git a/include/asm-i386/mach-default/irq_vectors_limits.h b/include/asm-i386/mach-default/irq_vectors_limits.h
index 7f161e760be6..a90c7a60109f 100644
--- a/include/asm-i386/mach-default/irq_vectors_limits.h
+++ b/include/asm-i386/mach-default/irq_vectors_limits.h
@@ -1,7 +1,7 @@
#ifndef _ASM_IRQ_VECTORS_LIMITS_H
#define _ASM_IRQ_VECTORS_LIMITS_H
-#ifdef CONFIG_X86_IO_APIC
+#if defined(CONFIG_X86_IO_APIC) || defined(CONFIG_PARAVIRT)
#define NR_IRQS 224
# if (224 >= 32 * NR_CPUS)
# define NR_IRQ_VECTORS NR_IRQS
diff --git a/include/asm-i386/mach-default/mach_reboot.h b/include/asm-i386/mach-default/mach_reboot.h
index a955e57ad016..e23fd9fbebb3 100644
--- a/include/asm-i386/mach-default/mach_reboot.h
+++ b/include/asm-i386/mach-default/mach_reboot.h
@@ -19,14 +19,37 @@ static inline void kb_wait(void)
static inline void mach_reboot(void)
{
int i;
+
+ /* old method, works on most machines */
for (i = 0; i < 10; i++) {
kb_wait();
udelay(50);
+ outb(0xfe, 0x64); /* pulse reset low */
+ udelay(50);
+ }
+
+ /* New method: sets the "System flag" which, when set, indicates
+ * successful completion of the keyboard controller self-test (Basic
+ * Assurance Test, BAT). This is needed for some machines with no
+ * keyboard plugged in. This read-modify-write sequence sets only the
+ * system flag
+ */
+ for (i = 0; i < 10; i++) {
+ int cmd;
+
+ outb(0x20, 0x64); /* read Controller Command Byte */
+ udelay(50);
+ kb_wait();
+ udelay(50);
+ cmd = inb(0x60);
+ udelay(50);
+ kb_wait();
+ udelay(50);
outb(0x60, 0x64); /* write Controller Command Byte */
udelay(50);
kb_wait();
udelay(50);
- outb(0x14, 0x60); /* set "System flag" */
+ outb(cmd | 0x04, 0x60); /* set "System flag" */
udelay(50);
kb_wait();
udelay(50);
diff --git a/include/asm-i386/mach-default/mach_wakecpu.h b/include/asm-i386/mach-default/mach_wakecpu.h
index 673b85c9b273..3ebb17893aa5 100644
--- a/include/asm-i386/mach-default/mach_wakecpu.h
+++ b/include/asm-i386/mach-default/mach_wakecpu.h
@@ -15,7 +15,8 @@
static inline void wait_for_init_deassert(atomic_t *deassert)
{
- while (!atomic_read(deassert));
+ while (!atomic_read(deassert))
+ cpu_relax();
return;
}
diff --git a/include/asm-i386/mach-es7000/mach_wakecpu.h b/include/asm-i386/mach-es7000/mach_wakecpu.h
index efc903b73486..84ff58314501 100644
--- a/include/asm-i386/mach-es7000/mach_wakecpu.h
+++ b/include/asm-i386/mach-es7000/mach_wakecpu.h
@@ -31,7 +31,8 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
static inline void wait_for_init_deassert(atomic_t *deassert)
{
#ifdef WAKE_SECONDARY_VIA_INIT
- while (!atomic_read(deassert));
+ while (!atomic_read(deassert))
+ cpu_relax();
#endif
return;
}
diff --git a/include/asm-i386/mach-voyager/do_timer.h b/include/asm-i386/mach-voyager/do_timer.h
index 60f9dcc15d54..bc2b58926308 100644
--- a/include/asm-i386/mach-voyager/do_timer.h
+++ b/include/asm-i386/mach-voyager/do_timer.h
@@ -12,7 +12,7 @@
**/
static inline void do_timer_interrupt_hook(void)
{
- pit_interrupt_hook();
+ global_clock_event->event_handler(global_clock_event);
voyager_timer_interrupt();
}
diff --git a/include/asm-i386/mc146818rtc.h b/include/asm-i386/mc146818rtc.h
index 99a890047023..1613b42eaf58 100644
--- a/include/asm-i386/mc146818rtc.h
+++ b/include/asm-i386/mc146818rtc.h
@@ -6,6 +6,7 @@
#include <asm/io.h>
#include <asm/system.h>
+#include <asm/processor.h>
#include <linux/mc146818rtc.h>
#ifndef RTC_PORT
@@ -43,8 +44,10 @@ static inline void lock_cmos(unsigned char reg)
unsigned long new;
new = ((smp_processor_id()+1) << 8) | reg;
for (;;) {
- if (cmos_lock)
+ if (cmos_lock) {
+ cpu_relax();
continue;
+ }
if (__cmpxchg(&cmos_lock, 0, new, sizeof(cmos_lock)) == 0)
return;
}
diff --git a/include/asm-i386/mce.h b/include/asm-i386/mce.h
index b0a02ee34ffd..d56d89742e8f 100644
--- a/include/asm-i386/mce.h
+++ b/include/asm-i386/mce.h
@@ -5,3 +5,7 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
#endif
extern int mce_disabled;
+
+extern void stop_mce(void);
+extern void restart_mce(void);
+
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 8198d1cca1f3..7eb0b0b1fb3c 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -32,6 +32,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#endif
}
+void leave_mm(unsigned long cpu);
+
static inline void switch_mm(struct mm_struct *prev,
struct mm_struct *next,
struct task_struct *tsk)
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index fb1e133efd9f..70a958a8e381 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -33,11 +33,12 @@ extern int nmi_watchdog_tick (struct pt_regs * regs, unsigned reason);
extern atomic_t nmi_active;
extern unsigned int nmi_watchdog;
-#define NMI_DEFAULT -1
+#define NMI_DISABLED -1
#define NMI_NONE 0
#define NMI_IO_APIC 1
#define NMI_LOCAL_APIC 2
#define NMI_INVALID 3
+#define NMI_DEFAULT NMI_DISABLED
struct ctl_table;
struct file;
@@ -57,5 +58,7 @@ unsigned lapic_adjust_nmi_hz(unsigned hz);
int lapic_watchdog_ok(void);
void disable_lapic_nmi_watchdog(void);
void enable_lapic_nmi_watchdog(void);
+void stop_nmi(void);
+void restart_nmi(void);
#endif /* ASM_NMI_H */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index 818ac8bf01e2..80ecc66b6d86 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -34,7 +34,8 @@
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
+#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/*
@@ -43,7 +44,6 @@
extern int nx_enabled;
#ifdef CONFIG_X86_PAE
-extern unsigned long long __supported_pte_mask;
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pmd; } pmd_t;
typedef struct { unsigned long long pgd; } pgd_t;
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 7f846a7d6bcc..9fa3fa9e62d1 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -47,11 +47,14 @@ struct paravirt_ops
* The patch function should return the number of bytes of code
* generated, as we nop pad the rest in generic code.
*/
- unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len);
+ unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
+ unsigned long addr, unsigned len);
/* Basic arch-specific setup */
void (*arch_setup)(void);
char *(*memory_setup)(void);
+ void (*post_allocator_init)(void);
+
void (*init_IRQ)(void);
void (*time_init)(void);
@@ -116,7 +119,7 @@ struct paravirt_ops
u64 (*read_tsc)(void);
u64 (*read_pmc)(void);
- u64 (*get_scheduled_cycles)(void);
+ unsigned long long (*sched_clock)(void);
unsigned long (*get_cpu_khz)(void);
/* Segment descriptor handling */
@@ -173,7 +176,7 @@ struct paravirt_ops
unsigned long va);
/* Hooks for allocating/releasing pagetable pages */
- void (*alloc_pt)(u32 pfn);
+ void (*alloc_pt)(struct mm_struct *mm, u32 pfn);
void (*alloc_pd)(u32 pfn);
void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
void (*release_pt)(u32 pfn);
@@ -251,15 +254,19 @@ extern struct paravirt_ops paravirt_ops;
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ignore(unsigned len);
-unsigned paravirt_patch_call(void *target, u16 tgt_clobbers,
- void *site, u16 site_clobbers,
+unsigned paravirt_patch_call(void *insnbuf,
+ const void *target, u16 tgt_clobbers,
+ unsigned long addr, u16 site_clobbers,
unsigned len);
-unsigned paravirt_patch_jmp(void *target, void *site, unsigned len);
-unsigned paravirt_patch_default(u8 type, u16 clobbers, void *site, unsigned len);
+unsigned paravirt_patch_jmp(const void *target, void *insnbuf,
+ unsigned long addr, unsigned len);
+unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
+ unsigned long addr, unsigned len);
-unsigned paravirt_patch_insns(void *site, unsigned len,
+unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
const char *start, const char *end);
+int paravirt_disable_iospace(void);
/*
* This generates an indirect call based on the operation type number.
@@ -563,7 +570,10 @@ static inline u64 paravirt_read_tsc(void)
#define rdtscll(val) (val = paravirt_read_tsc())
-#define get_scheduled_cycles(val) (val = paravirt_ops.get_scheduled_cycles())
+static inline unsigned long long paravirt_sched_clock(void)
+{
+ return PVOP_CALL0(unsigned long long, sched_clock);
+}
#define calculate_cpu_khz() (paravirt_ops.get_cpu_khz())
#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
@@ -669,6 +679,12 @@ static inline void setup_secondary_clock(void)
}
#endif
+static inline void paravirt_post_allocator_init(void)
+{
+ if (paravirt_ops.post_allocator_init)
+ (*paravirt_ops.post_allocator_init)();
+}
+
static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
if (paravirt_ops.pagetable_setup_start)
@@ -725,9 +741,9 @@ static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
PVOP_VCALL3(flush_tlb_others, &cpumask, mm, va);
}
-static inline void paravirt_alloc_pt(unsigned pfn)
+static inline void paravirt_alloc_pt(struct mm_struct *mm, unsigned pfn)
{
- PVOP_VCALL1(alloc_pt, pfn);
+ PVOP_VCALL2(alloc_pt, mm, pfn);
}
static inline void paravirt_release_pt(unsigned pfn)
{
diff --git a/include/asm-i386/pci.h b/include/asm-i386/pci.h
index 64b6d0baedbc..4fcacc711385 100644
--- a/include/asm-i386/pci.h
+++ b/include/asm-i386/pci.h
@@ -3,6 +3,14 @@
#ifdef __KERNEL__
+
+struct pci_sysdata {
+ int node; /* NUMA node */
+};
+
+/* scan a bus after allocating a pci_sysdata for it */
+extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
+
#include <linux/mm.h> /* for struct page */
/* Can be used to override the logic in pci_scan_bus for skipping
@@ -56,48 +64,11 @@ struct pci_dev;
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
-/* This is always fine. */
-#define pci_dac_dma_supported(pci_dev, mask) (1)
-
-static inline dma64_addr_t
-pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
-{
- return ((dma64_addr_t) page_to_phys(page) +
- (dma64_addr_t) offset);
-}
-
-static inline struct page *
-pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return pfn_to_page(dma_addr >> PAGE_SHIFT);
-}
-
-static inline unsigned long
-pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
-{
- return (dma_addr & ~PAGE_MASK);
-}
-
-static inline void
-pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
-}
-
-static inline void
-pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
-{
- flush_write_buffers();
-}
-
#define HAVE_PCI_MMAP
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
-static inline void pcibios_add_platform_entries(struct pci_dev *dev)
-{
-}
-
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h
index f54830b5d5ac..a7ebd436f3cc 100644
--- a/include/asm-i386/percpu.h
+++ b/include/asm-i386/percpu.h
@@ -54,6 +54,11 @@ extern unsigned long __per_cpu_offset[];
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.shared_aligned"))) \
+ __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
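A hypothetical use of the new DEFINE_PER_CPU_SHARED_ALIGNED() helper: it places the object in .data.percpu.shared_aligned and pads it to a cache line, which is what you want for per-CPU data that remote CPUs also read. struct my_stats is a made-up example, not an existing kernel object:

struct my_stats {
	unsigned long packets;
	unsigned long drops;
};

/* one cache-line-aligned instance per CPU */
DEFINE_PER_CPU_SHARED_ALIGNED(struct my_stats, my_stats);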
diff --git a/include/asm-i386/pgalloc.h b/include/asm-i386/pgalloc.h
index d07b7afc2692..f2fc33ceb9f2 100644
--- a/include/asm-i386/pgalloc.h
+++ b/include/asm-i386/pgalloc.h
@@ -7,7 +7,7 @@
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
-#define paravirt_alloc_pt(pfn) do { } while (0)
+#define paravirt_alloc_pt(mm, pfn) do { } while (0)
#define paravirt_alloc_pd(pfn) do { } while (0)
#define paravirt_alloc_pd(pfn) do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
@@ -17,13 +17,13 @@
#define pmd_populate_kernel(mm, pmd, pte) \
do { \
- paravirt_alloc_pt(__pa(pte) >> PAGE_SHIFT); \
+ paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT); \
set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte))); \
} while (0)
#define pmd_populate(mm, pmd, pte) \
do { \
- paravirt_alloc_pt(page_to_pfn(pte)); \
+ paravirt_alloc_pt(mm, page_to_pfn(pte)); \
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
(unsigned long long) PAGE_SHIFT))); \
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h
index a50fd1773de8..84b03cf56a79 100644
--- a/include/asm-i386/pgtable-2level.h
+++ b/include/asm-i386/pgtable-2level.h
@@ -57,14 +57,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/*
- * All present user pages are user-executable:
- */
-static inline int pte_exec(pte_t pte)
-{
- return pte_user(pte);
-}
-
-/*
* All present pages are kernel-executable:
*/
static inline int pte_exec_kernel(pte_t pte)
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h
index eb0f1d7e96a1..948a33414118 100644
--- a/include/asm-i386/pgtable-3level.h
+++ b/include/asm-i386/pgtable-3level.h
@@ -20,26 +20,11 @@
#define pud_present(pud) 1
/*
- * Is the pte executable?
- */
-static inline int pte_x(pte_t pte)
-{
- return !(pte_val(pte) & _PAGE_NX);
-}
-
-/*
- * All present user-pages with !NX bit are user-executable:
- */
-static inline int pte_exec(pte_t pte)
-{
- return pte_user(pte) && pte_x(pte);
-}
-/*
* All present pages with !NX bit are kernel-executable:
*/
static inline int pte_exec_kernel(pte_t pte)
{
- return pte_x(pte);
+ return !(pte_val(pte) & _PAGE_NX);
}
/* Rules for using set_pte: the pte being assigned *must* be
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 628fa7747d0c..c7fefa6b12fd 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -79,7 +79,7 @@ void paging_init(void);
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8*1024*1024)
-#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
+#define VMALLOC_START (((unsigned long) high_memory + \
2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
@@ -218,8 +218,6 @@ extern unsigned long pg0[];
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
-static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
@@ -230,13 +228,9 @@ static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
*/
static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
-static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
-static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
-static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
-static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
@@ -295,17 +289,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
__changed; \
})
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(vma, addr, ptep) ({ \
- int __ret = 0; \
- if (pte_dirty(*(ptep))) \
- __ret = test_and_clear_bit(_PAGE_BIT_DIRTY, \
- &(ptep)->pte_low); \
- if (__ret) \
- pte_update((vma)->vm_mm, addr, ptep); \
- __ret; \
-})
-
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(vma, addr, ptep) ({ \
int __ret = 0; \
@@ -317,27 +300,6 @@ static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
__ret; \
})
-/*
- * Rules for using ptep_establish: the pte MUST be a user pte, and
- * must be a present->present transition.
- */
-#define __HAVE_ARCH_PTEP_ESTABLISH
-#define ptep_establish(vma, address, ptep, pteval) \
-do { \
- set_pte_present((vma)->vm_mm, address, ptep, pteval); \
- flush_tlb_page(vma, address); \
-} while (0)
-
-#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(vma, address, ptep) \
-({ \
- int __dirty; \
- __dirty = ptep_test_and_clear_dirty((vma), (address), (ptep)); \
- if (__dirty) \
- flush_tlb_page(vma, address); \
- __dirty; \
-})
-
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep) \
({ \
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 338668bfb0a2..3845fe72383e 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -88,7 +88,6 @@ struct cpuinfo_x86 {
#define X86_VENDOR_UMC 3
#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
-#define X86_VENDOR_RISE 6
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
#define X86_VENDOR_NUM 9
@@ -119,6 +118,7 @@ void __init cpu_detect(struct cpuinfo_x86 *c);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
@@ -168,17 +168,6 @@ static inline void clear_in_cr4 (unsigned long mask)
write_cr4(cr4);
}
-/*
- * NSC/Cyrix CPU indexed register access macros
- */
-
-#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
-
-#define setCx86(reg, data) do { \
- outb((reg), 0x22); \
- outb((data), 0x23); \
-} while (0)
-
/* Stop speculative execution */
static inline void sync_core(void)
{
@@ -227,6 +216,10 @@ extern int bootloader_type;
#define HAVE_ARCH_PICK_MMAP_LAYOUT
+extern void hard_disable_TSC(void);
+extern void disable_TSC(void);
+extern void hard_enable_TSC(void);
+
/*
* Size of io_bitmap.
*/
diff --git a/include/asm-i386/required-features.h b/include/asm-i386/required-features.h
index 9db866c1e64c..618feb98f9f5 100644
--- a/include/asm-i386/required-features.h
+++ b/include/asm-i386/required-features.h
@@ -3,32 +3,53 @@
/* Define minimum CPUID feature set for kernel These bits are checked
really early to actually display a visible error message before the
- kernel dies. Only add word 0 bits here
+ kernel dies. Make sure to assign features to the proper mask!
Some requirements that are not in CPUID yet are also in the
- CONFIG_X86_MINIMUM_CPU mode which is checked too.
+ CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
The real information is in arch/i386/Kconfig.cpu, this just converts
the CONFIGs into a bitmask */
+#ifndef CONFIG_MATH_EMULATION
+# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
+#else
+# define NEED_FPU 0
+#endif
+
#ifdef CONFIG_X86_PAE
-#define NEED_PAE (1<<X86_FEATURE_PAE)
+# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
#else
-#define NEED_PAE 0
+# define NEED_PAE 0
#endif
#ifdef CONFIG_X86_CMOV
-#define NEED_CMOV (1<<X86_FEATURE_CMOV)
+# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
#else
-#define NEED_CMOV 0
+# define NEED_CMOV 0
#endif
-#ifdef CONFIG_X86_CMPXCHG64
-#define NEED_CMPXCHG64 (1<<X86_FEATURE_CX8)
+#ifdef CONFIG_X86_PAE
+# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
#else
-#define NEED_CMPXCHG64 0
+# define NEED_CX8 0
#endif
-#define REQUIRED_MASK1 (NEED_PAE|NEED_CMOV|NEED_CMPXCHG64)
+#define REQUIRED_MASK0 (NEED_FPU|NEED_PAE|NEED_CMOV|NEED_CX8)
+
+#ifdef CONFIG_X86_USE_3DNOW
+# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
+#else
+# define NEED_3DNOW 0
+#endif
+
+#define REQUIRED_MASK1 (NEED_3DNOW)
+
+#define REQUIRED_MASK2 0
+#define REQUIRED_MASK3 0
+#define REQUIRED_MASK4 0
+#define REQUIRED_MASK5 0
+#define REQUIRED_MASK6 0
+#define REQUIRED_MASK7 0
#endif
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h
index 0e8077cbfdac..7862fe858a9e 100644
--- a/include/asm-i386/setup.h
+++ b/include/asm-i386/setup.h
@@ -26,12 +26,15 @@
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
#ifndef __ASSEMBLY__
+
+#include <asm/bootparam.h>
+
/*
* This is set up by the setup-routine at boot-time
*/
-extern unsigned char boot_params[PARAM_SIZE];
+extern struct boot_params boot_params;
-#define PARAM (boot_params)
+#define PARAM ((char *)&boot_params)
#define SCREEN_INFO (*(struct screen_info *) (PARAM+0))
#define EXT_MEM_K (*(unsigned short *) (PARAM+2))
#define ALT_MEM_K (*(unsigned long *) (PARAM+0x1e0))
@@ -39,8 +42,7 @@ extern unsigned char boot_params[PARAM_SIZE];
#define E820_MAP ((struct e820entry *) (PARAM+E820MAP))
#define APM_BIOS_INFO (*(struct apm_bios_info *) (PARAM+0x40))
#define IST_INFO (*(struct ist_info *) (PARAM+0x60))
-#define DRIVE_INFO (*(struct drive_info_struct *) (PARAM+0x80))
-#define SYS_DESC_TABLE (*(struct sys_desc_table_struct*)(PARAM+0xa0))
+#define SYS_DESC_TABLE (*(struct sys_desc_table *)(PARAM+0xa0))
#define EFI_SYSTAB ((efi_system_table_t *) *((unsigned long *)(PARAM+0x1c4)))
#define EFI_MEMDESC_SIZE (*((unsigned long *) (PARAM+0x1c8)))
#define EFI_MEMDESC_VERSION (*((unsigned long *) (PARAM+0x1cc)))
@@ -79,6 +81,10 @@ void __init add_memory_region(unsigned long long start,
extern unsigned long init_pg_tables_end;
+#ifndef CONFIG_PARAVIRT
+#define paravirt_post_allocator_init() do {} while (0)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 0c7132787062..1f73bde165b1 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -43,9 +43,12 @@ extern u8 x86_cpu_to_apicid[];
#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+extern void set_cpu_sibling_map(int cpu);
+
#ifdef CONFIG_HOTPLUG_CPU
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
+extern void remove_siblinginfo(int cpu);
#endif
struct smp_ops
@@ -129,6 +132,8 @@ extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern unsigned int num_processors;
+void __cpuinit smp_store_cpu_info(int id);
+
#endif /* !__ASSEMBLY__ */
#else /* CONFIG_SMP */
diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h
index b9277361954b..a9b64453bdf5 100644
--- a/include/asm-i386/string.h
+++ b/include/asm-i386/string.h
@@ -2,203 +2,35 @@
#define _I386_STRING_H_
#ifdef __KERNEL__
-/*
- * On a 486 or Pentium, we are better off not using the
- * byte string operations. But on a 386 or a PPro the
- * byte string ops are faster than doing it by hand
- * (MUCH faster on a Pentium).
- */
-
-/*
- * This string-include defines all string functions as inline
- * functions. Use gcc. It also assumes ds=es=data space, this should be
- * normal. Most of the string-functions are rather heavily hand-optimized,
- * see especially strsep,strstr,str[c]spn. They should work, but are not
- * very easy to understand. Everything is done entirely within the register
- * set, making the functions fast and clean. String instructions have been
- * used through-out, making for "slightly" unclear code :-)
- *
- * NO Copyright (C) 1991, 1992 Linus Torvalds,
- * consider these trivial functions to be PD.
- */
-/* AK: in fact I bet it would be better to move this stuff all out of line.
- */
+/* Let gcc decide whether to inline or use the out-of-line functions */
#define __HAVE_ARCH_STRCPY
-static inline char * strcpy(char * dest,const char *src)
-{
-int d0, d1, d2;
-__asm__ __volatile__(
- "1:\tlodsb\n\t"
- "stosb\n\t"
- "testb %%al,%%al\n\t"
- "jne 1b"
- : "=&S" (d0), "=&D" (d1), "=&a" (d2)
- :"0" (src),"1" (dest) : "memory");
-return dest;
-}
+extern char *strcpy(char *dest, const char *src);
#define __HAVE_ARCH_STRNCPY
-static inline char * strncpy(char * dest,const char *src,size_t count)
-{
-int d0, d1, d2, d3;
-__asm__ __volatile__(
- "1:\tdecl %2\n\t"
- "js 2f\n\t"
- "lodsb\n\t"
- "stosb\n\t"
- "testb %%al,%%al\n\t"
- "jne 1b\n\t"
- "rep\n\t"
- "stosb\n"
- "2:"
- : "=&S" (d0), "=&D" (d1), "=&c" (d2), "=&a" (d3)
- :"0" (src),"1" (dest),"2" (count) : "memory");
-return dest;
-}
+extern char *strncpy(char *dest, const char *src, size_t count);
#define __HAVE_ARCH_STRCAT
-static inline char * strcat(char * dest,const char * src)
-{
-int d0, d1, d2, d3;
-__asm__ __volatile__(
- "repne\n\t"
- "scasb\n\t"
- "decl %1\n"
- "1:\tlodsb\n\t"
- "stosb\n\t"
- "testb %%al,%%al\n\t"
- "jne 1b"
- : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
- : "0" (src), "1" (dest), "2" (0), "3" (0xffffffffu):"memory");
-return dest;
-}
+extern char *strcat(char *dest, const char *src);
#define __HAVE_ARCH_STRNCAT
-static inline char * strncat(char * dest,const char * src,size_t count)
-{
-int d0, d1, d2, d3;
-__asm__ __volatile__(
- "repne\n\t"
- "scasb\n\t"
- "decl %1\n\t"
- "movl %8,%3\n"
- "1:\tdecl %3\n\t"
- "js 2f\n\t"
- "lodsb\n\t"
- "stosb\n\t"
- "testb %%al,%%al\n\t"
- "jne 1b\n"
- "2:\txorl %2,%2\n\t"
- "stosb"
- : "=&S" (d0), "=&D" (d1), "=&a" (d2), "=&c" (d3)
- : "0" (src),"1" (dest),"2" (0),"3" (0xffffffffu), "g" (count)
- : "memory");
-return dest;
-}
+extern char *strncat(char *dest, const char *src, size_t count);
#define __HAVE_ARCH_STRCMP
-static inline int strcmp(const char * cs,const char * ct)
-{
-int d0, d1;
-register int __res;
-__asm__ __volatile__(
- "1:\tlodsb\n\t"
- "scasb\n\t"
- "jne 2f\n\t"
- "testb %%al,%%al\n\t"
- "jne 1b\n\t"
- "xorl %%eax,%%eax\n\t"
- "jmp 3f\n"
- "2:\tsbbl %%eax,%%eax\n\t"
- "orb $1,%%al\n"
- "3:"
- :"=a" (__res), "=&S" (d0), "=&D" (d1)
- :"1" (cs),"2" (ct)
- :"memory");
-return __res;
-}
+extern int strcmp(const char *cs, const char *ct);
#define __HAVE_ARCH_STRNCMP
-static inline int strncmp(const char * cs,const char * ct,size_t count)
-{
-register int __res;
-int d0, d1, d2;
-__asm__ __volatile__(
- "1:\tdecl %3\n\t"
- "js 2f\n\t"
- "lodsb\n\t"
- "scasb\n\t"
- "jne 3f\n\t"
- "testb %%al,%%al\n\t"
- "jne 1b\n"
- "2:\txorl %%eax,%%eax\n\t"
- "jmp 4f\n"
- "3:\tsbbl %%eax,%%eax\n\t"
- "orb $1,%%al\n"
- "4:"
- :"=a" (__res), "=&S" (d0), "=&D" (d1), "=&c" (d2)
- :"1" (cs),"2" (ct),"3" (count)
- :"memory");
-return __res;
-}
+extern int strncmp(const char *cs, const char *ct, size_t count);
#define __HAVE_ARCH_STRCHR
-static inline char * strchr(const char * s, int c)
-{
-int d0;
-register char * __res;
-__asm__ __volatile__(
- "movb %%al,%%ah\n"
- "1:\tlodsb\n\t"
- "cmpb %%ah,%%al\n\t"
- "je 2f\n\t"
- "testb %%al,%%al\n\t"
- "jne 1b\n\t"
- "movl $1,%1\n"
- "2:\tmovl %1,%0\n\t"
- "decl %0"
- :"=a" (__res), "=&S" (d0)
- :"1" (s),"0" (c)
- :"memory");
-return __res;
-}
+extern char *strchr(const char *s, int c);
#define __HAVE_ARCH_STRRCHR
-static inline char * strrchr(const char * s, int c)
-{
-int d0, d1;
-register char * __res;
-__asm__ __volatile__(
- "movb %%al,%%ah\n"
- "1:\tlodsb\n\t"
- "cmpb %%ah,%%al\n\t"
- "jne 2f\n\t"
- "leal -1(%%esi),%0\n"
- "2:\ttestb %%al,%%al\n\t"
- "jne 1b"
- :"=g" (__res), "=&S" (d0), "=&a" (d1)
- :"0" (0),"1" (s),"2" (c)
- :"memory");
-return __res;
-}
+extern char *strrchr(const char *s, int c);
#define __HAVE_ARCH_STRLEN
-static inline size_t strlen(const char * s)
-{
-int d0;
-register int __res;
-__asm__ __volatile__(
- "repne\n\t"
- "scasb\n\t"
- "notl %0\n\t"
- "decl %0"
- :"=c" (__res), "=&D" (d0)
- :"1" (s),"a" (0), "0" (0xffffffffu)
- :"memory");
-return __res;
-}
+extern size_t strlen(const char *s);
static __always_inline void * __memcpy(void * to, const void * from, size_t n)
{
@@ -207,9 +39,7 @@ __asm__ __volatile__(
"rep ; movsl\n\t"
"movl %4,%%ecx\n\t"
"andl $3,%%ecx\n\t"
-#if 1 /* want to pay 2 byte penalty for a chance to skip microcoded rep? */
"jz 1f\n\t"
-#endif
"rep ; movsb\n\t"
"1:"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
@@ -328,23 +158,7 @@ void *memmove(void * dest,const void * src, size_t n);
#define memcmp __builtin_memcmp
#define __HAVE_ARCH_MEMCHR
-static inline void * memchr(const void * cs,int c,size_t count)
-{
-int d0;
-register void * __res;
-if (!count)
- return NULL;
-__asm__ __volatile__(
- "repne\n\t"
- "scasb\n\t"
- "je 1f\n\t"
- "movl $1,%0\n"
- "1:\tdecl %0"
- :"=D" (__res), "=&c" (d0)
- :"a" (c),"0" (cs),"1" (count)
- :"memory");
-return __res;
-}
+extern void *memchr(const void * cs,int c,size_t count);
static inline void * __memset_generic(void * s, char c,size_t count)
{
@@ -386,29 +200,10 @@ return (s);
/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
-static inline size_t strnlen(const char * s, size_t count)
-{
-int d0;
-register int __res;
-__asm__ __volatile__(
- "movl %2,%0\n\t"
- "jmp 2f\n"
- "1:\tcmpb $0,(%0)\n\t"
- "je 3f\n\t"
- "incl %0\n"
- "2:\tdecl %1\n\t"
- "cmpl $-1,%1\n\t"
- "jne 1b\n"
- "3:\tsubl %2,%0"
- :"=a" (__res), "=&d" (d0)
- :"c" (s),"1" (count)
- :"memory");
-return __res;
-}
+extern size_t strnlen(const char * s, size_t count);
/* end of additional stuff */
#define __HAVE_ARCH_STRSTR
-
extern char *strstr(const char *cs, const char *ct);
/*
@@ -474,19 +269,7 @@ __asm__ __volatile__( \
* find the first occurrence of byte 'c', or 1 past the area if none
*/
#define __HAVE_ARCH_MEMSCAN
-static inline void * memscan(void * addr, int c, size_t size)
-{
- if (!size)
- return addr;
- __asm__("repnz; scasb\n\t"
- "jnz 1f\n\t"
- "dec %%edi\n"
- "1:"
- : "=D" (addr), "=c" (size)
- : "0" (addr), "1" (size), "a" (c)
- : "memory");
- return addr;
-}
+extern void *memscan(void * addr, int c, size_t size);
#endif /* __KERNEL__ */
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h
index 8dbaafe611ff..a2520732ffd6 100644
--- a/include/asm-i386/suspend.h
+++ b/include/asm-i386/suspend.h
@@ -21,7 +21,7 @@ struct saved_context {
unsigned long return_address;
} __attribute__((packed));
-#ifdef CONFIG_ACPI_SLEEP
+#ifdef CONFIG_ACPI
extern unsigned long saved_eip;
extern unsigned long saved_esp;
extern unsigned long saved_ebp;
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 94ed3686a5f3..d69ba937e092 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -214,11 +214,6 @@ static inline unsigned long get_limit(unsigned long segment)
*/
-/*
- * Actually only lfence would be needed for mb() because all stores done
- * by the kernel should be already ordered. But keep a full barrier for now.
- */
-
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
@@ -310,15 +305,6 @@ void enable_hlt(void);
extern int es7000_plat;
void cpu_idle_wait(void);
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- */
-static inline void sched_cacheflush(void)
-{
- wbinvd();
-}
-
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index 4cb0f91ae64f..22a8cbcd35e2 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -124,22 +124,21 @@ static inline struct thread_info *current_thread_info(void)
* - other flags in MSW
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
-#define TIF_SIGPENDING 2 /* signal pending */
-#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
-#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
-#define TIF_IRET 5 /* return with iret */
-#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
-#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
-#define TIF_SECCOMP 8 /* secure computing */
-#define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal() */
+#define TIF_SIGPENDING 1 /* signal pending */
+#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
+#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
+#define TIF_IRET 4 /* return with iret */
+#define TIF_SYSCALL_EMU 5 /* syscall emulation active */
+#define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */
+#define TIF_SECCOMP 7 /* secure computing */
+#define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */
#define TIF_MEMDIE 16
#define TIF_DEBUG 17 /* uses debug registers */
#define TIF_IO_BITMAP 18 /* uses I/O bitmap */
#define TIF_FREEZE 19 /* is freezing for suspend */
+#define TIF_NOTSC 20 /* TSC is not accessible in userland */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
@@ -151,6 +150,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_DEBUG (1<<TIF_DEBUG)
#define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP)
#define _TIF_FREEZE (1<<TIF_FREEZE)
+#define _TIF_NOTSC (1<<TIF_NOTSC)
/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK \
@@ -160,7 +160,8 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP)
/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP)
+#define _TIF_WORK_CTXSW_NEXT (_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUG)
+#define _TIF_WORK_CTXSW_PREV (_TIF_IO_BITMAP | _TIF_NOTSC)
/*
* Thread-synchronous status.
diff --git a/include/asm-i386/timer.h b/include/asm-i386/timer.h
index 153770e25faa..0db7e994fb8b 100644
--- a/include/asm-i386/timer.h
+++ b/include/asm-i386/timer.h
@@ -5,18 +5,46 @@
#define TICK_SIZE (tick_nsec / 1000)
-void setup_pit_timer(void);
unsigned long long native_sched_clock(void);
unsigned long native_calculate_cpu_khz(void);
extern int timer_ack;
extern int no_timer_check;
-extern int no_sync_cmos_clock;
extern int recalibrate_cpu_khz(void);
#ifndef CONFIG_PARAVIRT
-#define get_scheduled_cycles(val) rdtscll(val)
#define calculate_cpu_khz() native_calculate_cpu_khz()
#endif
+/* Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ * ns = cycles / (freq / ns_per_sec)
+ * ns = cycles * (ns_per_sec / freq)
+ * ns = cycles * (10^9 / (cpu_khz * 10^3))
+ * ns = cycles * (10^6 / cpu_khz)
+ *
+ * Then we use scaling math (suggested by george@mvista.com) to get:
+ * ns = cycles * (10^6 * SC / cpu_khz) / SC
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * And since SC is a constant power of two, we can convert the div
+ * into a shift.
+ *
+ * We can use khz divisor instead of mhz to keep a better precision, since
+ * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ * (mathieu.desnoyers@polymtl.ca)
+ *
+ * -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+extern unsigned long cyc2ns_scale __read_mostly;
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+ return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
+}
+
+
#endif
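A stand-alone illustration of the scaling math described in the comment above, assuming a 2 GHz TSC (cpu_khz = 2,000,000). set_cyc2ns_scale() here mirrors the formula ns = cycles * ((10^6 << 10) / cpu_khz) >> 10 but is only a sketch, not the kernel implementation:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10			/* 2^10, as in the header */

static unsigned long cyc2ns_scale;

static void set_cyc2ns_scale(unsigned long cpu_khz)
{
	/* ns = cycles * (10^6 / cpu_khz); pre-scale by 2^10 to keep precision */
	cyc2ns_scale = (1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	set_cyc2ns_scale(2000000);	/* assume a 2 GHz TSC */
	/* 2e9 cycles should print ~1e9 ns, i.e. about one second */
	printf("%llu ns\n", cycles_2_ns(2000000000ULL));
	return 0;
}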
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h
index fc525c5cd5a9..a50fa6741486 100644
--- a/include/asm-i386/tlbflush.h
+++ b/include/asm-i386/tlbflush.h
@@ -160,7 +160,11 @@ DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
native_flush_tlb_others(&mask, mm, va)
#endif
-#define flush_tlb_kernel_range(start, end) flush_tlb_all()
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index 7fc512d90ea8..19b2dafd0c81 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -67,7 +67,7 @@ static inline int node_to_first_cpu(int node)
return first_cpu(mask);
}
-#define pcibus_to_node(bus) ((long) (bus)->sysdata)
+#define pcibus_to_node(bus) ((struct pci_sysdata *)((bus)->sysdata))->node
#define pcibus_to_cpumask(bus) node_to_cpumask(pcibus_to_node(bus))
/* sched_domains SD_NODE_INIT for NUMAQ machines */
diff --git a/include/asm-i386/tsc.h b/include/asm-i386/tsc.h
index 62c091ffcccc..a4d806610b7f 100644
--- a/include/asm-i386/tsc.h
+++ b/include/asm-i386/tsc.h
@@ -63,6 +63,7 @@ extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern void init_tsc_clocksource(void);
+int check_tsc_unstable(void);
/*
* Boot-time check whether the TSCs are synchronized across
diff --git a/include/asm-i386/uaccess.h b/include/asm-i386/uaccess.h
index e2aa5e0d0cc7..d2a4f7be9c2c 100644
--- a/include/asm-i386/uaccess.h
+++ b/include/asm-i386/uaccess.h
@@ -581,7 +581,7 @@ long __must_check __strncpy_from_user(char *dst,
* If there is a limit on the length of a valid string, you may wish to
* consider using strnlen_user() instead.
*/
-#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
+#define strlen_user(str) strnlen_user(str, LONG_MAX)
long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h
index e84ace1ec8bf..9b15545eb9b5 100644
--- a/include/asm-i386/unistd.h
+++ b/include/asm-i386/unistd.h
@@ -329,10 +329,11 @@
#define __NR_signalfd 321
#define __NR_timerfd 322
#define __NR_eventfd 323
+#define __NR_fallocate 324
#ifdef __KERNEL__
-#define NR_syscalls 324
+#define NR_syscalls 325
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-i386/vmi_time.h b/include/asm-i386/vmi_time.h
index 213930b995cb..478188130328 100644
--- a/include/asm-i386/vmi_time.h
+++ b/include/asm-i386/vmi_time.h
@@ -49,7 +49,7 @@ extern struct vmi_timer_ops {
extern void __init vmi_time_init(void);
extern unsigned long vmi_get_wallclock(void);
extern int vmi_set_wallclock(unsigned long now);
-extern unsigned long long vmi_get_sched_cycles(void);
+extern unsigned long long vmi_sched_clock(void);
extern unsigned long vmi_cpu_khz(void);
#ifdef CONFIG_X86_LOCAL_APIC