Diffstat (limited to 'arch/i386/kernel/cpu')
-rw-r--r--  arch/i386/kernel/cpu/Makefile                      |   3
-rw-r--r--  arch/i386/kernel/cpu/amd.c                         |  18
-rw-r--r--  arch/i386/kernel/cpu/bugs.c                        |   1
-rw-r--r--  arch/i386/kernel/cpu/common.c                      |   4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig               |  31
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c        |  47
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c     |   6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/gx-suspmod.c          |   6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c            | 274
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.h            |  12
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c         |  29
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c  | 276
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-ich.c       |   4
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c                       |   2
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c             |  83
-rw-r--r--  arch/i386/kernel/cpu/mcheck/mce.c                  |  14
-rw-r--r--  arch/i386/kernel/cpu/mcheck/non-fatal.c            |   4
-rw-r--r--  arch/i386/kernel/cpu/mcheck/therm_throt.c          |   6
-rw-r--r--  arch/i386/kernel/cpu/mtrr/cyrix.c                  |   1
-rw-r--r--  arch/i386/kernel/cpu/mtrr/generic.c                |   2
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c                   |   2
-rw-r--r--  arch/i386/kernel/cpu/mtrr/state.c                  |   1
-rw-r--r--  arch/i386/kernel/cpu/perfctr-watchdog.c            |  30
-rw-r--r--  arch/i386/kernel/cpu/proc.c                        |  21
-rw-r--r--  arch/i386/kernel/cpu/rise.c                        |  52
25 files changed, 416 insertions, 513 deletions
diff --git a/arch/i386/kernel/cpu/Makefile b/arch/i386/kernel/cpu/Makefile
index 74f27a463db0..778396c78d65 100644
--- a/arch/i386/kernel/cpu/Makefile
+++ b/arch/i386/kernel/cpu/Makefile
@@ -8,8 +8,7 @@ obj-y += amd.o
obj-y += cyrix.o
obj-y += centaur.o
obj-y += transmeta.o
-obj-y += intel.o intel_cacheinfo.o
-obj-y += rise.o
+obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o
obj-y += nexgen.o
obj-y += umc.o
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 6f47eeeb93ea..dcf6bbb1c7c0 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -3,6 +3,7 @@
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/processor.h>
+#include <asm/apic.h>
#include "cpu.h"
@@ -22,6 +23,7 @@
extern void vide(void);
__asm__(".align 4\nvide: ret");
+#ifdef CONFIG_X86_LOCAL_APIC
#define ENABLE_C1E_MASK 0x18000000
#define CPUID_PROCESSOR_SIGNATURE 1
#define CPUID_XFAM 0x0ff00000
@@ -52,6 +54,7 @@ static __cpuinit int amd_apic_timer_broken(void)
}
return 0;
}
+#endif
int force_mwait __cpuinitdata;
@@ -231,6 +234,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
switch (c->x86) {
case 15:
+ /* Use K8 tuning for Fam10h and Fam11h */
+ case 0x10:
+ case 0x11:
set_bit(X86_FEATURE_K8, c->x86_capability);
break;
case 6:
@@ -272,11 +278,17 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
}
#endif
- if (cpuid_eax(0x80000000) >= 0x80000006)
- num_cache_leaves = 3;
+ if (cpuid_eax(0x80000000) >= 0x80000006) {
+ if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
+ num_cache_leaves = 4;
+ else
+ num_cache_leaves = 3;
+ }
+#ifdef CONFIG_X86_LOCAL_APIC
if (amd_apic_timer_broken())
- set_bit(X86_FEATURE_LAPIC_TIMER_BROKEN, c->x86_capability);
+ local_apic_timer_disabled = 1;
+#endif
if (c->x86 == 0x10 && !force_mwait)
clear_bit(X86_FEATURE_MWAIT, c->x86_capability);
diff --git a/arch/i386/kernel/cpu/bugs.c b/arch/i386/kernel/cpu/bugs.c
index 54428a2500f3..59266f03d1cd 100644
--- a/arch/i386/kernel/cpu/bugs.c
+++ b/arch/i386/kernel/cpu/bugs.c
@@ -11,6 +11,7 @@
*/
#include <linux/init.h>
#include <linux/utsname.h>
+#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/msr.h>
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 794d593c47eb..d506201d397c 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -353,6 +353,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
if ( xlvl >= 0x80000004 )
get_model_name(c); /* Default name */
}
+
+ init_scattered_cpuid_features(c);
}
early_intel_workaround(c);
@@ -604,7 +606,6 @@ extern int nsc_init_cpu(void);
extern int amd_init_cpu(void);
extern int centaur_init_cpu(void);
extern int transmeta_init_cpu(void);
-extern int rise_init_cpu(void);
extern int nexgen_init_cpu(void);
extern int umc_init_cpu(void);
@@ -616,7 +617,6 @@ void __init early_cpu_init(void)
amd_init_cpu();
centaur_init_cpu();
transmeta_init_cpu();
- rise_init_cpu();
nexgen_init_cpu();
umc_init_cpu();
early_cpu_detect();
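
The init_scattered_cpuid_features() call added above is provided by the new addon_cpuid_features.o object from the Makefile hunk; that file itself is not part of this diff. As an illustration only, here is a hypothetical user-space sketch of the general pattern behind such "scattered" feature detection: walk a table of (leaf, register, bit) entries and report each bit that is set. The table entries and flag names below are examples, not the kernel's actual table.

/*
 * Hypothetical illustration of "scattered" CPUID feature detection:
 * each feature lives in a single bit of some CPUID leaf/register, so a
 * small table of (leaf, register, bit) entries is walked and matching
 * bits are collected.  Entries are examples only; the real
 * addon_cpuid_features.c is not shown in this diff.
 */
#include <stdio.h>
#include <cpuid.h>              /* GCC's __get_cpuid() */

enum cpuid_reg { REG_EAX, REG_EBX, REG_ECX, REG_EDX };

struct scattered_bit {
	const char    *name;    /* flag name to report */
	unsigned int   leaf;    /* CPUID leaf to query */
	enum cpuid_reg reg;     /* which output register */
	unsigned int   bit;     /* bit position within that register */
};

/* Example entries only -- not the kernel's actual table. */
static const struct scattered_bit bits[] = {
	{ "ida (example)",  0x00000006, REG_EAX, 1 },
	{ "arat (example)", 0x00000006, REG_EAX, 2 },
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx, regs[4];
	size_t i;

	for (i = 0; i < sizeof(bits) / sizeof(bits[0]); i++) {
		if (!__get_cpuid(bits[i].leaf, &eax, &ebx, &ecx, &edx))
			continue;               /* leaf not supported */
		regs[REG_EAX] = eax;
		regs[REG_EBX] = ebx;
		regs[REG_ECX] = ecx;
		regs[REG_EDX] = edx;
		if (regs[bits[i].reg] & (1u << bits[i].bit))
			printf("%s\n", bits[i].name);
	}
	return 0;
}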
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index e912aae9473c..d8c6f132dc7a 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -90,10 +90,17 @@ config X86_POWERNOW_K8
If in doubt, say N.
config X86_POWERNOW_K8_ACPI
- bool
- depends on X86_POWERNOW_K8 && ACPI_PROCESSOR
- depends on !(X86_POWERNOW_K8 = y && ACPI_PROCESSOR = m)
+ bool "ACPI Support"
+ select ACPI_PROCESSOR
+ depends on ACPI && X86_POWERNOW_K8
default y
+ help
+ This provides access to the K8s Processor Performance States via ACPI.
+ This driver is probably required for CPUFreq to work with multi-socket and
+ SMP systems. It is not required on at least some single-socket yet
+ multi-core systems, even if SMP is enabled.
+
+ It is safe to say Y here.
config X86_GX_SUSPMOD
tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
@@ -109,7 +116,7 @@ config X86_GX_SUSPMOD
config X86_SPEEDSTEP_CENTRINO
tristate "Intel Enhanced SpeedStep"
select CPU_FREQ_TABLE
- select X86_SPEEDSTEP_CENTRINO_TABLE if (!X86_SPEEDSTEP_CENTRINO_ACPI)
+ select X86_SPEEDSTEP_CENTRINO_TABLE
help
This adds the CPUFreq driver for Enhanced SpeedStep enabled
mobile CPUs. This means Intel Pentium M (Centrino) CPUs. However,
@@ -121,20 +128,6 @@ config X86_SPEEDSTEP_CENTRINO
If in doubt, say N.
-config X86_SPEEDSTEP_CENTRINO_ACPI
- bool "Use ACPI tables to decode valid frequency/voltage (deprecated)"
- depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR
- depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m)
- help
- This is deprecated and this functionality is now merged into
- acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
- speedstep_centrino.
- Use primarily the information provided in the BIOS ACPI tables
- to determine valid CPU frequency and voltage pairings. It is
- required for the driver to work on non-Banias CPUs.
-
- If in doubt, say Y.
-
config X86_SPEEDSTEP_CENTRINO_TABLE
bool "Built-in tables for Banias CPUs"
depends on X86_SPEEDSTEP_CENTRINO
@@ -230,7 +223,7 @@ comment "shared options"
config X86_ACPI_CPUFREQ_PROC_INTF
bool "/proc/acpi/processor/../performance interface (deprecated)"
depends on PROC_FS
- depends on X86_ACPI_CPUFREQ || X86_SPEEDSTEP_CENTRINO_ACPI || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
+ depends on X86_ACPI_CPUFREQ || X86_POWERNOW_K7_ACPI || X86_POWERNOW_K8_ACPI
help
This enables the deprecated /proc/acpi/processor/../performance
interface. While it is helpful for debugging, the generic,
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 18c8b67ea3a7..705e13a30781 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -68,7 +68,8 @@ struct acpi_cpufreq_data {
};
static struct acpi_cpufreq_data *drv_data[NR_CPUS];
-static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
+/* acpi_perf_data is a pointer to percpu data. */
+static struct acpi_processor_performance *acpi_perf_data;
static struct cpufreq_driver acpi_cpufreq_driver;
@@ -508,26 +509,14 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
* do _PDC and _PSD and find out the processor dependency for the
* actual init that will happen later...
*/
-static int acpi_cpufreq_early_init(void)
+static int __init acpi_cpufreq_early_init(void)
{
- struct acpi_processor_performance *data;
- cpumask_t covered;
- unsigned int i, j;
-
dprintk("acpi_cpufreq_early_init\n");
- for_each_possible_cpu(i) {
- data = kzalloc(sizeof(struct acpi_processor_performance),
- GFP_KERNEL);
- if (!data) {
- for_each_cpu_mask(j, covered) {
- kfree(acpi_perf_data[j]);
- acpi_perf_data[j] = NULL;
- }
- return -ENOMEM;
- }
- acpi_perf_data[i] = data;
- cpu_set(i, covered);
+ acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
+ if (!acpi_perf_data) {
+ dprintk("Memory allocation error for acpi_perf_data.\n");
+ return -ENOMEM;
}
/* Do initialization in ACPI core */
@@ -576,14 +565,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
dprintk("acpi_cpufreq_cpu_init\n");
- if (!acpi_perf_data[cpu])
- return -ENODEV;
-
data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->acpi_data = acpi_perf_data[cpu];
+ data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
drv_data[cpu] = data;
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
@@ -665,8 +651,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
data->max_freq = perf->states[0].core_frequency * 1000;
/* table init */
for (i=0; i<perf->state_count; i++) {
- if (i>0 && perf->states[i].core_frequency ==
- perf->states[i-1].core_frequency)
+ if (i>0 && perf->states[i].core_frequency >=
+ data->freq_table[valid_states-1].frequency / 1000)
continue;
data->freq_table[valid_states].index = i;
@@ -780,24 +766,25 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
static int __init acpi_cpufreq_init(void)
{
+ int ret;
+
dprintk("acpi_cpufreq_init\n");
- acpi_cpufreq_early_init();
+ ret = acpi_cpufreq_early_init();
+ if (ret)
+ return ret;
return cpufreq_register_driver(&acpi_cpufreq_driver);
}
static void __exit acpi_cpufreq_exit(void)
{
- unsigned int i;
dprintk("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
- for_each_possible_cpu(i) {
- kfree(acpi_perf_data[i]);
- acpi_perf_data[i] = NULL;
- }
+ free_percpu(acpi_perf_data);
+
return;
}
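
The hunks above drop the hand-rolled NR_CPUS array of kzalloc()ed acpi_processor_performance structures in favour of the kernel's dynamic per-CPU allocator: one alloc_percpu() at early init, percpu_ptr() per CPU at cpu_init time, one free_percpu() on exit. A minimal stand-alone module sketch of that allocation pattern, using the same three calls the hunk uses (the structure and module below are illustrative and not part of acpi-cpufreq):

/*
 * Minimal sketch of the per-CPU allocation pattern adopted above, as a
 * stand-alone module for illustration.  One object is allocated for
 * every possible CPU in a single call, accessed per CPU, and released
 * in one go.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct perf_sample {
	unsigned long transitions;
};

static struct perf_sample *samples;	/* pointer to per-CPU data */

static int __init percpu_sketch_init(void)
{
	int cpu;

	samples = alloc_percpu(struct perf_sample);
	if (!samples)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		percpu_ptr(samples, cpu)->transitions = 0;

	return 0;
}

static void __exit percpu_sketch_exit(void)
{
	free_percpu(samples);		/* releases every CPU's copy */
}

module_init(percpu_sketch_init);
module_exit(percpu_sketch_exit);
MODULE_LICENSE("GPL");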
diff --git a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 0d49d73d1b71..66acd5039918 100644
--- a/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/i386/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -391,8 +391,6 @@ static struct cpufreq_driver nforce2_driver = {
*/
static unsigned int nforce2_detect_chipset(void)
{
- u8 revision;
-
nforce2_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_NFORCE2,
PCI_ANY_ID, PCI_ANY_ID, NULL);
@@ -400,10 +398,8 @@ static unsigned int nforce2_detect_chipset(void)
if (nforce2_chipset_dev == NULL)
return -ENODEV;
- pci_read_config_byte(nforce2_chipset_dev, PCI_REVISION_ID, &revision);
-
printk(KERN_INFO "cpufreq: Detected nForce2 chipset revision %X\n",
- revision);
+ nforce2_chipset_dev->revision);
printk(KERN_INFO
"cpufreq: FSB changing is maybe unstable and can lead to crashes and data loss.\n");
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index 6667e9cceb9f..461dabc4e495 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -79,7 +79,7 @@
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/pci.h>
-#include <asm/processor.h>
+#include <asm/processor-cyrix.h>
#include <asm/errno.h>
/* PCI config registers, all at F0 */
@@ -115,7 +115,6 @@ struct gxfreq_params {
u8 pci_suscfg;
u8 pci_pmer1;
u8 pci_pmer2;
- u8 pci_rev;
struct pci_dev *cs55x0;
};
@@ -276,7 +275,7 @@ static void gx_set_cpuspeed(unsigned int khz)
pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */
pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1);
- if (gx_params->pci_rev < 0x10) { /* CS5530(rev 1.2, 1.3) */
+ if (gx_params->cs55x0->revision < 0x10) { /* CS5530(rev 1.2, 1.3) */
suscfg = gx_params->pci_suscfg | SUSMOD;
} else { /* CS5530A,B.. */
suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE;
@@ -471,7 +470,6 @@ static int __init cpufreq_gx_init(void)
pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration));
- pci_read_config_byte(params->cs55x0, PCI_REVISION_ID, &params->pci_rev);
if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
kfree(params);
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index a3df9c039bd4..f0cce3c2dc3a 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -29,6 +29,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/delay.h>
#include <asm/msr.h>
#include <asm/timex.h>
@@ -55,7 +56,6 @@
/* Flags */
#define USE_ACPI_C3 (1 << 1)
#define USE_NORTHBRIDGE (1 << 2)
-#define USE_VT8235 (1 << 3)
static int cpu_model;
static unsigned int numscales=16;
@@ -63,22 +63,20 @@ static unsigned int fsb;
static const struct mV_pos *vrm_mV_table;
static const unsigned char *mV_vrm_table;
-struct f_msr {
- u8 vrm;
- u8 pos;
-};
-static struct f_msr f_msr_table[32];
static unsigned int highest_speed, lowest_speed; /* kHz */
static unsigned int minmult, maxmult;
static int can_scale_voltage;
static struct acpi_processor *pr = NULL;
static struct acpi_processor_cx *cx = NULL;
+static u32 acpi_regs_addr;
static u8 longhaul_flags;
-static u8 longhaul_pos;
+static unsigned int longhaul_index;
/* Module parameters */
static int scale_voltage;
+static int disable_acpi_c3;
+static int revid_errata;
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)
@@ -144,7 +142,7 @@ static void do_longhaul1(unsigned int clock_ratio_index)
rdmsrl(MSR_VIA_BCR2, bcr2.val);
/* Enable software clock multiplier */
bcr2.bits.ESOFTBF = 1;
- bcr2.bits.CLOCKMUL = clock_ratio_index;
+ bcr2.bits.CLOCKMUL = clock_ratio_index & 0xff;
/* Sync to timer tick */
safe_halt();
@@ -163,26 +161,27 @@ static void do_longhaul1(unsigned int clock_ratio_index)
/* For processor with Longhaul MSR */
-static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
+static void do_powersaver(int cx_address, unsigned int clock_ratio_index,
+ unsigned int dir)
{
union msr_longhaul longhaul;
- u8 dest_pos;
u32 t;
- dest_pos = f_msr_table[clock_ratio_index].pos;
-
rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Setup new frequency */
- longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ if (!revid_errata)
+ longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
+ else
+ longhaul.bits.RevisionKey = 0;
longhaul.bits.SoftBusRatio = clock_ratio_index & 0xf;
longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
/* Setup new voltage */
if (can_scale_voltage)
- longhaul.bits.SoftVID = f_msr_table[clock_ratio_index].vrm;
+ longhaul.bits.SoftVID = (clock_ratio_index >> 8) & 0x1f;
/* Sync to timer tick */
safe_halt();
/* Raise voltage if necessary */
- if (can_scale_voltage && longhaul_pos < dest_pos) {
+ if (can_scale_voltage && dir) {
longhaul.bits.EnableSoftVID = 1;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
@@ -199,7 +198,6 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
}
longhaul.bits.EnableSoftVID = 0;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
- longhaul_pos = dest_pos;
}
/* Change frequency on next halt or sleep */
@@ -220,7 +218,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Reduce voltage if necessary */
- if (can_scale_voltage && longhaul_pos > dest_pos) {
+ if (can_scale_voltage && !dir) {
longhaul.bits.EnableSoftVID = 1;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
/* Change voltage */
@@ -237,7 +235,6 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
}
longhaul.bits.EnableSoftVID = 0;
wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
- longhaul_pos = dest_pos;
}
}
@@ -248,25 +245,28 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
* Sets a new clock ratio.
*/
-static void longhaul_setstate(unsigned int clock_ratio_index)
+static void longhaul_setstate(unsigned int table_index)
{
+ unsigned int clock_ratio_index;
int speed, mult;
struct cpufreq_freqs freqs;
- static unsigned int old_ratio=-1;
unsigned long flags;
unsigned int pic1_mask, pic2_mask;
+ u16 bm_status = 0;
+ u32 bm_timeout = 1000;
+ unsigned int dir = 0;
- if (old_ratio == clock_ratio_index)
- return;
- old_ratio = clock_ratio_index;
-
- mult = clock_ratio[clock_ratio_index];
+ clock_ratio_index = longhaul_table[table_index].index;
+ /* Safety precautions */
+ mult = clock_ratio[clock_ratio_index & 0x1f];
if (mult == -1)
return;
-
speed = calc_speed(mult);
if ((speed > highest_speed) || (speed < lowest_speed))
return;
+ /* Voltage transition before frequency transition? */
+ if (can_scale_voltage && longhaul_index < table_index)
+ dir = 1;
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
@@ -276,7 +276,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
dprintk ("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
-
+retry_loop:
preempt_disable();
local_irq_save(flags);
@@ -285,11 +285,24 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
outb(0xFF,0xA1); /* Overkill */
outb(0xFE,0x21); /* TMR0 only */
+ /* Wait while PCI bus is busy. */
+ if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE
+ || ((pr != NULL) && pr->flags.bm_control))) {
+ bm_status = inw(acpi_regs_addr);
+ bm_status &= 1 << 4;
+ while (bm_status && bm_timeout) {
+ outw(1 << 4, acpi_regs_addr);
+ bm_timeout--;
+ bm_status = inw(acpi_regs_addr);
+ bm_status &= 1 << 4;
+ }
+ }
+
if (longhaul_flags & USE_NORTHBRIDGE) {
/* Disable AGP and PCI arbiters */
outb(3, 0x22);
} else if ((pr != NULL) && pr->flags.bm_control) {
- /* Disable bus master arbitration */
+ /* Disable bus master arbitration */
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
}
switch (longhaul_version) {
@@ -314,9 +327,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
if (longhaul_flags & USE_ACPI_C3) {
/* Don't allow wakeup */
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
- do_powersaver(cx->address, clock_ratio_index);
+ do_powersaver(cx->address, clock_ratio_index, dir);
} else {
- do_powersaver(0, clock_ratio_index);
+ do_powersaver(0, clock_ratio_index, dir);
}
break;
}
@@ -335,7 +348,51 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
preempt_enable();
freqs.new = calc_speed(longhaul_get_cpu_mult());
+ /* Check if requested frequency is set. */
+ if (unlikely(freqs.new != speed)) {
+ printk(KERN_INFO PFX "Failed to set requested frequency!\n");
+ /* Revision ID = 1 but processor is expecting revision key
+ * equal to 0. Jumpers at the bottom of processor will change
+ * multiplier and FSB, but will not change bits in Longhaul
+ * MSR nor enable voltage scaling. */
+ if (!revid_errata) {
+ printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
+ "option.\n");
+ revid_errata = 1;
+ msleep(200);
+ goto retry_loop;
+ }
+ /* Why ACPI C3 sometimes doesn't work is a mystery for me.
+ * But it does happen. Processor is entering ACPI C3 state,
+ * but it doesn't change frequency. I tried poking various
+ * bits in northbridge registers, but without success. */
+ if (longhaul_flags & USE_ACPI_C3) {
+ printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
+ longhaul_flags &= ~USE_ACPI_C3;
+ if (revid_errata) {
+ printk(KERN_INFO PFX "Disabling \"Ignore "
+ "Revision ID\" option.\n");
+ revid_errata = 0;
+ }
+ msleep(200);
+ goto retry_loop;
+ }
+ /* This shouldn't happen. Longhaul ver. 2 was reported not
+ * working on processors without voltage scaling, but with
+ * RevID = 1. RevID errata will make things right. Just
+ * to be 100% sure. */
+ if (longhaul_version == TYPE_LONGHAUL_V2) {
+ printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
+ longhaul_version = TYPE_LONGHAUL_V1;
+ msleep(200);
+ goto retry_loop;
+ }
+ }
+ /* Report true CPU frequency */
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ if (!bm_timeout)
+ printk(KERN_INFO PFX "Warning: Timeout while waiting for idle PCI bus.\n");
}
/*
@@ -369,7 +426,8 @@ static int guess_fsb(int mult)
static int __init longhaul_get_ranges(void)
{
- unsigned int j, k = 0;
+ unsigned int i, j, k = 0;
+ unsigned int ratio;
int mult;
/* Get current frequency */
@@ -423,8 +481,7 @@ static int __init longhaul_get_ranges(void)
if(!longhaul_table)
return -ENOMEM;
- for (j=0; j < numscales; j++) {
- unsigned int ratio;
+ for (j = 0; j < numscales; j++) {
ratio = clock_ratio[j];
if (ratio == -1)
continue;
@@ -434,13 +491,41 @@ static int __init longhaul_get_ranges(void)
longhaul_table[k].index = j;
k++;
}
+ if (k <= 1) {
+ kfree(longhaul_table);
+ return -ENODEV;
+ }
+ /* Sort */
+ for (j = 0; j < k - 1; j++) {
+ unsigned int min_f, min_i;
+ min_f = longhaul_table[j].frequency;
+ min_i = j;
+ for (i = j + 1; i < k; i++) {
+ if (longhaul_table[i].frequency < min_f) {
+ min_f = longhaul_table[i].frequency;
+ min_i = i;
+ }
+ }
+ if (min_i != j) {
+ unsigned int temp;
+ temp = longhaul_table[j].frequency;
+ longhaul_table[j].frequency = longhaul_table[min_i].frequency;
+ longhaul_table[min_i].frequency = temp;
+ temp = longhaul_table[j].index;
+ longhaul_table[j].index = longhaul_table[min_i].index;
+ longhaul_table[min_i].index = temp;
+ }
+ }
longhaul_table[k].frequency = CPUFREQ_TABLE_END;
- if (!k) {
- kfree (longhaul_table);
- return -EINVAL;
- }
+ /* Find index we are running on */
+ for (j = 0; j < k; j++) {
+ if (clock_ratio[longhaul_table[j].index & 0x1f] == mult) {
+ longhaul_index = j;
+ break;
+ }
+ }
return 0;
}
@@ -448,7 +533,7 @@ static int __init longhaul_get_ranges(void)
static void __init longhaul_setup_voltagescaling(void)
{
union msr_longhaul longhaul;
- struct mV_pos minvid, maxvid;
+ struct mV_pos minvid, maxvid, vid;
unsigned int j, speed, pos, kHz_step, numvscales;
int min_vid_speed;
@@ -459,11 +544,11 @@ static void __init longhaul_setup_voltagescaling(void)
}
if (!longhaul.bits.VRMRev) {
- printk (KERN_INFO PFX "VRM 8.5\n");
+ printk(KERN_INFO PFX "VRM 8.5\n");
vrm_mV_table = &vrm85_mV[0];
mV_vrm_table = &mV_vrm85[0];
} else {
- printk (KERN_INFO PFX "Mobile VRM\n");
+ printk(KERN_INFO PFX "Mobile VRM\n");
if (cpu_model < CPU_NEHEMIAH)
return;
vrm_mV_table = &mobilevrm_mV[0];
@@ -523,7 +608,6 @@ static void __init longhaul_setup_voltagescaling(void)
/* Calculate kHz for one voltage step */
kHz_step = (highest_speed - min_vid_speed) / numvscales;
-
j = 0;
while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
speed = longhaul_table[j].frequency;
@@ -531,15 +615,14 @@ static void __init longhaul_setup_voltagescaling(void)
pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
else
pos = minvid.pos;
- f_msr_table[longhaul_table[j].index].vrm = mV_vrm_table[pos];
- f_msr_table[longhaul_table[j].index].pos = pos;
+ longhaul_table[j].index |= mV_vrm_table[pos] << 8;
+ vid = vrm_mV_table[mV_vrm_table[pos]];
+ printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", speed, j, vid.mV);
j++;
}
- longhaul_pos = maxvid.pos;
can_scale_voltage = 1;
- printk(KERN_INFO PFX "Voltage scaling enabled. "
- "Use of \"conservative\" governor is highly recommended.\n");
+ printk(KERN_INFO PFX "Voltage scaling enabled.\n");
}
@@ -553,15 +636,44 @@ static int longhaul_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
unsigned int table_index = 0;
- unsigned int new_clock_ratio = 0;
+ unsigned int i;
+ unsigned int dir = 0;
+ u8 vid, current_vid;
if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index))
return -EINVAL;
- new_clock_ratio = longhaul_table[table_index].index & 0xFF;
-
- longhaul_setstate(new_clock_ratio);
+ /* Don't set same frequency again */
+ if (longhaul_index == table_index)
+ return 0;
+ if (!can_scale_voltage)
+ longhaul_setstate(table_index);
+ else {
+ /* On test system voltage transitions exceeding single
+ * step up or down were turning motherboard off. Both
+ * "ondemand" and "userspace" are unsafe. C7 is doing
+ * this in hardware, C3 is old and we need to do this
+ * in software. */
+ i = longhaul_index;
+ current_vid = (longhaul_table[longhaul_index].index >> 8) & 0x1f;
+ if (table_index > longhaul_index)
+ dir = 1;
+ while (i != table_index) {
+ vid = (longhaul_table[i].index >> 8) & 0x1f;
+ if (vid != current_vid) {
+ longhaul_setstate(i);
+ current_vid = vid;
+ msleep(200);
+ }
+ if (dir)
+ i++;
+ else
+ i--;
+ }
+ longhaul_setstate(table_index);
+ }
+ longhaul_index = table_index;
return 0;
}
@@ -590,11 +702,10 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
static int enable_arbiter_disable(void)
{
struct pci_dev *dev;
- int status;
+ int status = 1;
int reg;
u8 pci_cmd;
- status = 1;
/* Find PLE133 host bridge */
reg = 0x78;
dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
@@ -627,13 +738,17 @@ static int enable_arbiter_disable(void)
return 0;
}
-static int longhaul_setup_vt8235(void)
+static int longhaul_setup_southbridge(void)
{
struct pci_dev *dev;
u8 pci_cmd;
/* Find VT8235 southbridge */
dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
+ if (dev == NULL)
+ /* Find VT8237 southbridge */
+ dev = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8237, NULL);
if (dev != NULL) {
/* Set transition time to max */
pci_read_config_byte(dev, 0xec, &pci_cmd);
@@ -645,6 +760,14 @@ static int longhaul_setup_vt8235(void)
pci_read_config_byte(dev, 0xe5, &pci_cmd);
pci_cmd |= 1 << 7;
pci_write_config_byte(dev, 0xe5, pci_cmd);
+ /* Get address of ACPI registers block*/
+ pci_read_config_byte(dev, 0x81, &pci_cmd);
+ if (pci_cmd & 1 << 7) {
+ pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
+ acpi_regs_addr &= 0xff00;
+ printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", acpi_regs_addr);
+ }
+
pci_dev_put(dev);
return 1;
}
@@ -657,7 +780,6 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
char *cpuname=NULL;
int ret;
u32 lo, hi;
- int vt8235_present;
/* Check what we have on this motherboard */
switch (c->x86_model) {
@@ -755,7 +877,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
};
/* Doesn't hurt */
- vt8235_present = longhaul_setup_vt8235();
+ longhaul_setup_southbridge();
/* Find ACPI data for processor */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
@@ -765,35 +887,29 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
/* Check ACPI support for C3 state */
if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
cx = &pr->power.states[ACPI_STATE_C3];
- if (cx->address > 0 && cx->latency <= 1000) {
+ if (cx->address > 0 && cx->latency <= 1000)
longhaul_flags |= USE_ACPI_C3;
- goto print_support_type;
- }
}
+ /* Disable if it isn't working */
+ if (disable_acpi_c3)
+ longhaul_flags &= ~USE_ACPI_C3;
/* Check if northbridge is friendly */
- if (enable_arbiter_disable()) {
+ if (enable_arbiter_disable())
longhaul_flags |= USE_NORTHBRIDGE;
- goto print_support_type;
- }
- /* Use VT8235 southbridge if present */
- if (longhaul_version == TYPE_POWERSAVER && vt8235_present) {
- longhaul_flags |= USE_VT8235;
- goto print_support_type;
- }
+
/* Check ACPI support for bus master arbiter disable */
- if ((pr == NULL) || !(pr->flags.bm_control)) {
+ if (!(longhaul_flags & USE_ACPI_C3
+ || longhaul_flags & USE_NORTHBRIDGE)
+ && ((pr == NULL) || !(pr->flags.bm_control))) {
printk(KERN_ERR PFX
"No ACPI support. Unsupported northbridge.\n");
return -ENODEV;
}
-print_support_type:
if (longhaul_flags & USE_NORTHBRIDGE)
- printk (KERN_INFO PFX "Using northbridge support.\n");
- else if (longhaul_flags & USE_VT8235)
- printk (KERN_INFO PFX "Using VT8235 support.\n");
- else
- printk (KERN_INFO PFX "Using ACPI support.\n");
+ printk(KERN_INFO PFX "Using northbridge support.\n");
+ if (longhaul_flags & USE_ACPI_C3)
+ printk(KERN_INFO PFX "Using ACPI support.\n");
ret = longhaul_get_ranges();
if (ret != 0)
@@ -885,8 +1001,20 @@ static void __exit longhaul_exit(void)
kfree(longhaul_table);
}
+/* Even if BIOS is exporting ACPI C3 state, and it is used
+ * with success when CPU is idle, this state doesn't
+ * trigger frequency transition in some cases. */
+module_param (disable_acpi_c3, int, 0644);
+MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
+/* Change CPU voltage with frequency. Very usefull to save
+ * power, but most VIA C3 processors aren't supporting it. */
module_param (scale_voltage, int, 0644);
MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
+/* Force revision key to 0 for processors which doesn't
+ * support voltage scaling, but are introducing itself as
+ * such. */
+module_param(revid_errata, int, 0644);
+MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
MODULE_AUTHOR ("Dave Jones <davej@codemonkey.org.uk>");
MODULE_DESCRIPTION ("Longhaul driver for VIA Cyrix processors.");
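
With f_msr_table[] removed, the reworked driver packs everything for one operating point into the frequency table's index word: the clock-ratio selector stays in the low bits (masked with 0x1f) and, once voltage scaling is enabled, the VRM VID is stored in bits 8..12 (index |= mV_vrm_table[pos] << 8, read back as (index >> 8) & 0x1f). That encoding is also what lets longhaul_target() step one VID at a time between neighbouring table entries. A stand-alone sketch of the packing, with made-up values:

/*
 * Stand-alone sketch of the index packing used by the reworked longhaul
 * driver: one unsigned int per table entry holds the clock-ratio
 * selector in bits 0..4 and the VRM voltage ID in bits 8..12.  The
 * values in main() are made up for illustration.
 */
#include <stdio.h>

static unsigned int pack_index(unsigned int ratio_sel, unsigned int vid)
{
	return (ratio_sel & 0x1f) | ((vid & 0x1f) << 8);
}

static unsigned int ratio_of(unsigned int index) { return index & 0x1f; }
static unsigned int vid_of(unsigned int index)   { return (index >> 8) & 0x1f; }

int main(void)
{
	unsigned int index = pack_index(0x0b, 0x12);	/* example selector/VID */

	printf("index=0x%04x ratio_sel=0x%02x vid=0x%02x\n",
	       index, ratio_of(index), vid_of(index));
	return 0;
}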
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.h b/arch/i386/kernel/cpu/cpufreq/longhaul.h
index 102548f12842..4fcc320997df 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.h
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.h
@@ -180,7 +180,7 @@ static const int __initdata ezrat_clock_ratio[32] = {
-1, /* 0000 -> RESERVED (10.0x) */
110, /* 0001 -> 11.0x */
- 120, /* 0010 -> 12.0x */
+ -1, /* 0010 -> 12.0x */
-1, /* 0011 -> RESERVED (9.0x)*/
105, /* 0100 -> 10.5x */
115, /* 0101 -> 11.5x */
@@ -237,7 +237,7 @@ static const int __initdata ezrat_eblcr[32] = {
static const int __initdata nehemiah_clock_ratio[32] = {
100, /* 0000 -> 10.0x */
- 160, /* 0001 -> 16.0x */
+ -1, /* 0001 -> 16.0x */
40, /* 0010 -> 4.0x */
90, /* 0011 -> 9.0x */
95, /* 0100 -> 9.5x */
@@ -252,10 +252,10 @@ static const int __initdata nehemiah_clock_ratio[32] = {
75, /* 1101 -> 7.5x */
85, /* 1110 -> 8.5x */
120, /* 1111 -> 12.0x */
- 100, /* 0000 -> 10.0x */
+ -1, /* 0000 -> 10.0x */
110, /* 0001 -> 11.0x */
- 120, /* 0010 -> 12.0x */
- 90, /* 0011 -> 9.0x */
+ -1, /* 0010 -> 12.0x */
+ -1, /* 0011 -> 9.0x */
105, /* 0100 -> 10.5x */
115, /* 0101 -> 11.5x */
125, /* 0110 -> 12.5x */
@@ -267,7 +267,7 @@ static const int __initdata nehemiah_clock_ratio[32] = {
145, /* 1100 -> 14.5x */
155, /* 1101 -> 15.5x */
-1, /* 1110 -> RESERVED (13.0x) */
- 120, /* 1111 -> 12.0x */
+ -1, /* 1111 -> 12.0x */
};
static const int __initdata nehemiah_eblcr[32] = {
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 4ade55c5f333..34ed53a06730 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -599,14 +599,17 @@ static void print_basics(struct powernow_k8_data *data)
for (j = 0; j < data->numps; j++) {
if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) {
if (cpu_family == CPU_HW_PSTATE) {
- printk(KERN_INFO PFX " %d : fid 0x%x gid 0x%x (%d MHz)\n", j, (data->powernow_table[j].index & 0xff00) >> 8,
- (data->powernow_table[j].index & 0xff0000) >> 16,
- data->powernow_table[j].frequency/1000);
+ printk(KERN_INFO PFX " %d : fid 0x%x did 0x%x (%d MHz)\n",
+ j,
+ (data->powernow_table[j].index & 0xff00) >> 8,
+ (data->powernow_table[j].index & 0xff0000) >> 16,
+ data->powernow_table[j].frequency/1000);
} else {
- printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x\n", j,
- data->powernow_table[j].index & 0xff,
- data->powernow_table[j].frequency/1000,
- data->powernow_table[j].index >> 8);
+ printk(KERN_INFO PFX " %d : fid 0x%x (%d MHz), vid 0x%x\n",
+ j,
+ data->powernow_table[j].index & 0xff,
+ data->powernow_table[j].frequency/1000,
+ data->powernow_table[j].index >> 8);
}
}
}
@@ -1086,7 +1089,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
if (cpu_family == CPU_HW_PSTATE)
dprintk("targ: curr fid 0x%x, did 0x%x\n",
- data->currfid, data->currvid);
+ data->currfid, data->currdid);
else {
dprintk("targ: curr fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
@@ -1322,16 +1325,22 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
static int __cpuinit powernowk8_init(void)
{
unsigned int i, supported_cpus = 0;
+ unsigned int booted_cores = 1;
for_each_online_cpu(i) {
if (check_supported_cpu(i))
supported_cpus++;
}
+#ifdef CONFIG_SMP
+ booted_cores = cpu_data[0].booted_cores;
+#endif
+
if (supported_cpus == num_online_cpus()) {
printk(KERN_INFO PFX "Found %d %s "
- "processors (" VERSION ")\n", supported_cpus,
- boot_cpu_data.x86_model_id);
+ "processors (%d cpu cores) (" VERSION ")\n",
+ supported_cpus/booted_cores,
+ boot_cpu_data.x86_model_id, supported_cpus);
return cpufreq_register_driver(&cpufreq_amd64_driver);
}
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index 35489fd68852..6c5dc2c85aeb 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -21,12 +21,6 @@
#include <linux/delay.h>
#include <linux/compiler.h>
-#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
-#include <linux/acpi.h>
-#include <linux/dmi.h>
-#include <acpi/processor.h>
-#endif
-
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
@@ -257,9 +251,7 @@ static int centrino_cpu_init_table(struct cpufreq_policy *policy)
/* Matched a non-match */
dprintk("no table support for CPU model \"%s\"\n",
cpu->x86_model_id);
-#ifndef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
- dprintk("try compiling with CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI enabled\n");
-#endif
+ dprintk("try using the acpi-cpufreq driver\n");
return -ENOENT;
}
@@ -346,213 +338,6 @@ static unsigned int get_cur_freq(unsigned int cpu)
}
-#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
-
-static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
-
-/*
- * centrino_cpu_early_init_acpi - Do the preregistering with ACPI P-States
- * library
- *
- * Before doing the actual init, we need to do _PSD related setup whenever
- * supported by the BIOS. These are handled by this early_init routine.
- */
-static int centrino_cpu_early_init_acpi(void)
-{
- unsigned int i, j;
- struct acpi_processor_performance *data;
-
- for_each_possible_cpu(i) {
- data = kzalloc(sizeof(struct acpi_processor_performance),
- GFP_KERNEL);
- if (!data) {
- for_each_possible_cpu(j) {
- kfree(acpi_perf_data[j]);
- acpi_perf_data[j] = NULL;
- }
- return (-ENOMEM);
- }
- acpi_perf_data[i] = data;
- }
-
- acpi_processor_preregister_performance(acpi_perf_data);
- return 0;
-}
-
-
-#ifdef CONFIG_SMP
-/*
- * Some BIOSes do SW_ANY coordination internally, either set it up in hw
- * or do it in BIOS firmware and won't inform about it to OS. If not
- * detected, this has a side effect of making CPU run at a different speed
- * than OS intended it to run at. Detect it and handle it cleanly.
- */
-static int bios_with_sw_any_bug;
-static int sw_any_bug_found(struct dmi_system_id *d)
-{
- bios_with_sw_any_bug = 1;
- return 0;
-}
-
-static struct dmi_system_id sw_any_bug_dmi_table[] = {
- {
- .callback = sw_any_bug_found,
- .ident = "Supermicro Server X6DLP",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
- DMI_MATCH(DMI_BIOS_VERSION, "080010"),
- DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
- },
- },
- { }
-};
-#endif
-
-/*
- * centrino_cpu_init_acpi - register with ACPI P-States library
- *
- * Register with the ACPI P-States library (part of drivers/acpi/processor.c)
- * in order to determine correct frequency and voltage pairings by reading
- * the _PSS of the ACPI DSDT or SSDT tables.
- */
-static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
-{
- unsigned long cur_freq;
- int result = 0, i;
- unsigned int cpu = policy->cpu;
- struct acpi_processor_performance *p;
-
- p = acpi_perf_data[cpu];
-
- /* register with ACPI core */
- if (acpi_processor_register_performance(p, cpu)) {
- dprintk(PFX "obtaining ACPI data failed\n");
- return -EIO;
- }
-
- policy->shared_type = p->shared_type;
- /*
- * Will let policy->cpus know about dependency only when software
- * coordination is required.
- */
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
- policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
- policy->cpus = p->shared_cpu_map;
- }
-
-#ifdef CONFIG_SMP
- dmi_check_system(sw_any_bug_dmi_table);
- if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
- policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- policy->cpus = cpu_core_map[cpu];
- }
-#endif
-
- /* verify the acpi_data */
- if (p->state_count <= 1) {
- dprintk("No P-States\n");
- result = -ENODEV;
- goto err_unreg;
- }
-
- if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
- (p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
- dprintk("Invalid control/status registers (%x - %x)\n",
- p->control_register.space_id, p->status_register.space_id);
- result = -EIO;
- goto err_unreg;
- }
-
- for (i=0; i<p->state_count; i++) {
- if ((p->states[i].control & INTEL_MSR_RANGE) !=
- (p->states[i].status & INTEL_MSR_RANGE)) {
- dprintk("Different MSR bits in control (%llu) and status (%llu)\n",
- p->states[i].control, p->states[i].status);
- result = -EINVAL;
- goto err_unreg;
- }
-
- if (!p->states[i].core_frequency) {
- dprintk("Zero core frequency for state %u\n", i);
- result = -EINVAL;
- goto err_unreg;
- }
-
- if (p->states[i].core_frequency > p->states[0].core_frequency) {
- dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
- p->states[i].core_frequency, p->states[0].core_frequency);
- p->states[i].core_frequency = 0;
- continue;
- }
- }
-
- centrino_model[cpu] = kzalloc(sizeof(struct cpu_model), GFP_KERNEL);
- if (!centrino_model[cpu]) {
- result = -ENOMEM;
- goto err_unreg;
- }
-
- centrino_model[cpu]->model_name=NULL;
- centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
- centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
- (p->state_count + 1), GFP_KERNEL);
- if (!centrino_model[cpu]->op_points) {
- result = -ENOMEM;
- goto err_kfree;
- }
-
- for (i=0; i<p->state_count; i++) {
- centrino_model[cpu]->op_points[i].index = p->states[i].control & INTEL_MSR_RANGE;
- centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
- dprintk("adding state %i with frequency %u and control value %04x\n",
- i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
- }
- centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;
-
- cur_freq = get_cur_freq(cpu);
-
- for (i=0; i<p->state_count; i++) {
- if (!p->states[i].core_frequency) {
- dprintk("skipping state %u\n", i);
- centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
- continue;
- }
-
- if (extract_clock(centrino_model[cpu]->op_points[i].index, cpu, 0) !=
- (centrino_model[cpu]->op_points[i].frequency)) {
- dprintk("Invalid encoded frequency (%u vs. %u)\n",
- extract_clock(centrino_model[cpu]->op_points[i].index, cpu, 0),
- centrino_model[cpu]->op_points[i].frequency);
- result = -EINVAL;
- goto err_kfree_all;
- }
-
- if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
- p->state = i;
- }
-
- /* notify BIOS that we exist */
- acpi_processor_notify_smm(THIS_MODULE);
- printk("speedstep-centrino with X86_SPEEDSTEP_CENTRINO_ACPI "
- "config is deprecated.\n "
- "Use X86_ACPI_CPUFREQ (acpi-cpufreq) instead.\n" );
-
- return 0;
-
- err_kfree_all:
- kfree(centrino_model[cpu]->op_points);
- err_kfree:
- kfree(centrino_model[cpu]);
- err_unreg:
- acpi_processor_unregister_performance(p, cpu);
- dprintk(PFX "invalid ACPI data\n");
- return (result);
-}
-#else
-static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
-static inline int centrino_cpu_early_init_acpi(void) { return 0; }
-#endif
-
static int centrino_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
@@ -568,27 +353,25 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
- if (centrino_cpu_init_acpi(policy)) {
- if (policy->cpu != 0)
- return -ENODEV;
+ if (policy->cpu != 0)
+ return -ENODEV;
- for (i = 0; i < N_IDS; i++)
- if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
- break;
+ for (i = 0; i < N_IDS; i++)
+ if (centrino_verify_cpu_id(cpu, &cpu_ids[i]))
+ break;
- if (i != N_IDS)
- centrino_cpu[policy->cpu] = &cpu_ids[i];
+ if (i != N_IDS)
+ centrino_cpu[policy->cpu] = &cpu_ids[i];
- if (!centrino_cpu[policy->cpu]) {
- dprintk("found unsupported CPU with "
- "Enhanced SpeedStep: send /proc/cpuinfo to "
- MAINTAINER "\n");
- return -ENODEV;
- }
+ if (!centrino_cpu[policy->cpu]) {
+ dprintk("found unsupported CPU with "
+ "Enhanced SpeedStep: send /proc/cpuinfo to "
+ MAINTAINER "\n");
+ return -ENODEV;
+ }
- if (centrino_cpu_init_table(policy)) {
- return -ENODEV;
- }
+ if (centrino_cpu_init_table(policy)) {
+ return -ENODEV;
}
/* Check to see if Enhanced SpeedStep is enabled, and try to
@@ -634,20 +417,6 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
cpufreq_frequency_table_put_attr(cpu);
-#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
- if (!centrino_model[cpu]->model_name) {
- static struct acpi_processor_performance *p;
-
- if (acpi_perf_data[cpu]) {
- p = acpi_perf_data[cpu];
- dprintk("unregistering and freeing ACPI data\n");
- acpi_processor_unregister_performance(p, cpu);
- kfree(centrino_model[cpu]->op_points);
- kfree(centrino_model[cpu]);
- }
- }
-#endif
-
centrino_model[cpu] = NULL;
return 0;
@@ -849,25 +618,12 @@ static int __init centrino_init(void)
if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
- centrino_cpu_early_init_acpi();
-
return cpufreq_register_driver(&centrino_driver);
}
static void __exit centrino_exit(void)
{
-#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
- unsigned int j;
-#endif
-
cpufreq_unregister_driver(&centrino_driver);
-
-#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
- for_each_possible_cpu(j) {
- kfree(acpi_perf_data[j]);
- acpi_perf_data[j] = NULL;
- }
-#endif
}
MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
index 698f980eb443..a5b2346faf1f 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-ich.c
@@ -205,7 +205,6 @@ static unsigned int speedstep_detect_chipset (void)
* host brige. Abort on these systems.
*/
static struct pci_dev *hostbridge;
- u8 rev = 0;
hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_82815_MC,
@@ -216,8 +215,7 @@ static unsigned int speedstep_detect_chipset (void)
if (!hostbridge)
return 2; /* 2-M */
- pci_read_config_byte(hostbridge, PCI_REVISION_ID, &rev);
- if (rev < 5) {
+ if (hostbridge->revision < 5) {
dprintk("hostbridge does not support speedstep\n");
speedstep_chipset_dev = NULL;
pci_dev_put(hostbridge);
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index e88d2fba156b..122d2d75aa9f 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -4,7 +4,7 @@
#include <linux/pci.h>
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/processor.h>
+#include <asm/processor-cyrix.h>
#include <asm/timer.h>
#include <asm/pci-direct.h>
#include <asm/tsc.h>
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index e5be819492ef..db6c25aa5776 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -4,7 +4,7 @@
* Changes:
* Venkatesh Pallipadi : Adding cache identification through cpuid(4)
* Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
- * Andi Kleen : CPUID4 emulation on AMD.
+ * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
*/
#include <linux/init.h>
@@ -135,7 +135,7 @@ unsigned short num_cache_leaves;
/* AMD doesn't have CPUID4. Emulate it here to report the same
information to the user. This makes some assumptions about the machine:
- No L3, L2 not shared, no SMT etc. that is currently true on AMD CPUs.
+ L2 not shared, no SMT etc. that is currently true on AMD CPUs.
In theory the TLBs could be reported as fake type (they are in "dummy").
Maybe later */
@@ -159,13 +159,26 @@ union l2_cache {
unsigned val;
};
+union l3_cache {
+ struct {
+ unsigned line_size : 8;
+ unsigned lines_per_tag : 4;
+ unsigned assoc : 4;
+ unsigned res : 2;
+ unsigned size_encoded : 14;
+ };
+ unsigned val;
+};
+
static const unsigned short assocs[] = {
[1] = 1, [2] = 2, [4] = 4, [6] = 8,
- [8] = 16,
+ [8] = 16, [0xa] = 32, [0xb] = 48,
+ [0xc] = 64,
[0xf] = 0xffff // ??
- };
-static const unsigned char levels[] = { 1, 1, 2 };
-static const unsigned char types[] = { 1, 2, 3 };
+};
+
+static const unsigned char levels[] = { 1, 1, 2, 3 };
+static const unsigned char types[] = { 1, 2, 3, 3 };
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
@@ -175,37 +188,58 @@ static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
unsigned line_size, lines_per_tag, assoc, size_in_kb;
union l1_cache l1i, l1d;
union l2_cache l2;
+ union l3_cache l3;
+ union l1_cache *l1 = &l1d;
eax->full = 0;
ebx->full = 0;
ecx->full = 0;
cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
- cpuid(0x80000006, &dummy, &dummy, &l2.val, &dummy);
-
- if (leaf > 2 || !l1d.val || !l1i.val || !l2.val)
- return;
-
- eax->split.is_self_initializing = 1;
- eax->split.type = types[leaf];
- eax->split.level = levels[leaf];
- eax->split.num_threads_sharing = 0;
- eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
-
- if (leaf <= 1) {
- union l1_cache *l1 = leaf == 0 ? &l1d : &l1i;
+ cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
+
+ switch (leaf) {
+ case 1:
+ l1 = &l1i;
+ case 0:
+ if (!l1->val)
+ return;
assoc = l1->assoc;
line_size = l1->line_size;
lines_per_tag = l1->lines_per_tag;
size_in_kb = l1->size_in_kb;
- } else {
+ break;
+ case 2:
+ if (!l2.val)
+ return;
assoc = l2.assoc;
line_size = l2.line_size;
lines_per_tag = l2.lines_per_tag;
/* cpu_data has errata corrections for K7 applied */
size_in_kb = current_cpu_data.x86_cache_size;
+ break;
+ case 3:
+ if (!l3.val)
+ return;
+ assoc = l3.assoc;
+ line_size = l3.line_size;
+ lines_per_tag = l3.lines_per_tag;
+ size_in_kb = l3.size_encoded * 512;
+ break;
+ default:
+ return;
}
+ eax->split.is_self_initializing = 1;
+ eax->split.type = types[leaf];
+ eax->split.level = levels[leaf];
+ if (leaf == 3)
+ eax->split.num_threads_sharing = current_cpu_data.x86_max_cores - 1;
+ else
+ eax->split.num_threads_sharing = 0;
+ eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+
+
if (assoc == 0xf)
eax->split.is_fully_associative = 1;
ebx->split.coherency_line_size = line_size - 1;
@@ -239,8 +273,7 @@ static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_le
return 0;
}
-/* will only be called once; __init is safe here */
-static int __init find_num_cache_leaves(void)
+static int __cpuinit find_num_cache_leaves(void)
{
unsigned int eax, ebx, ecx, edx;
union _cpuid4_leaf_eax cache_eax;
@@ -482,7 +515,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
cpuid4_info[cpu] = kzalloc(
sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
- if (unlikely(cpuid4_info[cpu] == NULL))
+ if (cpuid4_info[cpu] == NULL)
return -ENOMEM;
oldmask = current->cpus_allowed;
@@ -710,11 +743,13 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
return retval;
}
-static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
+static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i;
+ if (cpuid4_info[cpu] == NULL)
+ return;
for (i = 0; i < num_cache_leaves; i++) {
cache_remove_shared_cpu_map(cpu, i);
kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
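
The new union l3_cache above decodes EDX of CPUID leaf 0x80000006: line size in bits 0..7, lines per tag in 8..11, the associativity code in 12..15 (the same field the amd.c hunk tests with & 0xf000 to decide whether a fourth cache leaf exists), and a size encoded in the top 14 bits in 512 KB units, hence size_encoded * 512. A user-space sketch of the same decode, assuming an AMD CPU that implements the leaf:

/*
 * User-space sketch of the CPUID 0x80000006 EDX (L3) decode mirrored by
 * union l3_cache above.  Run on an AMD CPU that implements the leaf; a
 * zero EDX means no L3 is described.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int line_size, lines_per_tag, assoc_code, size_kb;

	if (!__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx) || !edx) {
		printf("no L3 information\n");
		return 0;
	}

	line_size     = edx & 0xff;			/* bits  0..7  */
	lines_per_tag = (edx >> 8) & 0xf;		/* bits  8..11 */
	assoc_code    = (edx >> 12) & 0xf;		/* bits 12..15 */
	size_kb       = ((edx >> 18) & 0x3fff) * 512;	/* 512 KB units */

	printf("L3: %u KB, line size %u, lines/tag %u, assoc code 0x%x\n",
	       size_kb, line_size, lines_per_tag, assoc_code);
	return 0;
}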
diff --git a/arch/i386/kernel/cpu/mcheck/mce.c b/arch/i386/kernel/cpu/mcheck/mce.c
index 56cd485b127c..34c781eddee4 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.c
+++ b/arch/i386/kernel/cpu/mcheck/mce.c
@@ -60,6 +60,20 @@ void mcheck_init(struct cpuinfo_x86 *c)
}
}
+static unsigned long old_cr4 __initdata;
+
+void __init stop_mce(void)
+{
+ old_cr4 = read_cr4();
+ clear_in_cr4(X86_CR4_MCE);
+}
+
+void __init restart_mce(void)
+{
+ if (old_cr4 & X86_CR4_MCE)
+ set_in_cr4(X86_CR4_MCE);
+}
+
static int __init mcheck_disable(char *str)
{
mce_disabled = 1;
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 6b5d3518a1c0..bf39409b3838 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -57,7 +57,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
static void mce_work_fn(struct work_struct *work)
{
on_each_cpu(mce_checkregs, NULL, 1, 1);
- schedule_delayed_work(&mce_work, MCE_RATE);
+ schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
}
static int __init init_nonfatal_mce_checker(void)
@@ -82,7 +82,7 @@ static int __init init_nonfatal_mce_checker(void)
/*
* Check for non-fatal errors every MCE_RATE s
*/
- schedule_delayed_work(&mce_work, MCE_RATE);
+ schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE));
printk(KERN_INFO "Machine check exception polling timer started.\n");
return 0;
}
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 7ba7c3abd3a4..1203dc5ab87a 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -134,19 +134,21 @@ static __cpuinit int thermal_throttle_cpu_callback(struct notifier_block *nfb,
int err;
sys_dev = get_cpu_sysdev(cpu);
- mutex_lock(&therm_cpu_lock);
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ mutex_lock(&therm_cpu_lock);
err = thermal_throttle_add_dev(sys_dev);
+ mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
+ mutex_lock(&therm_cpu_lock);
thermal_throttle_remove_dev(sys_dev);
+ mutex_unlock(&therm_cpu_lock);
break;
}
- mutex_unlock(&therm_cpu_lock);
return NOTIFY_OK;
}
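
The therm_throt change narrows the lock scope in the hotplug notifier: therm_cpu_lock is now taken only around the add/remove calls that touch shared state rather than across the whole switch. A minimal user-space analogue of that shape, with a pthread mutex standing in for the kernel mutex (event names and helpers below are invented for illustration):

/*
 * User-space analogue of the narrowed locking above: the lock is held
 * only around the calls that mutate shared state, not across the whole
 * event dispatch.  pthread_mutex_t stands in for the kernel mutex.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static int registered_devs;

enum hotplug_event { EVT_ONLINE, EVT_DEAD, EVT_OTHER };

static void add_dev(void)    { registered_devs++; }
static void remove_dev(void) { registered_devs--; }

static void notifier(enum hotplug_event evt)
{
	switch (evt) {
	case EVT_ONLINE:
		pthread_mutex_lock(&dev_lock);	/* lock only while mutating */
		add_dev();
		pthread_mutex_unlock(&dev_lock);
		break;
	case EVT_DEAD:
		pthread_mutex_lock(&dev_lock);
		remove_dev();
		pthread_mutex_unlock(&dev_lock);
		break;
	default:
		break;			/* other events never take the lock */
	}
}

int main(void)
{
	notifier(EVT_ONLINE);
	notifier(EVT_DEAD);
	printf("registered devices: %d\n", registered_devs);
	return 0;
}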
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
index 1001f1e0fe6d..2287d4863a8a 100644
--- a/arch/i386/kernel/cpu/mtrr/cyrix.c
+++ b/arch/i386/kernel/cpu/mtrr/cyrix.c
@@ -3,6 +3,7 @@
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/io.h>
+#include <asm/processor-cyrix.h>
#include "mtrr.h"
int arr3_protected;
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index f6e46943e6ef..56f64e34829f 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -79,7 +79,7 @@ static void print_fixed(unsigned base, unsigned step, const mtrr_type*types)
}
/* Grab all of the MTRR state for this CPU into *state */
-void get_mtrr_state(void)
+void __init get_mtrr_state(void)
{
unsigned int i;
struct mtrr_var_range *vrs;
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 75dc6d5214bc..c48b6fea5ab4 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -643,7 +643,7 @@ static struct sysdev_driver mtrr_sysdev_driver = {
* initialized (i.e. before smp_init()).
*
*/
-__init void mtrr_bp_init(void)
+void __init mtrr_bp_init(void)
{
init_ifs();
diff --git a/arch/i386/kernel/cpu/mtrr/state.c b/arch/i386/kernel/cpu/mtrr/state.c
index 7b39a2f954d9..c9014ca4a575 100644
--- a/arch/i386/kernel/cpu/mtrr/state.c
+++ b/arch/i386/kernel/cpu/mtrr/state.c
@@ -3,6 +3,7 @@
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
+#include <asm-i386/processor-cyrix.h>
#include "mtrr.h"
diff --git a/arch/i386/kernel/cpu/perfctr-watchdog.c b/arch/i386/kernel/cpu/perfctr-watchdog.c
index 996f6f89ce8e..93fecd4b03de 100644
--- a/arch/i386/kernel/cpu/perfctr-watchdog.c
+++ b/arch/i386/kernel/cpu/perfctr-watchdog.c
@@ -263,8 +263,8 @@ static int setup_k7_watchdog(unsigned nmi_hz)
unsigned int evntsel;
struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
- perfctr_msr = MSR_K7_PERFCTR0;
- evntsel_msr = MSR_K7_EVNTSEL0;
+ perfctr_msr = wd_ops->perfctr;
+ evntsel_msr = wd_ops->evntsel;
wrmsrl(perfctr_msr, 0UL);
@@ -325,7 +325,7 @@ static struct wd_ops k7_wd_ops = {
.stop = single_msr_stop_watchdog,
.perfctr = MSR_K7_PERFCTR0,
.evntsel = MSR_K7_EVNTSEL0,
- .checkbit = 1ULL<<63,
+ .checkbit = 1ULL<<47,
};
/* Intel Model 6 (PPro+,P2,P3,P-M,Core1) */
@@ -343,8 +343,8 @@ static int setup_p6_watchdog(unsigned nmi_hz)
unsigned int evntsel;
struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
- perfctr_msr = MSR_P6_PERFCTR0;
- evntsel_msr = MSR_P6_EVNTSEL0;
+ perfctr_msr = wd_ops->perfctr;
+ evntsel_msr = wd_ops->evntsel;
/* KVM doesn't implement this MSR */
if (wrmsr_safe(perfctr_msr, 0, 0) < 0)
@@ -569,8 +569,8 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
return 0;
- perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1;
- evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1;
+ perfctr_msr = wd_ops->perfctr;
+ evntsel_msr = wd_ops->evntsel;
wrmsrl(perfctr_msr, 0UL);
@@ -601,6 +601,16 @@ static struct wd_ops intel_arch_wd_ops = {
.setup = setup_intel_arch_watchdog,
.rearm = p6_rearm,
.stop = single_msr_stop_watchdog,
+ .perfctr = MSR_ARCH_PERFMON_PERFCTR1,
+ .evntsel = MSR_ARCH_PERFMON_EVENTSEL1,
+};
+
+static struct wd_ops coreduo_wd_ops = {
+ .reserve = single_msr_reserve,
+ .unreserve = single_msr_unreserve,
+ .setup = setup_intel_arch_watchdog,
+ .rearm = p6_rearm,
+ .stop = single_msr_stop_watchdog,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
.evntsel = MSR_ARCH_PERFMON_EVENTSEL0,
};
@@ -615,6 +625,12 @@ static void probe_nmi_watchdog(void)
wd_ops = &k7_wd_ops;
break;
case X86_VENDOR_INTEL:
+ /* Work around Core Duo (Yonah) errata AE49 where perfctr1
+ doesn't have a working enable bit. */
+ if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) {
+ wd_ops = &coreduo_wd_ops;
+ break;
+ }
if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
wd_ops = &intel_arch_wd_ops;
break;
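
The watchdog hunks stop hard-coding MSR numbers in setup_k7_watchdog() and setup_p6_watchdog() and read them from the active wd_ops instead, which is what makes the Core Duo workaround possible: coreduo_wd_ops reuses the architectural-perfmon callbacks but points at counter 0, since erratum AE49 leaves counter 1 without a working enable bit on model 6/14. A stand-alone sketch of that ops-table-with-override pattern (selection logic simplified; only the MSR numbers are real):

/*
 * Sketch of the "ops table with a per-model override" pattern used
 * above: the generic entry selects counter 1, the Core Duo entry reuses
 * the same choice of callbacks but points at counter 0.  MSR numbers
 * are the architectural perfmon ones; the selection logic is
 * simplified for illustration.
 */
#include <stdio.h>

#define MSR_ARCH_PERFMON_PERFCTR0	0x0c1
#define MSR_ARCH_PERFMON_PERFCTR1	0x0c2
#define MSR_ARCH_PERFMON_EVENTSEL0	0x186
#define MSR_ARCH_PERFMON_EVENTSEL1	0x187

struct wd_msrs {
	const char  *name;
	unsigned int perfctr;
	unsigned int evntsel;
};

static const struct wd_msrs intel_arch_msrs = {
	"arch-perfmon", MSR_ARCH_PERFMON_PERFCTR1, MSR_ARCH_PERFMON_EVENTSEL1,
};

/* Erratum AE49: counter 1 has no working enable bit on Core Duo (6/14). */
static const struct wd_msrs coreduo_msrs = {
	"core-duo", MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_EVENTSEL0,
};

static const struct wd_msrs *pick_msrs(unsigned int family, unsigned int model)
{
	if (family == 6 && model == 14)
		return &coreduo_msrs;
	return &intel_arch_msrs;
}

int main(void)
{
	const struct wd_msrs *m = pick_msrs(6, 14);	/* pretend Core Duo */

	printf("%s: perfctr MSR 0x%x, evntsel MSR 0x%x\n",
	       m->name, m->perfctr, m->evntsel);
	return 0;
}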
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 89d91e6cc972..1e31b6caffb1 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -29,7 +29,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
NULL, NULL, NULL, "mp", "nx", NULL, "mmxext", NULL,
- NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm", "3dnowext", "3dnow",
+ NULL, "fxsr_opt", "pdpe1gb", "rdtscp", NULL, "lm",
+ "3dnowext", "3dnow",
/* Transmeta-defined */
"recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
@@ -40,8 +41,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
/* Other (Linux-defined) */
"cxmmx", "k6_mtrr", "cyrix_arr", "centaur_mcr",
NULL, NULL, NULL, NULL,
- "constant_tsc", "up", NULL, NULL, NULL, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ "constant_tsc", "up", NULL, "arch_perfmon",
+ "pebs", "bts", NULL, "sync_rdtsc",
+ "rep_good", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* Intel-defined (#2) */
@@ -57,9 +59,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* AMD-defined (#2) */
- "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8legacy", "abm",
- "sse4a", "misalignsse",
- "3dnowprefetch", "osvw", "ibs", NULL, NULL, NULL, NULL, NULL,
+ "lahf_lm", "cmp_legacy", "svm", "extapic", "cr8_legacy",
+ "altmovcr8", "abm", "sse4a",
+ "misalignsse", "3dnowprefetch",
+ "osvw", "ibs", NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+
+ /* Auxiliary (Linux-defined) */
+ "ida", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
diff --git a/arch/i386/kernel/cpu/rise.c b/arch/i386/kernel/cpu/rise.c
deleted file mode 100644
index 50076f22e90f..000000000000
--- a/arch/i386/kernel/cpu/rise.c
+++ /dev/null
@@ -1,52 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/processor.h>
-
-#include "cpu.h"
-
-static void __cpuinit init_rise(struct cpuinfo_x86 *c)
-{
- printk("CPU: Rise iDragon");
- if (c->x86_model > 2)
- printk(" II");
- printk("\n");
-
- /* Unhide possibly hidden capability flags
- The mp6 iDragon family don't have MSRs.
- We switch on extra features with this cpuid weirdness: */
- __asm__ (
- "movl $0x6363452a, %%eax\n\t"
- "movl $0x3231206c, %%ecx\n\t"
- "movl $0x2a32313a, %%edx\n\t"
- "cpuid\n\t"
- "movl $0x63634523, %%eax\n\t"
- "movl $0x32315f6c, %%ecx\n\t"
- "movl $0x2333313a, %%edx\n\t"
- "cpuid\n\t" : : : "eax", "ebx", "ecx", "edx"
- );
- set_bit(X86_FEATURE_CX8, c->x86_capability);
-}
-
-static struct cpu_dev rise_cpu_dev __cpuinitdata = {
- .c_vendor = "Rise",
- .c_ident = { "RiseRiseRise" },
- .c_models = {
- { .vendor = X86_VENDOR_RISE, .family = 5, .model_names =
- {
- [0] = "iDragon",
- [2] = "iDragon",
- [8] = "iDragon II",
- [9] = "iDragon II"
- }
- },
- },
- .c_init = init_rise,
-};
-
-int __init rise_init_cpu(void)
-{
- cpu_devs[X86_VENDOR_RISE] = &rise_cpu_dev;
- return 0;
-}
-