Diffstat (limited to 'arch/powerpc')

-rw-r--r--  arch/powerpc/Makefile                        |  2
-rw-r--r--  arch/powerpc/include/asm/elf.h               |  8
-rw-r--r--  arch/powerpc/include/asm/kmap_types.h        | 11
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h        |  2
-rw-r--r--  arch/powerpc/include/asm/pmc.h               | 16
-rw-r--r--  arch/powerpc/include/asm/pte-common.h        |  2
-rw-r--r--  arch/powerpc/include/asm/thread_info.h       |  2
-rw-r--r--  arch/powerpc/kernel/lparcfg.c                |  3
-rw-r--r--  arch/powerpc/kernel/perf_counter.c           | 68
-rw-r--r--  arch/powerpc/kernel/process.c                | 12
-rw-r--r--  arch/powerpc/kernel/rtas.c                   |  7
-rw-r--r--  arch/powerpc/kernel/sysfs.c                  |  3
-rw-r--r--  arch/powerpc/kernel/vector.S                 |  2
-rw-r--r--  arch/powerpc/mm/pgtable.c                    | 19
-rw-r--r--  arch/powerpc/mm/slb.c                        | 16
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_32.c |  8
-rw-r--r--  arch/powerpc/platforms/powermac/low_i2c.c    |  7
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c        | 61
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c        |  2
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c         |  2
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c    |  9
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c       |  4
-rw-r--r--  arch/powerpc/platforms/pseries/xics.c        |  9

23 files changed, 190 insertions, 85 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index bc35f4e2b81c..39d44f7ff390 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -158,8 +158,6 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
# Default to zImage, override when needed
all: zImage
-CPPFLAGS_vmlinux.lds := -Upowerpc
-
BOOT_TARGETS = zImage zImage.initrd uImage zImage% dtbImage% treeImage.% cuImage.% simpleImage.%
PHONY += $(BOOT_TARGETS)
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 014a624f4c8e..56985023a47d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -236,14 +236,10 @@ typedef elf_vrregset_t elf_fpxregset_t;
#ifdef __powerpc64__
# define SET_PERSONALITY(ex) \
do { \
- unsigned long new_flags = 0; \
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- new_flags = _TIF_32BIT; \
- if ((current_thread_info()->flags & _TIF_32BIT) \
- != new_flags) \
- set_thread_flag(TIF_ABI_PENDING); \
+ set_thread_flag(TIF_32BIT); \
else \
- clear_thread_flag(TIF_ABI_PENDING); \
+ clear_thread_flag(TIF_32BIT); \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
index b6bac6f61c16..916369575c97 100644
--- a/arch/powerpc/include/asm/kmap_types.h
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -29,5 +29,16 @@ enum km_type {
KM_TYPE_NR
};
+/*
+ * This is a temporary build fix that (so they say on lkml....) should no longer
+ * be required after 2.6.33, because of changes planned to the kmap code.
+ * Let's try to remove this cruft then.
+ */
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define KM_NMI (-1)
+#define KM_NMI_PTE (-1)
+#define KM_IRQ_PTE (-1)
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 98c104a09961..edab67ee8e53 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -41,6 +41,7 @@ extern char initial_stab[];
#define SLB_NUM_BOLTED 3
#define SLB_CACHE_ENTRIES 8
+#define SLB_MIN_SIZE 32
/* Bits in the SLB ESID word */
#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */
@@ -296,6 +297,7 @@ extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);
extern void slb_vmalloc_update(void);
+extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
index d6a616a1b3ea..ccc68b50d05d 100644
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -27,10 +27,22 @@ extern perf_irq_t perf_irq;
int reserve_pmc_hardware(perf_irq_t new_perf_irq);
void release_pmc_hardware(void);
+void ppc_enable_pmcs(void);
#ifdef CONFIG_PPC64
-void power4_enable_pmcs(void);
-void pasemi_enable_pmcs(void);
+#include <asm/lppaca.h>
+
+static inline void ppc_set_pmu_inuse(int inuse)
+{
+ get_lppaca()->pmcregs_in_use = inuse;
+}
+
+extern void power4_enable_pmcs(void);
+
+#else /* CONFIG_PPC64 */
+
+static inline void ppc_set_pmu_inuse(int inuse) { }
+
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index a7e210b6b48c..0cd2e3433e1c 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -176,7 +176,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
#define HAVE_PAGE_AGP
/* Advertise support for _PAGE_SPECIAL */
-#ifdef _PAGE_SPECIAL
+#if _PAGE_SPECIAL != 0
#define __HAVE_ARCH_PTE_SPECIAL
#endif
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index c8b329255678..aa9d383a1c09 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -111,7 +111,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
#define TIF_FREEZE 14 /* Freezing for suspend */
#define TIF_RUNLATCH 15 /* Is the runlatch enabled? */
-#define TIF_ABI_PENDING 16 /* 32/64 bit switch needed */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -129,7 +128,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_FREEZE (1<<TIF_FREEZE)
#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
-#define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 2419cc706ff1..ed0ac4e4b8d8 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,6 +35,7 @@
#include <asm/prom.h>
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
+#include <asm/mmu.h>
#define MODULE_VERS "1.8"
#define MODULE_NAME "lparcfg"
@@ -537,6 +538,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
+ seq_printf(m, "slb_size=%d\n", mmu_slb_size);
+
return 0;
}
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 70e1f57f7dd8..7ceefaf3a7f5 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -32,6 +32,9 @@ struct cpu_hw_counters {
unsigned long mmcr[3];
struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
+ u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
@@ -62,7 +65,6 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
return 0;
}
-static inline void perf_set_pmu_inuse(int inuse) { }
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
@@ -93,11 +95,6 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
return 0;
}
-static inline void perf_set_pmu_inuse(int inuse)
-{
- get_lppaca()->pmcregs_in_use = inuse;
-}
-
/*
* The user wants a data address recorded.
* If we're not doing instruction sampling, give them the SDAR
@@ -245,13 +242,11 @@ static void write_pmc(int idx, unsigned long val)
* and see if any combination of alternative codes is feasible.
* The feasible set is returned in event[].
*/
-static int power_check_constraints(u64 event[], unsigned int cflags[],
+static int power_check_constraints(struct cpu_hw_counters *cpuhw,
+ u64 event[], unsigned int cflags[],
int n_ev)
{
unsigned long mask, value, nv;
- u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
int i, j;
@@ -266,21 +261,23 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
&& !ppmu->limited_pmc_event(event[i])) {
ppmu->get_alternatives(event[i], cflags[i],
- alternatives[i]);
- event[i] = alternatives[i][0];
+ cpuhw->alternatives[i]);
+ event[i] = cpuhw->alternatives[i][0];
}
- if (ppmu->get_constraint(event[i], &amasks[i][0],
- &avalues[i][0]))
+ if (ppmu->get_constraint(event[i], &cpuhw->amasks[i][0],
+ &cpuhw->avalues[i][0]))
return -1;
}
value = mask = 0;
for (i = 0; i < n_ev; ++i) {
- nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
+ nv = (value | cpuhw->avalues[i][0]) +
+ (value & cpuhw->avalues[i][0] & addf);
if ((((nv + tadd) ^ value) & mask) != 0 ||
- (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
+ (((nv + tadd) ^ cpuhw->avalues[i][0]) &
+ cpuhw->amasks[i][0]) != 0)
break;
value = nv;
- mask |= amasks[i][0];
+ mask |= cpuhw->amasks[i][0];
}
if (i == n_ev)
return 0; /* all OK */
@@ -291,10 +288,11 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
for (i = 0; i < n_ev; ++i) {
choice[i] = 0;
n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
- alternatives[i]);
+ cpuhw->alternatives[i]);
for (j = 1; j < n_alt[i]; ++j)
- ppmu->get_constraint(alternatives[i][j],
- &amasks[i][j], &avalues[i][j]);
+ ppmu->get_constraint(cpuhw->alternatives[i][j],
+ &cpuhw->amasks[i][j],
+ &cpuhw->avalues[i][j]);
}
/* enumerate all possibilities and see if any will work */
@@ -313,11 +311,11 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
* where k > j, will satisfy the constraints.
*/
while (++j < n_alt[i]) {
- nv = (value | avalues[i][j]) +
- (value & avalues[i][j] & addf);
+ nv = (value | cpuhw->avalues[i][j]) +
+ (value & cpuhw->avalues[i][j] & addf);
if ((((nv + tadd) ^ value) & mask) == 0 &&
- (((nv + tadd) ^ avalues[i][j])
- & amasks[i][j]) == 0)
+ (((nv + tadd) ^ cpuhw->avalues[i][j])
+ & cpuhw->amasks[i][j]) == 0)
break;
}
if (j >= n_alt[i]) {
@@ -339,7 +337,7 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
svalues[i] = value;
smasks[i] = mask;
value = nv;
- mask |= amasks[i][j];
+ mask |= cpuhw->amasks[i][j];
++i;
j = -1;
}
@@ -347,7 +345,7 @@ static int power_check_constraints(u64 event[], unsigned int cflags[],
/* OK, we have a feasible combination, tell the caller the solution */
for (i = 0; i < n_ev; ++i)
- event[i] = alternatives[i][choice[i]];
+ event[i] = cpuhw->alternatives[i][choice[i]];
return 0;
}
@@ -531,8 +529,7 @@ void hw_perf_disable(void)
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
- if (ppc_md.enable_pmcs)
- ppc_md.enable_pmcs();
+ ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
@@ -594,7 +591,7 @@ void hw_perf_enable(void)
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
if (cpuhw->n_counters == 0)
- perf_set_pmu_inuse(0);
+ ppc_set_pmu_inuse(0);
goto out_enable;
}
@@ -627,7 +624,7 @@ void hw_perf_enable(void)
* bit set and set the hardware counters to their initial values.
* Then unfreeze the counters.
*/
- perf_set_pmu_inuse(1);
+ ppc_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -752,7 +749,7 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
return -EAGAIN;
if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
return -EAGAIN;
- i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
+ i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
if (i < 0)
return -EAGAIN;
cpuhw->n_counters = n0 + n;
@@ -807,7 +804,7 @@ static int power_pmu_enable(struct perf_counter *counter)
cpuhw->flags[n0] = counter->hw.counter_base;
if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
goto out;
- if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
+ if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
goto out;
counter->hw.config = cpuhw->events[n0];
@@ -1012,6 +1009,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
unsigned int cflags[MAX_HWCOUNTERS];
int n;
int err;
+ struct cpu_hw_counters *cpuhw;
if (!ppmu)
return ERR_PTR(-ENXIO);
@@ -1090,7 +1088,11 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
cflags[n] = flags;
if (check_excludes(ctrs, cflags, n, 1))
return ERR_PTR(-EINVAL);
- if (power_check_constraints(events, cflags, n + 1))
+
+ cpuhw = &get_cpu_var(cpu_hw_counters);
+ err = power_check_constraints(cpuhw, events, cflags, n + 1);
+ put_cpu_var(cpu_hw_counters);
+ if (err)
return ERR_PTR(-EINVAL);
counter->hw.config = events[n];
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 892a9f2e6d76..1ea56fdccef4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -554,18 +554,6 @@ void exit_thread(void)
void flush_thread(void)
{
-#ifdef CONFIG_PPC64
- struct thread_info *t = current_thread_info();
-
- if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
- clear_ti_thread_flag(t, TIF_ABI_PENDING);
- if (test_ti_thread_flag(t, TIF_32BIT))
- clear_ti_thread_flag(t, TIF_32BIT);
- else
- set_ti_thread_flag(t, TIF_32BIT);
- }
-#endif
-
discard_lazy_cpu_state();
if (current->thread.dabr) {
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index c434823b8c83..bf90361bb70f 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -39,6 +39,7 @@
#include <asm/smp.h>
#include <asm/atomic.h>
#include <asm/time.h>
+#include <asm/mmu.h>
struct rtas_t rtas = {
.lock = __RAW_SPIN_LOCK_UNLOCKED
@@ -713,6 +714,7 @@ static void rtas_percpu_suspend_me(void *info)
{
long rc = H_SUCCESS;
unsigned long msr_save;
+ u16 slb_size = mmu_slb_size;
int cpu;
struct rtas_suspend_me_data *data =
(struct rtas_suspend_me_data *)info;
@@ -735,13 +737,16 @@ static void rtas_percpu_suspend_me(void *info)
/* All other cpus are in H_JOIN, this cpu does
* the suspend.
*/
+ slb_set_size(SLB_MIN_SIZE);
printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n",
smp_processor_id());
data->error = rtas_call(data->token, 0, 1, NULL);
- if (data->error)
+ if (data->error) {
printk(KERN_DEBUG "ibm,suspend-me returned %d\n",
data->error);
+ slb_set_size(slb_size);
+ }
} else {
printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
smp_processor_id(), rc);
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index f41aec85aa49..956ab33fd73f 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
+#include <asm/pmc.h>
#include "cacheinfo.h"
@@ -123,6 +124,8 @@ static DEFINE_PER_CPU(char, pmcs_enabled);
void ppc_enable_pmcs(void)
{
+ ppc_set_pmu_inuse(1);
+
/* Only need to enable them once */
if (__get_cpu_var(pmcs_enabled))
return;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index ea4d64644d02..419f492cb586 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -58,7 +58,7 @@ _GLOBAL(load_up_altivec)
* all 1's
*/
mfspr r4,SPRN_VRSAVE
- cmpdi 0,r4,0
+ cmpwi 0,r4,0
bne+ 1f
li r4,-1
mtspr SPRN_VRSAVE,r4
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 627767d6169b..d8e672567f7e 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -30,6 +30,8 @@
#include <asm/tlbflush.h>
#include <asm/tlb.h>
+#include "mmu_decl.h"
+
static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;
@@ -119,7 +121,7 @@ void pte_free_finish(void)
/*
* Handle i/d cache flushing, called from set_pte_at() or ptep_set_access_flags()
*/
-static pte_t do_dcache_icache_coherency(pte_t pte)
+static pte_t do_dcache_icache_coherency(pte_t pte, unsigned long addr)
{
unsigned long pfn = pte_pfn(pte);
struct page *page;
@@ -128,6 +130,17 @@ static pte_t do_dcache_icache_coherency(pte_t pte)
return pte;
page = pfn_to_page(pfn);
+#ifdef CONFIG_8xx
+ /* On 8xx, cache control instructions (particularly
+ * "dcbst" from flush_dcache_icache) fault as write
+ * operation if there is an unpopulated TLB entry
+ * for the address in question. To workaround that,
+ * we invalidate the TLB here, thus avoiding dcbst
+ * misbehaviour.
+ */
+ _tlbil_va(addr, 0 /* 8xx doesn't care about PID */);
+#endif
+
if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)) {
pr_devel("do_dcache_icache_coherency... flushing\n");
flush_dcache_icache_page(page);
@@ -198,7 +211,7 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte
*/
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
if (pte_need_exec_flush(pte, 1))
- pte = do_dcache_icache_coherency(pte);
+ pte = do_dcache_icache_coherency(pte, addr);
/* Perform the setting of the PTE */
__set_pte_at(mm, addr, ptep, pte, 0);
@@ -216,7 +229,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
{
int changed;
if (!dirty && pte_need_exec_flush(entry, 0))
- entry = do_dcache_icache_coherency(entry);
+ entry = do_dcache_icache_coherency(entry, address);
changed = !pte_same(*(ptep), entry);
if (changed) {
if (!(vma->vm_flags & VM_HUGETLB))
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5b7038f248b6..deb619323f84 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -240,14 +240,22 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
static inline void patch_slb_encoding(unsigned int *insn_addr,
unsigned int immed)
{
- /* Assume the instruction had a "0" immediate value, just
- * "or" in the new value
- */
- *insn_addr |= immed;
+ *insn_addr = (*insn_addr & 0xffff0000) | immed;
flush_icache_range((unsigned long)insn_addr, 4+
(unsigned long)insn_addr);
}
+void slb_set_size(u16 size)
+{
+ extern unsigned int *slb_compare_rr_to_size;
+
+ if (mmu_slb_size == size)
+ return;
+
+ mmu_slb_size = size;
+ patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
+}
+
void slb_initialize(void)
{
unsigned long linear_llp, vmalloc_llp, io_llp;
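
The patch_slb_encoding() change above stops OR-ing the new value into the instruction's immediate field and instead rewrites the whole low halfword, which is what the new slb_set_size() callers in this patch (rtas_percpu_suspend_me() and do_update_property()) rely on to repatch the comparison at runtime rather than only once at boot. A minimal user-space sketch of that bit manipulation, using a made-up instruction word rather than the real slb_compare_rr_to_size encoding:

/* Standalone sketch (not part of the patch): patch the 16-bit immediate
 * field of a 32-bit instruction word, as patch_slb_encoding() does,
 * instead of OR-ing into it.  The starting word is illustrative only. */
#include <stdio.h>
#include <stdint.h>

static uint32_t patch_immediate(uint32_t insn, uint16_t immed)
{
	/* keep the opcode and register fields, replace only the low 16 bits */
	return (insn & 0xffff0000u) | immed;
}

int main(void)
{
	uint32_t insn = 0x282a0040;	/* pretend "compare r10 against 64" */

	insn = patch_immediate(insn, 32);	/* shrink, e.g. to SLB_MIN_SIZE */
	printf("patched: 0x%08x\n", (unsigned int)insn);
	insn = patch_immediate(insn, 64);	/* restore the previous size */
	printf("patched: 0x%08x\n", (unsigned int)insn);
	return 0;
}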
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 65c585b8b00d..08d94e4cedd3 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -44,14 +44,6 @@
*/
#undef DEBUG_FREQ
-/*
- * There is a problem with the core cpufreq code on SMP kernels,
- * it won't recalculate the Bogomips properly
- */
-#ifdef CONFIG_SMP
-#warning "WARNING, CPUFREQ not recommended on SMP kernels"
-#endif
-
extern void low_choose_7447a_dfs(int dfs);
extern void low_choose_750fx_pll(int pll);
extern void low_sleep_handler(void);
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 21226b74c9b2..414ca9849f23 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -540,8 +540,11 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
/* Make sure IRQ is disabled */
kw_write_reg(reg_ier, 0);
- /* Request chip interrupt */
- if (request_irq(host->irq, kw_i2c_irq, 0, "keywest i2c", host))
+ /* Request chip interrupt. We set IRQF_TIMER because we don't
+ * want that interrupt disabled between the 2 passes of driver
+ * suspend or we'll have issues running the pfuncs
+ */
+ if (request_irq(host->irq, kw_i2c_irq, IRQF_TIMER, "keywest i2c", host))
host->irq = NO_IRQ;
printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 04cdd32624d4..e81403b245b5 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1286,3 +1286,64 @@ static void fixup_k2_sata(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, 0x0240, fixup_k2_sata);
+/*
+ * On U4 (aka CPC945) the PCIe root complex "P2P" bridge resource ranges aren't
+ * configured by the firmware. The bridge itself seems to ignore them but it
+ * causes problems with Linux which then re-assigns devices below the bridge,
+ * thus changing addresses of those devices from what was in the device-tree,
+ * which sucks when those are video cards using offb
+ *
+ * We could just mark it transparent but I prefer fixing up the resources to
+ * properly show what's going on here, as I have some doubts about having them
+ * badly configured potentially being an issue for DMA.
+ *
+ * We leave PIO alone, it seems to be fine
+ *
+ * Oh and there's another funny bug. The OF properties advertize the region
+ * 0xf1000000..0xf1ffffff as being forwarded as memory space. But that's
+ * actually not true, this region is the memory mapped config space. So we
+ * also need to filter it out or we'll map things in the wrong place.
+ */
+static void fixup_u4_pcie(struct pci_dev* dev)
+{
+ struct pci_controller *host = pci_bus_to_host(dev->bus);
+ struct resource *region = NULL;
+ u32 reg;
+ int i;
+
+ /* Only do that on PowerMac */
+ if (!machine_is(powermac))
+ return;
+
+ /* Find the largest MMIO region */
+ for (i = 0; i < 3; i++) {
+ struct resource *r = &host->mem_resources[i];
+ if (!(r->flags & IORESOURCE_MEM))
+ continue;
+ /* Skip the 0xf0xxxxxx..f2xxxxxx regions, we know they
+ * are reserved by HW for other things
+ */
+ if (r->start >= 0xf0000000 && r->start < 0xf3000000)
+ continue;
+ if (!region || (r->end - r->start) >
+ (region->end - region->start))
+ region = r;
+ }
+ /* Nothing found, bail */
+ if (region == 0)
+ return;
+
+ /* Print things out */
+ printk(KERN_INFO "PCI: Fixup U4 PCIe bridge range: %pR\n", region);
+
+ /* Fixup bridge config space. We know it's a Mac, resource aren't
+ * offset so let's just blast them as-is. We also know that they
+ * fit in 32 bits
+ */
+ reg = ((region->start >> 16) & 0xfff0) | (region->end & 0xfff00000);
+ pci_write_config_dword(dev, PCI_MEMORY_BASE, reg);
+ pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0);
+ pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0);
+ pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE, fixup_u4_pcie);
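
The dword that fixup_u4_pcie() above writes to PCI_MEMORY_BASE packs both halves of the bridge's non-prefetchable window: the low 16 bits land in the Memory Base register and the high 16 bits in the Memory Limit register, each holding address bits 31:20 in its bits 15:4. A standalone sketch of that packing, with a made-up MMIO window rather than a real U4 resource:

/* Standalone sketch (not part of the patch): decode the value built by
 * reg = ((region->start >> 16) & 0xfff0) | (region->end & 0xfff00000);
 * the window below is hypothetical. */
#include <stdio.h>

int main(void)
{
	unsigned int start = 0x80000000u;	/* hypothetical window start */
	unsigned int end   = 0x9fffffffu;	/* hypothetical window end   */
	unsigned int reg   = ((start >> 16) & 0xfff0) | (end & 0xfff00000);

	printf("config dword = 0x%08x\n", reg);
	printf("memory base  = 0x%04x -> window starts at 0x%08x\n",
	       reg & 0xffff, (reg & 0xfff0) << 16);
	printf("memory limit = 0x%04x -> window ends at   0x%08x\n",
	       reg >> 16, (reg & 0xfff00000) | 0xfffff);
	return 0;
}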
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6d4da7b46b41..937a38e73178 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -408,7 +408,7 @@ static void __init smp_psurge_setup_cpu(int cpu_nr)
/* reset the entry point so if we get another intr we won't
* try to startup again */
out_be32(psurge_start, 0x100);
- if (setup_irq(30, &psurge_irqaction))
+ if (setup_irq(irq_create_mapping(NULL, 30), &psurge_irqaction))
printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index bf2e1ac41308..1164c3430f2c 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -432,8 +432,6 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
/* Read config space back so we can restore after reset */
read_msi_msg(virq, &msg);
entry->msg = msg;
-
- unmask_msi_irq(virq);
}
return 0;
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index b6f1b137d427..2e2bbe120b90 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -20,6 +20,7 @@
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/pSeries_reconfig.h>
+#include <asm/mmu.h>
@@ -439,9 +440,15 @@ static int do_update_property(char *buf, size_t bufsize)
if (!newprop)
return -ENOMEM;
+ if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size"))
+ slb_set_size(*(int *)value);
+
oldprop = of_find_property(np, name,NULL);
- if (!oldprop)
+ if (!oldprop) {
+ if (strlen(name))
+ return prom_add_property(np, newprop);
return -ENODEV;
+ }
rc = prom_update_property(np, newprop, oldprop);
if (rc)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8d75ea21296f..ca5f2e10972c 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -223,10 +223,6 @@ static void pseries_lpar_enable_pmcs(void)
set = 1UL << 63;
reset = 0;
plpar_hcall_norets(H_PERFMON, set, reset);
-
- /* instruct hypervisor to maintain PMCs */
- if (firmware_has_feature(FW_FEATURE_SPLPAR))
- get_lppaca()->pmcregs_in_use = 1;
}
static void __init pseries_discover_pic(void)
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 419f8a637ffe..b9bf0eedccf2 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/radix-tree.h>
#include <linux/cpu.h>
+#include <linux/msi.h>
#include <linux/of.h>
#include <asm/firmware.h>
@@ -219,6 +220,14 @@ static void xics_unmask_irq(unsigned int virq)
static unsigned int xics_startup(unsigned int virq)
{
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ if (irq_to_desc(virq)->msi_desc)
+ unmask_msi_irq(virq);
+
/* unmask it */
xics_unmask_irq(virq);
return 0;