author		Paul Mackerras <paulus@samba.org>	2009-01-14 13:44:19 +1100
committer	Paul Mackerras <paulus@samba.org>	2009-01-14 13:44:19 +1100
commit		01d0287f068de2934109ba9b989d8807526cccc2 (patch)
tree		31e49140ecc61fd158dbd8d4e9f58358d7f84197 /arch/powerpc/kernel/perf_counter.c
parent		dd0e6ba22ea21bcc2c420b385a170593c58f4c08 (diff)
powerpc/perf_counter: Make sure PMU gets enabled properly
This makes sure that we call the platform-specific ppc_md.enable_pmcs function on each CPU before we try to use the PMU on that CPU. If the CPU goes off-line and then on-line, we need to do the enable_pmcs call again, so we use the hw_perf_counter_setup hook to ensure that. It gets called as each CPU comes online, but it isn't called on the CPU that is coming up, so this adds the CPU number as an argument to it (there were no non-empty instances of hw_perf_counter_setup before).

This also arranges to set the pmcregs_in_use field of the lppaca (data structure shared with the hypervisor) on each CPU when we are using the PMU and clear it when we are not. This allows the hypervisor to optimize partition switches by not saving/restoring the PMU registers when we aren't using the PMU.

Signed-off-by: Paul Mackerras <paulus@samba.org>
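For context, the generic-code side of the hook (in kernel/perf_counter.c at the time) is roughly as sketched below. This is a reconstruction from the commit message, not part of this diff; the caller name perf_counter_init_cpu and the notifier details are recalled from the generic code of that era, not quoted from it.

/*
 * Sketch of the generic caller, reconstructed for illustration only.
 * hw_perf_counter_setup() is a weak symbol that architecture code
 * (such as the powerpc version added in this patch) overrides.
 */
void __weak hw_perf_counter_setup(int cpu)
{
	/* Default: no architecture-specific state to initialize. */
}

static void __cpuinit perf_counter_init_cpu(unsigned int cpu)
{
	/*
	 * Called from the CPU-hotplug path as @cpu comes online, but
	 * running on a different CPU -- hence the explicit cpu
	 * argument instead of smp_processor_id().
	 */
	hw_perf_counter_setup(cpu);
}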
Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	22
1 file changed, 22 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index df3fe057dee9..85ad25923c2c 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -15,6 +15,7 @@
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
+#include <asm/machdep.h>
struct cpu_hw_counters {
int n_counters;
@@ -24,6 +25,7 @@ struct cpu_hw_counters {
struct perf_counter *counter[MAX_HWCOUNTERS];
unsigned int events[MAX_HWCOUNTERS];
u64 mmcr[3];
+ u8 pmcs_enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
@@ -262,6 +264,15 @@ u64 hw_perf_save_disable(void)
cpuhw->n_added = 0;
/*
+ * Check if we ever enabled the PMU on this cpu.
+ */
+ if (!cpuhw->pmcs_enabled) {
+ if (ppc_md.enable_pmcs)
+ ppc_md.enable_pmcs();
+ cpuhw->pmcs_enabled = 1;
+ }
+
+ /*
* Set the 'freeze counters' bit.
* The barrier is to make sure the mtspr has been
* executed and the PMU has frozen the counters
@@ -305,6 +316,8 @@ void hw_perf_restore(u64 disable)
mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, cpuhw->mmcr[0]);
+ if (cpuhw->n_counters == 0)
+ get_lppaca()->pmcregs_in_use = 0;
goto out;
}
@@ -323,6 +336,7 @@ void hw_perf_restore(u64 disable)
* bit set and set the hardware counters to their initial values.
* Then unfreeze the counters.
*/
+ get_lppaca()->pmcregs_in_use = 1;
mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -741,6 +755,14 @@ static void perf_counter_interrupt(struct pt_regs *regs)
}
}
+void hw_perf_counter_setup(int cpu)
+{
+ struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+
+ memset(cpuhw, 0, sizeof(*cpuhw));
+ cpuhw->mmcr[0] = MMCR0_FC;
+}
+
extern struct power_pmu ppc970_pmu;
extern struct power_pmu power6_pmu;
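One concrete ppc_md.enable_pmcs implementation that the lazy call in hw_perf_save_disable() now reaches on every CPU is the pseries LPAR one, which asks the hypervisor to turn on performance-monitor support. A hedged sketch from memory of the pseries platform code of that era; the exact bit layout is recalled, not quoted:

/*
 * Sketch of a platform enable_pmcs hook (pseries), for illustration
 * only -- not part of this diff.  The H_PERFMON hcall asks the
 * hypervisor to enable PMU access for this partition.
 */
static void pseries_lpar_enable_pmcs(void)
{
	unsigned long set = 1UL << 63;	/* enable performance monitoring */
	unsigned long reset = 0;

	plpar_hcall_norets(H_PERFMON, set, reset);
}

Because hw_perf_counter_setup() zeroes the per-CPU cpu_hw_counters (clearing pmcs_enabled) each time a CPU comes online, the next hw_perf_save_disable() on that CPU repeats the enable_pmcs call, so a CPU that went offline and back online gets re-enabled. Keeping pmcregs_in_use up to date in the lppaca is the complementary half of the change: the hypervisor only has to save and restore the PMU SPRs across partition switches while the flag is set.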