Diffstat (limited to 'arch/arm/kernel/perf_event.c')

 arch/arm/kernel/perf_event.c | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)
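In short: armpmu_event_update() gains an overflow argument and computes deltas by masking with max_period rather than a hard-coded 32-bit shift; struct arm_pmu gains a reset() callback, run on every CPU by a new arch_initcall; and the user-space callchain walk is capped at PERF_MAX_STACK_DEPTH. Notes and illustrative sketches follow the relevant hunks below.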
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 22e194eb8536..139e3c827369 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -79,6 +79,7 @@ struct arm_pmu {
void (*write_counter)(int idx, u32 val);
void (*start)(void);
void (*stop)(void);
+ void (*reset)(void *);
const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
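This hunk gives struct arm_pmu a per-implementation reset hook. Its void * prototype is what allows later code to hand the callback straight to on_each_cpu(), which forwards its info argument to the function on every CPU. A minimal sketch of a backend wiring up the hook; the names are illustrative, not taken from this patch:

	/* Illustrative backend, not from this patch. */
	static void example_pmu_reset(void *info)
	{
		/* Put this CPU's counters and overflow flags into a known state. */
	}

	static struct arm_pmu example_pmu = {
		.reset = example_pmu_reset,
		/* remaining callbacks omitted */
	};
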
@@ -204,11 +205,9 @@ armpmu_event_set_period(struct perf_event *event,
static u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx)
+ int idx, int overflow)
{
- int shift = 64 - 32;
- s64 prev_raw_count, new_raw_count;
- u64 delta;
+ u64 delta, prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hwc->prev_count);
@@ -218,8 +217,13 @@ again:
new_raw_count) != prev_raw_count)
goto again;
- delta = (new_raw_count << shift) - (prev_raw_count << shift);
- delta >>= shift;
+ new_raw_count &= armpmu->max_period;
+ prev_raw_count &= armpmu->max_period;
+
+ if (overflow)
+ delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
+ else
+ delta = new_raw_count - prev_raw_count;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
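The old delta calculation hard-coded a 32-bit counter width (shift = 64 - 32) and inferred wraps from modular arithmetic alone. The replacement derives the width from max_period, the all-ones mask for the counter, and makes the caller state whether an overflow occurred: on a wrap, the delta is the distance from prev_raw_count up to and including the wrap (max_period - prev_raw_count + 1) plus the ticks counted after it (new_raw_count). A worked example of the overflow branch as a standalone program; the values are invented for illustration:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* A 32-bit counter: max_period is the all-ones mask. */
		uint64_t max_period = 0xffffffffULL;
		uint64_t prev = 0xfffffff0ULL;	/* last value sampled */
		uint64_t new  = 0x10ULL;	/* value read after the wrap */

		/* Ticks up to and including the wrap, then ticks after it. */
		uint64_t delta = max_period - prev + new + 1;

		printf("delta = %llu events\n", (unsigned long long)delta);
		/* Prints 32: 16 ticks to wrap from 0xfffffff0, 16 more after. */
		return 0;
	}
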
@@ -236,7 +240,7 @@ armpmu_read(struct perf_event *event)
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu_event_update(event, hwc, hwc->idx, 0);
}
static void
@@ -254,7 +258,7 @@ armpmu_stop(struct perf_event *event, int flags)
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu_event_update(event, hwc, hwc->idx, 0);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
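Both call sites above pass overflow = 0: armpmu_read() and armpmu_stop() sample a counter that is not known to have wrapped, so the plain new_raw_count - prev_raw_count difference is the right answer. The callers that pass 1 are the overflow interrupt handlers in the per-version files (perf_event_v6.c and friends), which the "limited to" filter on this diff hides.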
@@ -624,6 +628,19 @@ static struct pmu pmu = {
#include "perf_event_v6.c"
#include "perf_event_v7.c"
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+armpmu_reset(void)
+{
+ if (armpmu && armpmu->reset)
+ return on_each_cpu(armpmu->reset, NULL, 1);
+ return 0;
+}
+arch_initcall(armpmu_reset);
+
static int __init
init_hw_perf_events(void)
{
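Why a separate initcall: in this version of the tree, init_hw_perf_events() runs as an early_initcall, before smp_init() has brought the secondary cores online, so an on_each_cpu() call made there could not reach them. arch_initcalls run after SMP bring-up, which is why armpmu_reset() stands alone; the final 1 passed to on_each_cpu() makes it wait until the callback has finished on every CPU.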
@@ -729,7 +746,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
- while (tail && !((unsigned long)tail & 0x3))
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+ tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, entry);
}
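The final hunk bounds the user-space callchain walk. perf_callchain_store() silently drops entries once entry->nr reaches PERF_MAX_STACK_DEPTH, so without this check the loop would keep faulting in user frames long after the buffer was full; testing entry->nr in the loop condition stops the walk as soon as nothing more can be stored. A self-contained sketch of the pattern; the frame layout and helper are simplified stand-ins, not the file's real frame_tail or user_backtrace():

	#define MAX_DEPTH 127	/* stand-in for PERF_MAX_STACK_DEPTH */

	struct frame {			/* simplified frame_tail */
		struct frame *fp;	/* caller's frame, or 0 to stop */
		unsigned long lr;	/* return address to record */
	};

	/* Record one return address and step to the caller's frame. */
	static struct frame *step(struct frame *tail,
				  unsigned long *buf, unsigned int *nr)
	{
		if (*nr < MAX_DEPTH)
			buf[(*nr)++] = tail->lr;
		return tail->fp;
	}

	unsigned int walk(struct frame *tail, unsigned long *buf)
	{
		unsigned int nr = 0;

		/* Same shape as the patch: stop when the buffer is full,
		 * the pointer is misaligned, or the chain ends. */
		while (nr < MAX_DEPTH && tail && !((unsigned long)tail & 0x3))
			tail = step(tail, buf, &nr);
		return nr;
	}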