From 2e62e8bde48c4ddca2359c579ed024d67f897358 Mon Sep 17 00:00:00 2001
From: Ashwin Chaugule
Date: Thu, 6 Sep 2012 17:49:31 -0400
Subject: [PATCH] Perf: Restore correct CPU's PMU counters after power collapse

Since the L1CC PMUs are per-CPU, the variable used to detect whether a
CPU came out of power collapse also needs to be a per-CPU variable.
This ensures that we reset and restore the correct CPU's PMU counters.

Change-Id: I02273df2eff9f6d88d68f46a7752c107b290a8ef
Signed-off-by: Ashwin Chaugule
---
 arch/arm/include/asm/pmu.h   |  1 -
 arch/arm/kernel/perf_event.c | 11 +++++++----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 0d0103a4aec..88d0872451b 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -113,7 +113,6 @@ struct arm_pmu {
 	struct mutex reserve_mutex;
 	u64 max_period;
 	struct platform_device *plat_device;
-	u32 from_idle;
 	irqreturn_t (*handle_irq)(int irq_num, void *dev);
 	int (*request_pmu_irq)(int irq, irq_handler_t *irq_h);
 	void (*free_pmu_irq)(int irq);
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e97aef2b4ca..3f6a6d3fc5c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -41,6 +41,7 @@
  */
 #define ARMPMU_MAX_HWEVENTS 32
 
+static DEFINE_PER_CPU(u32, from_idle);
 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
@@ -602,7 +603,7 @@ static void armpmu_enable(struct pmu *pmu)
 	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
 	int idx;
 
-	if (armpmu->from_idle) {
+	if (__get_cpu_var(from_idle)) {
 		for (idx = 0; idx <= cpu_pmu->num_events; ++idx) {
 			struct perf_event *event = hw_events->events[idx];
 
@@ -613,9 +614,12 @@ static void armpmu_enable(struct pmu *pmu)
 		}
 
 		/* Reset bit so we don't needlessly re-enable counters.*/
-		armpmu->from_idle = 0;
+		__get_cpu_var(from_idle) = 0;
 	}
 
+	/* So we don't start the PMU before enabling counters after idle. */
+	barrier();
+
 	if (enabled)
 		armpmu->start();
 }
@@ -731,7 +735,6 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-
 static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
 				    unsigned long action, void *hcpu)
 {
@@ -805,7 +808,7 @@ static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
 		 * Flip this bit so armpmu_enable knows it needs
 		 * to re-enable active counters.
 		 */
-		cpu_pmu->from_idle = 1;
+		__get_cpu_var(from_idle) = 1;
 		cpu_pmu->reset(NULL);
 		perf_pmu_enable(&cpu_pmu->pmu);
 	}
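
A note on the per-CPU accessors used above, for reviewers unfamiliar with
the pattern: DEFINE_PER_CPU(type, name) creates one instance of the
variable for each possible CPU, and __get_cpu_var(name) reads or writes
the copy belonging to the CPU currently executing (in later kernels this
accessor was superseded by this_cpu_read()/this_cpu_write()). Below is a
minimal sketch of the idea behind this fix; pmu_idle_exit() and
pmu_restore_counters() are hypothetical names used only to illustrate the
idle round-trip, not functions in this patch:

#include <linux/percpu.h>

/*
 * One flag per CPU, matching the per-CPU L1CC PMU hardware. With the
 * old single from_idle field in struct arm_pmu, one CPU waking from
 * power collapse could set or clear a flag that a different CPU then
 * consumed, so the wrong CPU's counters were reset and restored.
 */
static DEFINE_PER_CPU(u32, from_idle);

/* Runs on the waking CPU: mark that this CPU's counters need restoring. */
static void pmu_idle_exit(void)
{
	__get_cpu_var(from_idle) = 1;	/* touches this CPU's copy only */
}

/* Runs when the PMU is re-enabled on a CPU. */
static void pmu_restore_counters(void)
{
	if (__get_cpu_var(from_idle)) {
		/* ... re-program this CPU's event counters here ... */
		__get_cpu_var(from_idle) = 0;
	}
}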