| From 60792ad349f3c6dc5735aafefe5dc9121c79e320 Mon Sep 17 00:00:00 2001 |
| From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> |
| Date: Fri, 18 Dec 2015 10:35:54 +0000 |
| Subject: arm64: kernel: enforce pmuserenr_el0 initialization and restore |
| |
| From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> |
| |
| commit 60792ad349f3c6dc5735aafefe5dc9121c79e320 upstream. |
| |
| The pmuserenr_el0 register value is architecturally UNKNOWN on reset. |
| Current kernel code resets that register value iff the core pmu device is |
| correctly probed in the kernel. On platforms with missing DT pmu nodes (or |
| disabled perf events in the kernel), the pmu is not probed, therefore the |
| pmuserenr_el0 register is not reset in the kernel, which means that it |
| retains its reset value, which is architecturally UNKNOWN (system |
| may run with e.g. pmuserenr_el0 == 0x1, which means that PMU counter |
| access is available at EL0, which must be disallowed). |
| |
| This patch adds code that resets pmuserenr_el0 on cold boot and restores |
| it on core resume from shutdown, so that the pmuserenr_el0 setup is |
| always enforced in the kernel. |
| |
| Cc: Mark Rutland <mark.rutland@arm.com> |
| Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> |
| Signed-off-by: Will Deacon <will.deacon@arm.com> |
| Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| |
| --- |
| arch/arm64/kernel/perf_event.c | 3 --- |
| arch/arm64/mm/proc.S | 2 ++ |
| 2 files changed, 2 insertions(+), 3 deletions(-) |
| |
| --- a/arch/arm64/kernel/perf_event.c |
| +++ b/arch/arm64/kernel/perf_event.c |
| @@ -1159,9 +1159,6 @@ static void armv8pmu_reset(void *info) |
| |
| /* Initialize & Reset PMNC: C and P bits. */ |
| armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C); |
| - |
| - /* Disable access from userspace. */ |
| - asm volatile("msr pmuserenr_el0, %0" :: "r" (0)); |
| } |
| |
| static int armv8_pmuv3_map_event(struct perf_event *event) |
| --- a/arch/arm64/mm/proc.S |
| +++ b/arch/arm64/mm/proc.S |
| @@ -115,6 +115,7 @@ ENTRY(cpu_do_resume) |
| */ |
| ubfx x11, x11, #1, #1 |
| msr oslar_el1, x11 |
| + msr pmuserenr_el0, xzr // Disable PMU access from EL0 |
| mov x0, x12 |
| dsb nsh // Make sure local tlb invalidation completed |
| isb |
| @@ -153,6 +154,7 @@ ENTRY(__cpu_setup) |
| msr cpacr_el1, x0 // Enable FP/ASIMD |
| mov x0, #1 << 12 // Reset mdscr_el1 and disable |
| msr mdscr_el1, x0 // access to the DCC from EL0 |
| + msr pmuserenr_el0, xzr // Disable PMU access from EL0 |
| /* |
| * Memory region attributes for LPAE: |
| * |