| /* |
| * Copyright (C) 2012,2013 - ARM Ltd |
| * Author: Marc Zyngier <marc.zyngier@arm.com> |
| * |
| * Derived from arch/arm/kvm/coproc.c: |
| * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
| * Authors: Rusty Russell <rusty@rustcorp.com.au> |
| * Christoffer Dall <c.dall@virtualopensystems.com> |
| * |
| * This program is free software; you can redistribute it and/or modify |
| * it under the terms of the GNU General Public License, version 2, as |
| * published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| * GNU General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public License |
| * along with this program. If not, see <http://www.gnu.org/licenses/>. |
| */ |
| |
| #include <linux/bsearch.h> |
| #include <linux/kvm_host.h> |
| #include <linux/mm.h> |
| #include <linux/printk.h> |
| #include <linux/uaccess.h> |
| |
| #include <asm/cacheflush.h> |
| #include <asm/cputype.h> |
| #include <asm/debug-monitors.h> |
| #include <asm/esr.h> |
| #include <asm/kvm_arm.h> |
| #include <asm/kvm_coproc.h> |
| #include <asm/kvm_emulate.h> |
| #include <asm/kvm_host.h> |
| #include <asm/kvm_hyp.h> |
| #include <asm/kvm_mmu.h> |
| #include <asm/perf_event.h> |
| #include <asm/sysreg.h> |
| |
| #include <trace/events/kvm.h> |
| |
| #include "sys_regs.h" |
| |
| #include "trace.h" |
| |
| /* |
 * All of this file is extremely similar to the ARM coproc.c, but the
| * types are different. My gut feeling is that it should be pretty |
| * easy to merge, but that would be an ABI breakage -- again. VFP |
| * would also need to be abstracted. |
| * |
| * For AArch32, we only take care of what is being trapped. Anything |
| * that has to do with init and userspace access has to go via the |
| * 64bit interface. |
| */ |
| |
| static bool read_from_write_only(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *params, |
| const struct sys_reg_desc *r) |
| { |
| WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n"); |
| print_sys_reg_instr(params); |
| kvm_inject_undefined(vcpu); |
| return false; |
| } |
| |
| static bool write_to_read_only(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *params, |
| const struct sys_reg_desc *r) |
| { |
| WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n"); |
| print_sys_reg_instr(params); |
| kvm_inject_undefined(vcpu); |
| return false; |
| } |
| |
| static u64 tcr_el2_ips_to_tcr_el1_ps(u64 tcr_el2) |
| { |
| return ((tcr_el2 & TCR_EL2_PS_MASK) >> TCR_EL2_PS_SHIFT) |
| << TCR_IPS_SHIFT; |
| } |
| |
| u64 translate_tcr(u64 tcr) |
| { |
| return TCR_EPD1 | /* disable TTBR1_EL1 */ |
| ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) | |
| tcr_el2_ips_to_tcr_el1_ps(tcr) | |
| (tcr & TCR_EL2_TG0_MASK) | |
| (tcr & TCR_EL2_ORGN0_MASK) | |
| (tcr & TCR_EL2_IRGN0_MASK) | |
| (tcr & TCR_EL2_T0SZ_MASK); |
| } |
| |
| u64 translate_cptr(u64 cptr_el2) |
| { |
| u64 cpacr_el1 = 0; |
| |
| if (!(cptr_el2 & CPTR_EL2_TFP)) |
| cpacr_el1 |= CPACR_EL1_FPEN; |
| if (cptr_el2 & CPTR_EL2_TTA) |
| cpacr_el1 |= CPACR_EL1_TTA; |
| if (!(cptr_el2 & CPTR_EL2_TZ)) |
| cpacr_el1 |= CPACR_EL1_ZEN; |
| |
| return cpacr_el1; |
| } |
| |
| u64 translate_sctlr(u64 sctlr) |
| { |
| /* Bit 20 is RES1 in SCTLR_EL1, but RES0 in SCTLR_EL2 */ |
| return sctlr | BIT(20); |
| } |
| |
| u64 translate_ttbr0(u64 ttbr0) |
| { |
| /* Force ASID to 0 (ASID 0 or RES0) */ |
| return ttbr0 & ~GENMASK_ULL(63, 48); |
| } |
| |
| u64 translate_cnthctl(u64 cnthctl) |
| { |
| return ((cnthctl & 0x3) << 10) | (cnthctl & 0xfc); |
| } |
| |
| u64 translate_spsr(struct kvm_vcpu *vcpu, u64 spsr) |
| { |
| u64 reg = read_sysreg_el1(spsr); |
| |
| /* |
| * SPSR.M == 0 isn't really possible, so if the CPU's copy of |
| * SPSR_EL1 has this "tag", we know that the CPU hasn't touched |
| * it, so the currently stored EL2 copy is the proper value. |
| */ |
| if ((reg & 0xf) == 0) |
| return spsr; |
| |
| /* |
| * Otherwise there was a "local" exception on the CPU, which from |
| * the guest's point of view was being taken from EL2 to EL2, although |
| * it actually happened to be from EL1 to EL1. |
| * So we need to fix the .M field in SPSR, to make it look like EL2, |
| * which is what the guest would expect. |
| */ |
| return (reg & ~0x0c) | CurrentEL_EL2; |
| } |
| |
| #define EL2_SYSREG(el2, el1, translate) \ |
| [el2 - FIRST_EL2_SYSREG] = { el2, el1, translate } |
| #define PURE_EL2_SYSREG(el2) \ |
	[el2 - FIRST_EL2_SYSREG] = { el2, __INVALID_SYSREG__, NULL }
| /* |
| * Associate vEL2 registers to their EL1 counterparts on the CPU. |
 * The translate function can be NULL when the register layout is identical.
| */ |
| struct el2_sysreg_map { |
	int sysreg;	/* EL2 register index in the sys_regs array */
| int mapping; /* associated EL1 register */ |
| u64 (*translate)(u64 value); |
| } nested_sysreg_map[NR_SYS_REGS - FIRST_EL2_SYSREG] = { |
| PURE_EL2_SYSREG( VPIDR_EL2 ), |
| PURE_EL2_SYSREG( VMPIDR_EL2 ), |
| PURE_EL2_SYSREG( ACTLR_EL2 ), |
| PURE_EL2_SYSREG( HCR_EL2 ), |
| PURE_EL2_SYSREG( MDCR_EL2 ), |
| PURE_EL2_SYSREG( HSTR_EL2 ), |
| PURE_EL2_SYSREG( HACR_EL2 ), |
| PURE_EL2_SYSREG( VTTBR_EL2 ), |
| PURE_EL2_SYSREG( VTCR_EL2 ), |
| PURE_EL2_SYSREG( RVBAR_EL2 ), |
| PURE_EL2_SYSREG( RMR_EL2 ), |
| PURE_EL2_SYSREG( TPIDR_EL2 ), |
| PURE_EL2_SYSREG( CNTVOFF_EL2 ), |
| PURE_EL2_SYSREG( CNTHCTL_EL2 ), |
| PURE_EL2_SYSREG( HPFAR_EL2 ), |
| EL2_SYSREG( SCTLR_EL2, SCTLR_EL1, translate_sctlr ), |
| EL2_SYSREG( CPTR_EL2, CPACR_EL1, translate_cptr ), |
| EL2_SYSREG( TTBR0_EL2, TTBR0_EL1, translate_ttbr0 ), |
| EL2_SYSREG( TTBR1_EL2, TTBR1_EL1, NULL ), |
| EL2_SYSREG( TCR_EL2, TCR_EL1, translate_tcr ), |
| EL2_SYSREG( VBAR_EL2, VBAR_EL1, NULL ), |
| EL2_SYSREG( AFSR0_EL2, AFSR0_EL1, NULL ), |
| EL2_SYSREG( AFSR1_EL2, AFSR1_EL1, NULL ), |
| EL2_SYSREG( ESR_EL2, ESR_EL1, NULL ), |
| EL2_SYSREG( FAR_EL2, FAR_EL1, NULL ), |
| EL2_SYSREG( MAIR_EL2, MAIR_EL1, NULL ), |
| EL2_SYSREG( AMAIR_EL2, AMAIR_EL1, NULL ), |
| }; |
| |
| static |
| const struct el2_sysreg_map *find_el2_sysreg(const struct el2_sysreg_map *map, |
| int reg) |
| { |
| const struct el2_sysreg_map *entry; |
| |
| if (!sysreg_is_el2(reg)) |
| return NULL; |
| |
| entry = &nested_sysreg_map[reg - FIRST_EL2_SYSREG]; |
| if (entry->sysreg == __INVALID_SYSREG__) |
| return NULL; |
| |
| return entry; |
| } |
| |
| u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg) |
| { |
| |
| if (!vcpu->arch.sysregs_loaded_on_cpu) |
| goto immediate_read; |
| |
| if (unlikely(sysreg_is_el2(reg))) { |
| const struct el2_sysreg_map *el2_reg; |
| |
| if (!vcpu_mode_el2(vcpu)) |
| goto immediate_read; |
| |
| el2_reg = find_el2_sysreg(nested_sysreg_map, reg); |
| if (el2_reg) { |
| /* |
| * If this register does not have an EL1 counterpart, |
| * then read the stored EL2 version. |
| */ |
| if (el2_reg->mapping == __INVALID_SYSREG__) |
| goto immediate_read; |
| |
| /* Get the current version of the EL1 counterpart. */ |
| reg = el2_reg->mapping; |
| } |
| } else { |
| /* EL1 register can't be on the CPU if the guest is in vEL2. */ |
| if (unlikely(vcpu_mode_el2(vcpu))) |
| goto immediate_read; |
| } |
| |
| /* |
| * System registers listed in the switch are not saved on every |
| * exit from the guest but are only saved on vcpu_put. |
| * |
| * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but |
| * should never be listed below, because the guest cannot modify its |
| * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's |
| * thread when emulating cross-VCPU communication. |
| */ |
| switch (reg) { |
| case CSSELR_EL1: return read_sysreg_s(SYS_CSSELR_EL1); |
| case SCTLR_EL1: return read_sysreg_s(sctlr_EL12); |
| case ACTLR_EL1: return read_sysreg_s(SYS_ACTLR_EL1); |
| case CPACR_EL1: return read_sysreg_s(cpacr_EL12); |
| case TTBR0_EL1: return read_sysreg_s(ttbr0_EL12); |
| case TTBR1_EL1: return read_sysreg_s(ttbr1_EL12); |
| case TCR_EL1: return read_sysreg_s(tcr_EL12); |
| case ESR_EL1: return read_sysreg_s(esr_EL12); |
| case AFSR0_EL1: return read_sysreg_s(afsr0_EL12); |
| case AFSR1_EL1: return read_sysreg_s(afsr1_EL12); |
| case FAR_EL1: return read_sysreg_s(far_EL12); |
| case MAIR_EL1: return read_sysreg_s(mair_EL12); |
| case VBAR_EL1: return read_sysreg_s(vbar_EL12); |
| case CONTEXTIDR_EL1: return read_sysreg_s(contextidr_EL12); |
| case TPIDR_EL0: return read_sysreg_s(SYS_TPIDR_EL0); |
| case TPIDRRO_EL0: return read_sysreg_s(SYS_TPIDRRO_EL0); |
| case TPIDR_EL1: return read_sysreg_s(SYS_TPIDR_EL1); |
| case AMAIR_EL1: return read_sysreg_s(amair_EL12); |
| case CNTKCTL_EL1: return read_sysreg_s(cntkctl_EL12); |
| case PAR_EL1: return read_sysreg_s(SYS_PAR_EL1); |
| case DACR32_EL2: return read_sysreg_s(SYS_DACR32_EL2); |
| case IFSR32_EL2: return read_sysreg_s(SYS_IFSR32_EL2); |
| case DBGVCR32_EL2: return read_sysreg_s(SYS_DBGVCR32_EL2); |
| case SP_EL2: return read_sysreg(sp_el1); |
| case ELR_EL2: return read_sysreg_el1(elr); |
| } |
| |
| immediate_read: |
| return __vcpu_sys_reg(vcpu, reg); |
| } |
| |
| void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg) |
| { |
| if (!vcpu->arch.sysregs_loaded_on_cpu) |
| goto immediate_write; |
| |
| if (unlikely(sysreg_is_el2(reg))) { |
| const struct el2_sysreg_map *el2_reg; |
| |
| if (!vcpu_mode_el2(vcpu)) |
| goto immediate_write; |
| |
| /* Store the EL2 version in the sysregs array. */ |
| __vcpu_sys_reg(vcpu, reg) = val; |
| |
| el2_reg = find_el2_sysreg(nested_sysreg_map, reg); |
| if (el2_reg) { |
| /* Does this register have an EL1 counterpart? */ |
| if (el2_reg->mapping == __INVALID_SYSREG__) |
| return; |
| |
| if (!vcpu_el2_e2h_is_set(&vcpu->arch.ctxt) && |
| el2_reg->translate) |
| val = el2_reg->translate(val); |
| |
| /* Redirect this to the EL1 version of the register. */ |
| reg = el2_reg->mapping; |
| } |
| } else { |
| /* EL1 register can't be on the CPU if the guest is in vEL2. */ |
| if (unlikely(vcpu_mode_el2(vcpu))) |
| goto immediate_write; |
| } |
| |
| /* |
| * System registers listed in the switch are not restored on every |
| * entry to the guest but are only restored on vcpu_load. |
| * |
| * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but |
 * should never be listed below, because the MPIDR should only be
| * set once, before running the VCPU, and never changed later. |
| */ |
| switch (reg) { |
| case CSSELR_EL1: write_sysreg_s(val, SYS_CSSELR_EL1); return; |
| case SCTLR_EL1: write_sysreg_s(val, sctlr_EL12); return; |
| case ACTLR_EL1: write_sysreg_s(val, SYS_ACTLR_EL1); return; |
| case CPACR_EL1: write_sysreg_s(val, cpacr_EL12); return; |
| case TTBR0_EL1: write_sysreg_s(val, ttbr0_EL12); return; |
| case TTBR1_EL1: write_sysreg_s(val, ttbr1_EL12); return; |
| case TCR_EL1: write_sysreg_s(val, tcr_EL12); return; |
| case ESR_EL1: write_sysreg_s(val, esr_EL12); return; |
| case AFSR0_EL1: write_sysreg_s(val, afsr0_EL12); return; |
| case AFSR1_EL1: write_sysreg_s(val, afsr1_EL12); return; |
| case FAR_EL1: write_sysreg_s(val, far_EL12); return; |
| case MAIR_EL1: write_sysreg_s(val, mair_EL12); return; |
| case VBAR_EL1: write_sysreg_s(val, vbar_EL12); return; |
| case CONTEXTIDR_EL1: write_sysreg_s(val, contextidr_EL12); return; |
| case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); return; |
| case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); return; |
| case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); return; |
| case AMAIR_EL1: write_sysreg_s(val, amair_EL12); return; |
| case CNTKCTL_EL1: write_sysreg_s(val, cntkctl_EL12); return; |
| case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); return; |
| case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); return; |
| case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); return; |
| case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); return; |
| case SP_EL2: write_sysreg(val, sp_el1); return; |
| case ELR_EL2: write_sysreg_el1(val, elr); return; |
| } |
| |
| immediate_write: |
| __vcpu_sys_reg(vcpu, reg) = val; |
| } |
| |
| /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ |
| static u32 cache_levels; |
| |
| /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ |
| #define CSSELR_MAX 12 |
| |
| /* Which cache CCSIDR represents depends on CSSELR value. */ |
| static u32 get_ccsidr(u32 csselr) |
| { |
| u32 ccsidr; |
| |
	/* Make sure no one else changes CSSELR during this! */
| local_irq_disable(); |
| write_sysreg(csselr, csselr_el1); |
| isb(); |
| ccsidr = read_sysreg(ccsidr_el1); |
| local_irq_enable(); |
| |
| return ccsidr; |
| } |
| |
| static bool el12_reg(struct sys_reg_params *p) |
| { |
| /* All *_EL12 registers have Op1=5. */ |
| return (p->Op1 == 5); |
| } |
| |
| static bool access_rw(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (el12_reg(p) && forward_nv_traps(vcpu)) |
| return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| |
| if (p->is_write) |
| vcpu_write_sys_reg(vcpu, p->regval, r->reg); |
| else |
| p->regval = vcpu_read_sys_reg(vcpu, r->reg); |
| |
| return true; |
| } |
| |
| /* |
| * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). |
| */ |
| static bool access_dcsw(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (!p->is_write) |
| return read_from_write_only(vcpu, p, r); |
| |
| /* |
| * Only track S/W ops if we don't have FWB. It still indicates |
| * that the guest is a bit broken (S/W operations should only |
| * be done by firmware, knowing that there is only a single |
| * CPU left in the system, and certainly not from non-secure |
| * software). |
| */ |
| if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) |
| kvm_set_way_flush(vcpu); |
| |
| return true; |
| } |
| |
/* Decide whether to forward a VM trap to the virtual EL2 (recursive NV) */
| static bool forward_vm_traps(struct kvm_vcpu *vcpu, struct sys_reg_params *p) |
| { |
| u64 hcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2); |
| |
	/* If the trap comes from the virtual EL2, the host hypervisor handles it. */
| if (vcpu_mode_el2(vcpu)) |
| return false; |
| |
| /* |
	 * If the virtual HCR_EL2.TVM or TRVM bit is set, we need to forward
| * this trap to the virtual EL2. |
| */ |
| if ((hcr_el2 & HCR_TVM) && p->is_write) |
| return true; |
| else if ((hcr_el2 & HCR_TRVM) && !p->is_write) |
| return true; |
| |
| return false; |
| } |
| |
| /* |
| * Generic accessor for VM registers. Only called as long as HCR_TVM |
| * is set. If the guest enables the MMU, we stop trapping the VM |
| * sys_regs and leave it in complete control of the caches. |
| */ |
| static bool access_vm_reg(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| bool was_enabled = vcpu_has_cache_enabled(vcpu); |
| u64 val; |
| int reg = r->reg; |
| |
| if (el12_reg(p) && forward_nv_traps(vcpu)) |
| return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| |
| if (!el12_reg(p) && forward_vm_traps(vcpu, p)) |
| return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| |
| BUG_ON(!vcpu_mode_el2(vcpu) && !p->is_write); |
| |
| if (!p->is_write) { |
| p->regval = vcpu_read_sys_reg(vcpu, reg); |
| return true; |
| } |
| |
| /* See the 32bit mapping in kvm_host.h */ |
| if (p->is_aarch32) |
| reg = r->reg / 2; |
| |
| if (!p->is_aarch32 || !p->is_32bit) { |
| val = p->regval; |
| } else { |
| val = vcpu_read_sys_reg(vcpu, reg); |
| if (r->reg % 2) |
| val = (p->regval << 32) | (u64)lower_32_bits(val); |
| else |
| val = ((u64)upper_32_bits(val) << 32) | |
| lower_32_bits(p->regval); |
| } |
| vcpu_write_sys_reg(vcpu, val, reg); |
| |
| kvm_toggle_cache(vcpu, was_enabled); |
| return true; |
| } |
| |
| /* |
| * Trap handler for the GICv3 SGI generation system register. |
| * Forward the request to the VGIC emulation. |
| * The cp15_64 code makes sure this automatically works |
| * for both AArch64 and AArch32 accesses. |
| */ |
| static bool access_gic_sgi(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| bool g1; |
| |
| if (!p->is_write) |
| return read_from_write_only(vcpu, p, r); |
| |
| /* |
	 * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
| * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group, |
| * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively |
| * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure |
| * group. |
| */ |
| if (p->is_aarch32) { |
| switch (p->Op1) { |
| default: /* Keep GCC quiet */ |
| case 0: /* ICC_SGI1R */ |
| g1 = true; |
| break; |
| case 1: /* ICC_ASGI1R */ |
| case 2: /* ICC_SGI0R */ |
| g1 = false; |
| break; |
| } |
| } else { |
| switch (p->Op2) { |
| default: /* Keep GCC quiet */ |
| case 5: /* ICC_SGI1R_EL1 */ |
| g1 = true; |
| break; |
| case 6: /* ICC_ASGI1R_EL1 */ |
| case 7: /* ICC_SGI0R_EL1 */ |
| g1 = false; |
| break; |
| } |
| } |
| |
| vgic_v3_dispatch_sgi(vcpu, p->regval, g1); |
| |
| return true; |
| } |
| |
| static bool access_gic_sre(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (p->is_write) |
| return ignore_write(vcpu, p); |
| |
| p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; |
| return true; |
| } |
| |
| static bool trap_raz_wi(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (p->is_write) |
| return ignore_write(vcpu, p); |
| else |
| return read_zero(vcpu, p); |
| } |
| |
| static bool trap_undef(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| kvm_inject_undefined(vcpu); |
| return false; |
| } |
| |
| static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (p->is_write) { |
| return ignore_write(vcpu, p); |
| } else { |
| p->regval = (1 << 3); |
| return true; |
| } |
| } |
| |
| static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (p->is_write) { |
| return ignore_write(vcpu, p); |
| } else { |
| p->regval = read_sysreg(dbgauthstatus_el1); |
| return true; |
| } |
| } |
| |
| /* |
| * We want to avoid world-switching all the DBG registers all the |
| * time: |
| * |
| * - If we've touched any debug register, it is likely that we're |
| * going to touch more of them. It then makes sense to disable the |
| * traps and start doing the save/restore dance |
| * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is |
| * then mandatory to save/restore the registers, as the guest |
| * depends on them. |
| * |
| * For this, we use a DIRTY bit, indicating the guest has modified the |
 * debug registers, used as follows:
| * |
| * On guest entry: |
| * - If the dirty bit is set (because we're coming back from trapping), |
| * disable the traps, save host registers, restore guest registers. |
| * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), |
| * set the dirty bit, disable the traps, save host registers, |
| * restore guest registers. |
| * - Otherwise, enable the traps |
| * |
| * On guest exit: |
| * - If the dirty bit is set, save guest registers, restore host |
 *   registers and clear the dirty bit. This ensures that the host can
| * now use the debug registers. |
| */ |
| static bool trap_debug_regs(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| access_rw(vcpu, p, r); |
| if (p->is_write) |
| vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; |
| |
| trace_trap_reg(__func__, r->reg, p->is_write, p->regval); |
| |
| return true; |
| } |
| |
| /* |
| * reg_to_dbg/dbg_to_reg |
| * |
 * A 32 bit write to a debug register leaves the top bits alone
| * A 32 bit read from a debug register only returns the bottom bits |
| * |
| * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the |
 * hyp.S code switches between host and guest values in the future.
| */ |
| static void reg_to_dbg(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| u64 *dbg_reg) |
| { |
| u64 val = p->regval; |
| |
| if (p->is_32bit) { |
| val &= 0xffffffffUL; |
| val |= ((*dbg_reg >> 32) << 32); |
| } |
| |
| *dbg_reg = val; |
| vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; |
| } |
| |
| static void dbg_to_reg(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| u64 *dbg_reg) |
| { |
| p->regval = *dbg_reg; |
| if (p->is_32bit) |
| p->regval &= 0xffffffffUL; |
| } |
| |
| static bool trap_bvr(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *rd) |
| { |
| u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; |
| |
| if (p->is_write) |
| reg_to_dbg(vcpu, p, dbg_reg); |
| else |
| dbg_to_reg(vcpu, p, dbg_reg); |
| |
| trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); |
| |
| return true; |
| } |
| |
| static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; |
| |
| if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) |
| return -EFAULT; |
| return 0; |
| } |
| |
| static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; |
| |
| if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) |
| return -EFAULT; |
| return 0; |
| } |
| |
| static void reset_bvr(struct kvm_vcpu *vcpu, |
| const struct sys_reg_desc *rd) |
| { |
| vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val; |
| } |
| |
| static bool trap_bcr(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *rd) |
| { |
| u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; |
| |
| if (p->is_write) |
| reg_to_dbg(vcpu, p, dbg_reg); |
| else |
| dbg_to_reg(vcpu, p, dbg_reg); |
| |
| trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); |
| |
| return true; |
| } |
| |
| static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; |
| |
| if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) |
| return -EFAULT; |
| |
| return 0; |
| } |
| |
| static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; |
| |
| if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) |
| return -EFAULT; |
| return 0; |
| } |
| |
| static void reset_bcr(struct kvm_vcpu *vcpu, |
| const struct sys_reg_desc *rd) |
| { |
| vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val; |
| } |
| |
| static bool trap_wvr(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *rd) |
| { |
| u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; |
| |
| if (p->is_write) |
| reg_to_dbg(vcpu, p, dbg_reg); |
| else |
| dbg_to_reg(vcpu, p, dbg_reg); |
| |
| trace_trap_reg(__func__, rd->reg, p->is_write, |
| vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]); |
| |
| return true; |
| } |
| |
| static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; |
| |
| if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) |
| return -EFAULT; |
| return 0; |
| } |
| |
| static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; |
| |
| if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) |
| return -EFAULT; |
| return 0; |
| } |
| |
| static void reset_wvr(struct kvm_vcpu *vcpu, |
| const struct sys_reg_desc *rd) |
| { |
| vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; |
| } |
| |
| static bool trap_wcr(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *rd) |
| { |
| u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; |
| |
| if (p->is_write) |
| reg_to_dbg(vcpu, p, dbg_reg); |
| else |
| dbg_to_reg(vcpu, p, dbg_reg); |
| |
| trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); |
| |
| return true; |
| } |
| |
| static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; |
| |
| if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) |
| return -EFAULT; |
| return 0; |
| } |
| |
| static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; |
| |
| if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) |
| return -EFAULT; |
| return 0; |
| } |
| |
| static void reset_wcr(struct kvm_vcpu *vcpu, |
| const struct sys_reg_desc *rd) |
| { |
| vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val; |
| } |
| |
| static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
| { |
| u64 amair = read_sysreg(amair_el1); |
| vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1); |
| } |
| |
| static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
| { |
| u64 mpidr; |
| |
| /* |
| * Map the vcpu_id into the first three affinity level fields of |
| * the MPIDR. We limit the number of VCPUs in level 0 due to a |
| * limitation to 16 CPUs in that level in the ICC_SGIxR registers |
| * of the GICv3 to be able to address each CPU directly when |
| * sending IPIs. |
| */ |
| mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0); |
| mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1); |
| mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2); |
| vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1); |
| } |
| |
| static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
| { |
| u64 pmcr, val; |
| |
| pmcr = read_sysreg(pmcr_el0); |
| /* |
| * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN |
| * except PMCR.E resetting to zero. |
| */ |
| val = ((pmcr & ~ARMV8_PMU_PMCR_MASK) |
| | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E); |
| __vcpu_sys_reg(vcpu, PMCR_EL0) = val; |
| } |
| |
| static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags) |
| { |
| u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0); |
| bool enabled = (reg & flags) || vcpu_mode_priv(vcpu); |
| |
| if (!enabled) |
| kvm_inject_undefined(vcpu); |
| |
| return !enabled; |
| } |
| |
| static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu) |
| { |
| return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN); |
| } |
| |
| static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu) |
| { |
| return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN); |
| } |
| |
| static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu) |
| { |
| return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN); |
| } |
| |
| static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu) |
| { |
| return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN); |
| } |
| |
| static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 val; |
| |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| if (pmu_access_el0_disabled(vcpu)) |
| return false; |
| |
| if (p->is_write) { |
| /* Only update writeable bits of PMCR */ |
| val = __vcpu_sys_reg(vcpu, PMCR_EL0); |
| val &= ~ARMV8_PMU_PMCR_MASK; |
| val |= p->regval & ARMV8_PMU_PMCR_MASK; |
| __vcpu_sys_reg(vcpu, PMCR_EL0) = val; |
| kvm_pmu_handle_pmcr(vcpu, val); |
| } else { |
| /* PMCR.P & PMCR.C are RAZ */ |
| val = __vcpu_sys_reg(vcpu, PMCR_EL0) |
| & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); |
| p->regval = val; |
| } |
| |
| return true; |
| } |
| |
| static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| if (pmu_access_event_counter_el0_disabled(vcpu)) |
| return false; |
| |
| if (p->is_write) |
| __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval; |
| else |
| /* return PMSELR.SEL field */ |
| p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0) |
| & ARMV8_PMU_COUNTER_MASK; |
| |
| return true; |
| } |
| |
| static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 pmceid; |
| |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| BUG_ON(p->is_write); |
| |
| if (pmu_access_el0_disabled(vcpu)) |
| return false; |
| |
| if (!(p->Op2 & 1)) |
| pmceid = read_sysreg(pmceid0_el0); |
| else |
| pmceid = read_sysreg(pmceid1_el0); |
| |
| p->regval = pmceid; |
| |
| return true; |
| } |
| |
| static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) |
| { |
| u64 pmcr, val; |
| |
| pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0); |
| val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; |
| if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) { |
| kvm_inject_undefined(vcpu); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 idx; |
| |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| if (r->CRn == 9 && r->CRm == 13) { |
| if (r->Op2 == 2) { |
| /* PMXEVCNTR_EL0 */ |
| if (pmu_access_event_counter_el0_disabled(vcpu)) |
| return false; |
| |
| idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) |
| & ARMV8_PMU_COUNTER_MASK; |
| } else if (r->Op2 == 0) { |
| /* PMCCNTR_EL0 */ |
| if (pmu_access_cycle_counter_el0_disabled(vcpu)) |
| return false; |
| |
| idx = ARMV8_PMU_CYCLE_IDX; |
| } else { |
| return false; |
| } |
| } else if (r->CRn == 0 && r->CRm == 9) { |
| /* PMCCNTR */ |
| if (pmu_access_event_counter_el0_disabled(vcpu)) |
| return false; |
| |
| idx = ARMV8_PMU_CYCLE_IDX; |
| } else if (r->CRn == 14 && (r->CRm & 12) == 8) { |
| /* PMEVCNTRn_EL0 */ |
| if (pmu_access_event_counter_el0_disabled(vcpu)) |
| return false; |
| |
| idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); |
| } else { |
| return false; |
| } |
| |
| if (!pmu_counter_idx_valid(vcpu, idx)) |
| return false; |
| |
| if (p->is_write) { |
| if (pmu_access_el0_disabled(vcpu)) |
| return false; |
| |
| kvm_pmu_set_counter_value(vcpu, idx, p->regval); |
| } else { |
| p->regval = kvm_pmu_get_counter_value(vcpu, idx); |
| } |
| |
| return true; |
| } |
| |
| static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 idx, reg; |
| |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| if (pmu_access_el0_disabled(vcpu)) |
| return false; |
| |
| if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) { |
| /* PMXEVTYPER_EL0 */ |
| idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK; |
| reg = PMEVTYPER0_EL0 + idx; |
| } else if (r->CRn == 14 && (r->CRm & 12) == 12) { |
| idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); |
| if (idx == ARMV8_PMU_CYCLE_IDX) |
| reg = PMCCFILTR_EL0; |
| else |
| /* PMEVTYPERn_EL0 */ |
| reg = PMEVTYPER0_EL0 + idx; |
| } else { |
| BUG(); |
| } |
| |
| if (!pmu_counter_idx_valid(vcpu, idx)) |
| return false; |
| |
| if (p->is_write) { |
| kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); |
| __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; |
| } else { |
| p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; |
| } |
| |
| return true; |
| } |
| |
| static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 val, mask; |
| |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| if (pmu_access_el0_disabled(vcpu)) |
| return false; |
| |
| mask = kvm_pmu_valid_counter_mask(vcpu); |
| if (p->is_write) { |
| val = p->regval & mask; |
| if (r->Op2 & 0x1) { |
| /* accessing PMCNTENSET_EL0 */ |
| __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; |
| kvm_pmu_enable_counter(vcpu, val); |
| } else { |
| /* accessing PMCNTENCLR_EL0 */ |
| __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; |
| kvm_pmu_disable_counter(vcpu, val); |
| } |
| } else { |
| p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask; |
| } |
| |
| return true; |
| } |
| |
| static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 mask = kvm_pmu_valid_counter_mask(vcpu); |
| |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| if (!vcpu_mode_priv(vcpu)) { |
| kvm_inject_undefined(vcpu); |
| return false; |
| } |
| |
| if (p->is_write) { |
| u64 val = p->regval & mask; |
| |
| if (r->Op2 & 0x1) |
| /* accessing PMINTENSET_EL1 */ |
| __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val; |
| else |
| /* accessing PMINTENCLR_EL1 */ |
| __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val; |
| } else { |
| p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask; |
| } |
| |
| return true; |
| } |
| |
| static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 mask = kvm_pmu_valid_counter_mask(vcpu); |
| |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| if (pmu_access_el0_disabled(vcpu)) |
| return false; |
| |
| if (p->is_write) { |
| if (r->CRm & 0x2) |
| /* accessing PMOVSSET_EL0 */ |
| __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask); |
| else |
| /* accessing PMOVSCLR_EL0 */ |
| __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask); |
| } else { |
| p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask; |
| } |
| |
| return true; |
| } |
| |
| static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 mask; |
| |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| if (!p->is_write) |
| return read_from_write_only(vcpu, p, r); |
| |
| if (pmu_write_swinc_el0_disabled(vcpu)) |
| return false; |
| |
| mask = kvm_pmu_valid_counter_mask(vcpu); |
| kvm_pmu_software_increment(vcpu, p->regval & mask); |
| return true; |
| } |
| |
| static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (!kvm_arm_pmu_v3_ready(vcpu)) |
| return trap_raz_wi(vcpu, p, r); |
| |
| if (p->is_write) { |
| if (!vcpu_mode_priv(vcpu)) { |
| kvm_inject_undefined(vcpu); |
| return false; |
| } |
| |
| __vcpu_sys_reg(vcpu, PMUSERENR_EL0) = |
| p->regval & ARMV8_PMU_USERENR_MASK; |
| } else { |
| p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0) |
| & ARMV8_PMU_USERENR_MASK; |
| } |
| |
| return true; |
| } |
| |
| /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ |
| #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ |
| { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ |
| trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \ |
| { SYS_DESC(SYS_DBGBCRn_EL1(n)), \ |
| trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \ |
| { SYS_DESC(SYS_DBGWVRn_EL1(n)), \ |
| trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \ |
| { SYS_DESC(SYS_DBGWCRn_EL1(n)), \ |
| trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr } |
| |
| /* Macro to expand the PMEVCNTRn_EL0 register */ |
| #define PMU_PMEVCNTR_EL0(n) \ |
| { SYS_DESC(SYS_PMEVCNTRn_EL0(n)), \ |
| access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), } |
| |
| /* Macro to expand the PMEVTYPERn_EL0 register */ |
| #define PMU_PMEVTYPER_EL0(n) \ |
| { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ |
| access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } |
| |
| #define reg_to_match_value(x) \ |
| ({ \ |
| unsigned long val; \ |
| val = (x)->Op0 << 14; \ |
| val |= (x)->Op1 << 11; \ |
| val |= (x)->CRn << 7; \ |
| val |= (x)->CRm << 3; \ |
| val |= (x)->Op2; \ |
| val; \ |
| }) |
| |
| static bool access_arch_timer(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 reg = reg_to_match_value(p); |
| |
| if (p->is_write) |
| kvm_arm_timer_set_reg(vcpu, reg, p->regval); |
| else |
| p->regval = kvm_arm_timer_get_reg(vcpu, reg); |
| |
| return true; |
| } |
| |
| /* Read a sanitised cpufeature ID register by sys_reg_desc */ |
| static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) |
| { |
| u32 id = sys_reg((u32)r->Op0, (u32)r->Op1, |
| (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); |
| u64 val = raz ? 0 : read_sanitised_ftr_reg(id); |
| |
| if (id == SYS_ID_AA64PFR0_EL1) { |
| if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) |
| kvm_debug("SVE unsupported for guests, suppressing\n"); |
| |
| val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); |
| } else if (id == SYS_ID_AA64MMFR1_EL1) { |
| if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) |
| kvm_debug("LORegions unsupported for guests, suppressing\n"); |
| |
| val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); |
| } |
| |
| return val; |
| } |
| |
| /* cpufeature ID register access trap handlers */ |
| |
| static bool __access_id_reg(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r, |
| bool raz) |
| { |
| if (p->is_write) |
| return write_to_read_only(vcpu, p, r); |
| |
| p->regval = read_id_reg(r, raz); |
| return true; |
| } |
| |
| static bool access_id_reg(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| return __access_id_reg(vcpu, p, r, false); |
| } |
| |
| static bool access_raz_id_reg(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| return __access_id_reg(vcpu, p, r, true); |
| } |
| |
| static int reg_from_user(u64 *val, const void __user *uaddr, u64 id); |
| static int reg_to_user(void __user *uaddr, const u64 *val, u64 id); |
| static u64 sys_reg_to_index(const struct sys_reg_desc *reg); |
| |
| /* |
| * cpufeature ID register user accessors |
| * |
| * For now, these registers are immutable for userspace, so no values |
| * are stored, and for set_id_reg() we don't allow the effective value |
| * to be changed. |
| */ |
| static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, |
| bool raz) |
| { |
| const u64 id = sys_reg_to_index(rd); |
| const u64 val = read_id_reg(rd, raz); |
| |
| return reg_to_user(uaddr, &val, id); |
| } |
| |
| static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, |
| bool raz) |
| { |
| const u64 id = sys_reg_to_index(rd); |
| int err; |
| u64 val; |
| |
| err = reg_from_user(&val, uaddr, id); |
| if (err) |
| return err; |
| |
| /* This is what we mean by invariant: you can't change it. */ |
| if (val != read_id_reg(rd, raz)) |
| return -EINVAL; |
| |
| return 0; |
| } |
| |
| static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| return __get_id_reg(rd, uaddr, false); |
| } |
| |
| static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| return __set_id_reg(rd, uaddr, false); |
| } |
| |
| static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| return __get_id_reg(rd, uaddr, true); |
| } |
| |
| static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
| const struct kvm_one_reg *reg, void __user *uaddr) |
| { |
| return __set_id_reg(rd, uaddr, true); |
| } |
| |
| /* sys_reg_desc initialiser for known cpufeature ID registers */ |
| #define ID_SANITISED(name) { \ |
| SYS_DESC(SYS_##name), \ |
| .access = access_id_reg, \ |
| .get_user = get_id_reg, \ |
| .set_user = set_id_reg, \ |
| } |
| |
| /* |
| * sys_reg_desc initialiser for architecturally unallocated cpufeature ID |
| * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 |
| * (1 <= crm < 8, 0 <= Op2 < 8). |
| */ |
| #define ID_UNALLOCATED(crm, op2) { \ |
| Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \ |
| .access = access_raz_id_reg, \ |
| .get_user = get_raz_id_reg, \ |
| .set_user = set_raz_id_reg, \ |
| } |
| |
| /* |
| * sys_reg_desc initialiser for known ID registers that we hide from guests. |
| * For now, these are exposed just like unallocated ID regs: they appear |
| * RAZ for the guest. |
| */ |
| #define ID_HIDDEN(name) { \ |
| SYS_DESC(SYS_##name), \ |
| .access = access_raz_id_reg, \ |
| .get_user = get_raz_id_reg, \ |
| .set_user = set_raz_id_reg, \ |
| } |
| |
| static bool access_sp_el1(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| /* SP_EL1 is NOT maintained in sys_regs array */ |
| if (p->is_write) |
| vcpu->arch.ctxt.gp_regs.sp_el1 = p->regval; |
| else |
| p->regval = vcpu->arch.ctxt.gp_regs.sp_el1; |
| |
| return true; |
| } |
| |
| static bool forward_traps(struct kvm_vcpu *vcpu, u64 control_bit) |
| { |
| bool control_bit_set; |
| |
| control_bit_set = __vcpu_sys_reg(vcpu, HCR_EL2) & control_bit; |
| if (!vcpu_mode_el2(vcpu) && control_bit_set) { |
| kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| return true; |
| } |
| return false; |
| } |
| |
| static bool forward_at_traps(struct kvm_vcpu *vcpu) |
| { |
| return forward_traps(vcpu, HCR_AT); |
| } |
| |
| static bool forward_ttlb_traps(struct kvm_vcpu *vcpu) |
| { |
| return forward_traps(vcpu, HCR_TTLB); |
| } |
| |
/* Decide whether to forward an NV trap to the virtual EL2 (recursive NV) */
| bool forward_nv_traps(struct kvm_vcpu *vcpu) |
| { |
| if (!vcpu_mode_el2(vcpu) && (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV)) |
| return true; |
| return false; |
| } |
| |
/* Decide whether to forward an NV1 trap to the virtual EL2 (recursive NV) */
| static bool forward_nv1_traps(struct kvm_vcpu *vcpu, struct sys_reg_params *p) |
| { |
| if (!vcpu_mode_el2(vcpu) && (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV1)) |
| return true; |
| |
| return false; |
| } |
| |
| static bool access_elr(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (el12_reg(p) && forward_nv_traps(vcpu)) |
| return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| |
| if (!el12_reg(p) && forward_nv1_traps(vcpu, p)) |
| return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| |
| if (p->is_write) |
| vcpu->arch.ctxt.gp_regs.elr_el1 = p->regval; |
| else |
| p->regval = vcpu->arch.ctxt.gp_regs.elr_el1; |
| |
| return true; |
| } |
| |
| static bool access_spsr(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (el12_reg(p) && forward_nv_traps(vcpu)) |
| return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| |
| if (!el12_reg(p) && forward_nv1_traps(vcpu, p)) |
| return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| |
| if (p->is_write) |
| vcpu->arch.ctxt.gp_regs.spsr[KVM_SPSR_EL1] = p->regval; |
| else |
| p->regval = vcpu->arch.ctxt.gp_regs.spsr[KVM_SPSR_EL1]; |
| |
| return true; |
| } |
| |
| static bool access_spsr_el2(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (el12_reg(p) && forward_nv_traps(vcpu)) |
| return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| |
| if (!el12_reg(p) && forward_nv1_traps(vcpu, p)) |
| return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu)); |
| |
| if (p->is_write) |
| vcpu_write_spsr_el2(vcpu, p->regval); |
| else |
| p->regval = vcpu_read_spsr_el2(vcpu); |
| |
| return true; |
| } |
| |
| static bool access_id_aa64mmfr0_el1(struct kvm_vcpu *v, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 val; |
| u64 vtcr_tg0 = VTCR_EL2_TGRAN_FLAGS & VTCR_EL2_TG0_MASK; |
| |
| if (!nested_virt_in_use(v)) |
| return access_id_reg(v, p, r); |
| |
| if (p->is_write) |
| return write_to_read_only(v, p, r); |
| |
	val = read_id_reg(r, false);
	/*
	 * Don't expose granules smaller than the host's granule to the
	 * guest. We could in theory support a guest hypervisor using
	 * granules smaller than the host's, but it is not worth it: it
	 * complicates the implementation and wastes memory.
	 */
| switch (vtcr_tg0) { |
| case VTCR_EL2_TG0_64K: |
| /* 16KB granule not supported */ |
		val &= ~(0xfUL << ID_AA64MMFR0_TGRAN16_SHIFT);
| val |= (ID_AA64MMFR0_TGRAN16_NI << ID_AA64MMFR0_TGRAN16_SHIFT); |
| /* fall through */ |
| case VTCR_EL2_TG0_16K: |
| /* 4KB granule not supported */ |
		val &= ~(0xfUL << ID_AA64MMFR0_TGRAN4_SHIFT);
| val |= (ID_AA64MMFR0_TGRAN4_NI << ID_AA64MMFR0_TGRAN4_SHIFT); |
| break; |
| default: |
| break; |
| } |
| |
	/* Expose only a 40 bit physical address range to the guest hypervisor */
	val &= ~(0xfUL << ID_AA64MMFR0_PARANGE_SHIFT);
| val |= (0x2 << ID_AA64MMFR0_PARANGE_SHIFT); /* 40 bits */ |
| |
| p->regval = val; |
| |
| return true; |
| } |
| |
| static bool access_vttbr_el2(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| struct kvm_s2_mmu *mmu; |
| struct kvm_s2_vmid *vmid; |
| |
| if (!p->is_write) { |
| p->regval = vcpu_read_sys_reg(vcpu, r->reg); |
| return true; |
| } |
| |
| vcpu_write_sys_reg(vcpu, p->regval, r->reg); |
| mmu = vcpu_get_active_s2_mmu(vcpu); |
| vmid = vcpu_get_active_vmid(vcpu); |
| |
| vcpu->arch.vttbr_el2 = kvm_get_vttbr(vmid, mmu); |
| vcpu->arch.hw_mmu = mmu; |
| |
| return true; |
| } |
| |
| /* |
| * Architected system registers. |
| * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2 |
| * |
| * Debug handling: We do trap most, if not all debug related system |
| * registers. The implementation is good enough to ensure that a guest |
| * can use these with minimal performance degradation. The drawback is |
 * that we don't implement any of the external debug architecture,
 * nor the OSLock protocol. This should be revisited if we ever
 * encounter a more demanding guest...
| */ |
| static const struct sys_reg_desc sys_reg_descs[] = { |
| { SYS_DESC(SYS_DC_ISW), access_dcsw }, |
| { SYS_DESC(SYS_DC_CSW), access_dcsw }, |
| { SYS_DESC(SYS_DC_CISW), access_dcsw }, |
| |
| DBG_BCR_BVR_WCR_WVR_EL1(0), |
| DBG_BCR_BVR_WCR_WVR_EL1(1), |
| { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, |
| { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 }, |
| DBG_BCR_BVR_WCR_WVR_EL1(2), |
| DBG_BCR_BVR_WCR_WVR_EL1(3), |
| DBG_BCR_BVR_WCR_WVR_EL1(4), |
| DBG_BCR_BVR_WCR_WVR_EL1(5), |
| DBG_BCR_BVR_WCR_WVR_EL1(6), |
| DBG_BCR_BVR_WCR_WVR_EL1(7), |
| DBG_BCR_BVR_WCR_WVR_EL1(8), |
| DBG_BCR_BVR_WCR_WVR_EL1(9), |
| DBG_BCR_BVR_WCR_WVR_EL1(10), |
| DBG_BCR_BVR_WCR_WVR_EL1(11), |
| DBG_BCR_BVR_WCR_WVR_EL1(12), |
| DBG_BCR_BVR_WCR_WVR_EL1(13), |
| DBG_BCR_BVR_WCR_WVR_EL1(14), |
| DBG_BCR_BVR_WCR_WVR_EL1(15), |
| |
| { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 }, |
| { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 }, |
| |
| { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi }, |
| { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi }, |
| // DBGDTR[TR]X_EL0 share the same encoding |
| { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi }, |
| |
| { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 }, |
| |
| { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, |
| |
| /* |
| * ID regs: all ID_SANITISED() entries here must have corresponding |
| * entries in arm64_ftr_regs[]. |
| */ |
| |
| /* AArch64 mappings of the AArch32 ID registers */ |
| /* CRm=1 */ |
| ID_SANITISED(ID_PFR0_EL1), |
| ID_SANITISED(ID_PFR1_EL1), |
| ID_SANITISED(ID_DFR0_EL1), |
| ID_HIDDEN(ID_AFR0_EL1), |
	ID_SANITISED(ID_MMFR0_EL1),
| ID_SANITISED(ID_MMFR1_EL1), |
| ID_SANITISED(ID_MMFR2_EL1), |
| ID_SANITISED(ID_MMFR3_EL1), |
| |
| /* CRm=2 */ |
| ID_SANITISED(ID_ISAR0_EL1), |
| ID_SANITISED(ID_ISAR1_EL1), |
| ID_SANITISED(ID_ISAR2_EL1), |
| ID_SANITISED(ID_ISAR3_EL1), |
| ID_SANITISED(ID_ISAR4_EL1), |
| ID_SANITISED(ID_ISAR5_EL1), |
| ID_SANITISED(ID_MMFR4_EL1), |
| ID_UNALLOCATED(2,7), |
| |
| /* CRm=3 */ |
| ID_SANITISED(MVFR0_EL1), |
| ID_SANITISED(MVFR1_EL1), |
| ID_SANITISED(MVFR2_EL1), |
| ID_UNALLOCATED(3,3), |
| ID_UNALLOCATED(3,4), |
| ID_UNALLOCATED(3,5), |
| ID_UNALLOCATED(3,6), |
| ID_UNALLOCATED(3,7), |
| |
| /* AArch64 ID registers */ |
| /* CRm=4 */ |
| ID_SANITISED(ID_AA64PFR0_EL1), |
| ID_SANITISED(ID_AA64PFR1_EL1), |
| ID_UNALLOCATED(4,2), |
| ID_UNALLOCATED(4,3), |
| ID_UNALLOCATED(4,4), |
| ID_UNALLOCATED(4,5), |
| ID_UNALLOCATED(4,6), |
| ID_UNALLOCATED(4,7), |
| |
| /* CRm=5 */ |
| ID_SANITISED(ID_AA64DFR0_EL1), |
| ID_SANITISED(ID_AA64DFR1_EL1), |
| ID_UNALLOCATED(5,2), |
| ID_UNALLOCATED(5,3), |
| ID_HIDDEN(ID_AA64AFR0_EL1), |
| ID_HIDDEN(ID_AA64AFR1_EL1), |
| ID_UNALLOCATED(5,6), |
| ID_UNALLOCATED(5,7), |
| |
| /* CRm=6 */ |
| ID_SANITISED(ID_AA64ISAR0_EL1), |
| ID_SANITISED(ID_AA64ISAR1_EL1), |
| ID_UNALLOCATED(6,2), |
| ID_UNALLOCATED(6,3), |
| ID_UNALLOCATED(6,4), |
| ID_UNALLOCATED(6,5), |
| ID_UNALLOCATED(6,6), |
| ID_UNALLOCATED(6,7), |
| |
| /* CRm=7 */ |
	{ SYS_DESC(SYS_ID_AA64MMFR0_EL1), access_id_aa64mmfr0_el1, NULL, 0, 0,
	  get_id_reg, set_id_reg },
| ID_SANITISED(ID_AA64MMFR1_EL1), |
| ID_SANITISED(ID_AA64MMFR2_EL1), |
| ID_UNALLOCATED(7,3), |
| ID_UNALLOCATED(7,4), |
| ID_UNALLOCATED(7,5), |
| ID_UNALLOCATED(7,6), |
| ID_UNALLOCATED(7,7), |
| |
| { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, |
| { SYS_DESC(SYS_CPACR_EL1), access_rw, reset_val, CPACR_EL1, 0 }, |
| { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, |
| { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, |
| { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, |
| |
	{ SYS_DESC(SYS_SPSR_EL1), access_spsr, reset_unknown, SPSR_EL1 },
	{ SYS_DESC(SYS_ELR_EL1), access_elr, reset_unknown, ELR_EL1 },
| |
| { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, |
| { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, |
| { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, |
| |
| { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi }, |
| { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi }, |
| |
| { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 }, |
| { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 }, |
| |
| { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 }, |
| { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 }, |
| |
| { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, |
| { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, |
| |
| { SYS_DESC(SYS_LORSA_EL1), trap_undef }, |
| { SYS_DESC(SYS_LOREA_EL1), trap_undef }, |
| { SYS_DESC(SYS_LORN_EL1), trap_undef }, |
| { SYS_DESC(SYS_LORC_EL1), trap_undef }, |
| { SYS_DESC(SYS_LORID_EL1), trap_undef }, |
| |
| { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 }, |
| { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, |
| |
| { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only }, |
| { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only }, |
| { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only }, |
| { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only }, |
| { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only }, |
| { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi }, |
| { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi }, |
| { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi }, |
| { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only }, |
| { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only }, |
| { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only }, |
| { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre }, |
| |
| { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, |
| { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 }, |
| |
	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0 },
| |
| { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 }, |
| |
| { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, }, |
| { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 }, |
| { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 }, |
| { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 }, |
| { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 }, |
| { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 }, |
| { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid }, |
| { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid }, |
| { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 }, |
| { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper }, |
| { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr }, |
| /* |
| * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero |
| * in 32bit mode. Here we choose to reset it as zero for consistency. |
| */ |
| { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 }, |
| { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 }, |
| |
| { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, |
| { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, |
| |
| { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer }, |
| { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer }, |
| { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer }, |
| |
| /* PMEVCNTRn_EL0 */ |
| PMU_PMEVCNTR_EL0(0), |
| PMU_PMEVCNTR_EL0(1), |
| PMU_PMEVCNTR_EL0(2), |
| PMU_PMEVCNTR_EL0(3), |
| PMU_PMEVCNTR_EL0(4), |
| PMU_PMEVCNTR_EL0(5), |
| PMU_PMEVCNTR_EL0(6), |
| PMU_PMEVCNTR_EL0(7), |
| PMU_PMEVCNTR_EL0(8), |
| PMU_PMEVCNTR_EL0(9), |
| PMU_PMEVCNTR_EL0(10), |
| PMU_PMEVCNTR_EL0(11), |
| PMU_PMEVCNTR_EL0(12), |
| PMU_PMEVCNTR_EL0(13), |
| PMU_PMEVCNTR_EL0(14), |
| PMU_PMEVCNTR_EL0(15), |
| PMU_PMEVCNTR_EL0(16), |
| PMU_PMEVCNTR_EL0(17), |
| PMU_PMEVCNTR_EL0(18), |
| PMU_PMEVCNTR_EL0(19), |
| PMU_PMEVCNTR_EL0(20), |
| PMU_PMEVCNTR_EL0(21), |
| PMU_PMEVCNTR_EL0(22), |
| PMU_PMEVCNTR_EL0(23), |
| PMU_PMEVCNTR_EL0(24), |
| PMU_PMEVCNTR_EL0(25), |
| PMU_PMEVCNTR_EL0(26), |
| PMU_PMEVCNTR_EL0(27), |
| PMU_PMEVCNTR_EL0(28), |
| PMU_PMEVCNTR_EL0(29), |
| PMU_PMEVCNTR_EL0(30), |
| /* PMEVTYPERn_EL0 */ |
| PMU_PMEVTYPER_EL0(0), |
| PMU_PMEVTYPER_EL0(1), |
| PMU_PMEVTYPER_EL0(2), |
| PMU_PMEVTYPER_EL0(3), |
| PMU_PMEVTYPER_EL0(4), |
| PMU_PMEVTYPER_EL0(5), |
| PMU_PMEVTYPER_EL0(6), |
| PMU_PMEVTYPER_EL0(7), |
| PMU_PMEVTYPER_EL0(8), |
| PMU_PMEVTYPER_EL0(9), |
| PMU_PMEVTYPER_EL0(10), |
| PMU_PMEVTYPER_EL0(11), |
| PMU_PMEVTYPER_EL0(12), |
| PMU_PMEVTYPER_EL0(13), |
| PMU_PMEVTYPER_EL0(14), |
| PMU_PMEVTYPER_EL0(15), |
| PMU_PMEVTYPER_EL0(16), |
| PMU_PMEVTYPER_EL0(17), |
| PMU_PMEVTYPER_EL0(18), |
| PMU_PMEVTYPER_EL0(19), |
| PMU_PMEVTYPER_EL0(20), |
| PMU_PMEVTYPER_EL0(21), |
| PMU_PMEVTYPER_EL0(22), |
| PMU_PMEVTYPER_EL0(23), |
| PMU_PMEVTYPER_EL0(24), |
| PMU_PMEVTYPER_EL0(25), |
| PMU_PMEVTYPER_EL0(26), |
| PMU_PMEVTYPER_EL0(27), |
| PMU_PMEVTYPER_EL0(28), |
| PMU_PMEVTYPER_EL0(29), |
| PMU_PMEVTYPER_EL0(30), |
| /* |
| * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero |
| * in 32bit mode. Here we choose to reset it as zero for consistency. |
| */ |
| { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 }, |
| |
| { SYS_DESC(SYS_VPIDR_EL2), access_rw, reset_val, VPIDR_EL2, 0 }, |
| { SYS_DESC(SYS_VMPIDR_EL2), access_rw, reset_val, VMPIDR_EL2, 0 }, |
| |
| { SYS_DESC(SYS_SCTLR_EL2), access_rw, reset_val, SCTLR_EL2, 0 }, |
| { SYS_DESC(SYS_ACTLR_EL2), access_rw, reset_val, ACTLR_EL2, 0 }, |
| { SYS_DESC(SYS_HCR_EL2), access_rw, reset_val, HCR_EL2, 0 }, |
| { SYS_DESC(SYS_MDCR_EL2), access_rw, reset_val, MDCR_EL2, 0 }, |
| { SYS_DESC(SYS_CPTR_EL2), access_rw, reset_val, CPTR_EL2, 0 }, |
| { SYS_DESC(SYS_HSTR_EL2), access_rw, reset_val, HSTR_EL2, 0 }, |
| { SYS_DESC(SYS_HACR_EL2), access_rw, reset_val, HACR_EL2, 0 }, |
| |
| { SYS_DESC(SYS_TTBR0_EL2), access_rw, reset_val, TTBR0_EL2, 0 }, |
| { SYS_DESC(SYS_TTBR1_EL2), access_rw, reset_val, TTBR1_EL2, 0 }, |
| { SYS_DESC(SYS_TCR_EL2), access_rw, reset_val, TCR_EL2, 0 }, |
| { SYS_DESC(SYS_VTTBR_EL2), access_vttbr_el2, reset_val, VTTBR_EL2, 0 }, |
| { SYS_DESC(SYS_VTCR_EL2), access_rw, reset_val, VTCR_EL2, 0 }, |
| |
| { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 }, |
| { SYS_DESC(SYS_SPSR_EL2), access_spsr_el2, reset_val, SPSR_EL2, 0 }, |
| { SYS_DESC(SYS_ELR_EL2), access_rw, reset_val, ELR_EL2, 0 }, |
| { SYS_DESC(SYS_SP_EL1), access_sp_el1, reset_unknown, SP_EL1 }, |
| |
| { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 }, |
| { SYS_DESC(SYS_AFSR0_EL2), access_rw, reset_val, AFSR0_EL2, 0 }, |
| { SYS_DESC(SYS_AFSR1_EL2), access_rw, reset_val, AFSR1_EL2, 0 }, |
| { SYS_DESC(SYS_ESR_EL2), access_rw, reset_val, ESR_EL2, 0 }, |
| { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 }, |
| |
| { SYS_DESC(SYS_FAR_EL2), access_rw, reset_val, FAR_EL2, 0 }, |
| { SYS_DESC(SYS_HPFAR_EL2), access_rw, reset_val, HPFAR_EL2, 0 }, |
| |
| { SYS_DESC(SYS_MAIR_EL2), access_rw, reset_val, MAIR_EL2, 0 }, |
| { SYS_DESC(SYS_AMAIR_EL2), access_rw, reset_val, AMAIR_EL2, 0 }, |
| |
| { SYS_DESC(SYS_VBAR_EL2), access_rw, reset_val, VBAR_EL2, 0 }, |
| { SYS_DESC(SYS_RVBAR_EL2), access_rw, reset_val, RVBAR_EL2, 0 }, |
| { SYS_DESC(SYS_RMR_EL2), access_rw, reset_val, RMR_EL2, 0 }, |
| |
| { SYS_DESC(SYS_ICC_SRE_EL2), trap_raz_wi }, |
| |
| { SYS_DESC(SYS_CONTEXTIDR_EL2), access_rw, reset_val, CONTEXTIDR_EL2, 0 }, |
| { SYS_DESC(SYS_TPIDR_EL2), access_rw, reset_val, TPIDR_EL2, 0 }, |
| |
| { SYS_DESC(SYS_CNTVOFF_EL2), access_rw, reset_val, CNTVOFF_EL2, 0 }, |
| { SYS_DESC(SYS_CNTHCTL_EL2), access_rw, reset_val, CNTHCTL_EL2, 0 }, |
| |
| { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer }, |
| { SYS_DESC(SYS_CNTHP_CTL_EL2), access_arch_timer }, |
| { SYS_DESC(SYS_CNTHP_CVAL_EL2), access_arch_timer }, |
| { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_arch_timer }, |
| { SYS_DESC(SYS_CNTHV_CTL_EL2), access_arch_timer }, |
| { SYS_DESC(SYS_CNTHV_CVAL_EL2), access_arch_timer }, |
| |
| { SYS_DESC(sctlr_EL12), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, |
| { SYS_DESC(cpacr_EL12), access_rw, reset_val, CPACR_EL1, 0 }, |
| { SYS_DESC(ttbr0_EL12), access_vm_reg, reset_unknown, TTBR0_EL1 }, |
| { SYS_DESC(ttbr1_EL12), access_vm_reg, reset_unknown, TTBR1_EL1 }, |
| { SYS_DESC(tcr_EL12), access_vm_reg, reset_val, TCR_EL1, 0 }, |
| { SYS_DESC(spsr_EL12), access_spsr }, |
| { SYS_DESC(elr_EL12), access_elr }, |
| { SYS_DESC(afsr0_EL12), access_vm_reg, reset_unknown, AFSR0_EL1 }, |
| { SYS_DESC(afsr1_EL12), access_vm_reg, reset_unknown, AFSR1_EL1 }, |
| { SYS_DESC(esr_EL12), access_vm_reg, reset_unknown, ESR_EL1 }, |
| { SYS_DESC(far_EL12), access_vm_reg, reset_unknown, FAR_EL1 }, |
| { SYS_DESC(mair_EL12), access_vm_reg, reset_unknown, MAIR_EL1 }, |
| { SYS_DESC(amair_EL12), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, |
| { SYS_DESC(vbar_EL12), access_rw, reset_val, VBAR_EL1, 0 }, |
| { SYS_DESC(contextidr_EL12), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, |
| { SYS_DESC(cntkctl_EL12), access_rw, reset_val, CNTKCTL_EL1, 0 }, |
| |
| { SYS_DESC(cntp_tval_EL02), access_arch_timer }, |
| { SYS_DESC(cntp_ctl_EL02), access_arch_timer }, |
| { SYS_DESC(cntp_cval_EL02), access_arch_timer }, |
| |
| { SYS_DESC(cntv_tval_EL02), access_arch_timer }, |
| { SYS_DESC(cntv_ctl_EL02), access_arch_timer }, |
| { SYS_DESC(cntv_cval_EL02), access_arch_timer }, |
| |
| { SYS_DESC(SYS_SP_EL2), NULL, reset_unknown, SP_EL2 }, |
| }; |
| |
| static bool handle_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| bool el2_format; |
| int sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); |
| |
| /* See '2. EL0/EL1 AT instructions: S1E[01]x, S12E1x' table. */ |
| /* |
| * TODO: Do a vcpu_put()/vcpu_load() here? This used to be: |
| * |
| * struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; |
| * |
| * if (vcpu_el2_e2h_is_set(&vcpu->arch.ctxt) && vcpu_el2_tge_is_set(vcpu)) |
| * ctxt->hw_sys_regs = ctxt->shadow_sys_regs; |
| * else |
| * ctxt->hw_sys_regs = ctxt->sys_regs; |
| */ |
| |
| el2_format = vcpu_el2_format_used(vcpu); |
| |
| kvm_call_hyp(__kvm_at_insn, vcpu, p->regval, el2_format, sys_encoding); |
| |
| return true; |
| } |
| |
| static bool handle_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| bool el2_format; |
| int sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); |
| |
| /* See the '1. EL2 AT instructions: S1E2x' table */ |
| /* |
| * TODO: Do a vcpu_put()/vcpu_load() here? This used to be: |
| * |
| * struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; |
| * |
| * ctxt->hw_sys_regs = ctxt->shadow_sys_regs; |
| */ |
| el2_format = !vcpu_el2_e2h_is_set(&vcpu->arch.ctxt); |
| |
| kvm_call_hyp(__kvm_at_insn, vcpu, p->regval, el2_format, sys_encoding); |
| return true; |
| } |
| |
| static u64 setup_par_aborted(u32 esr) |
| { |
| u64 par = 0; |
| |
| /* S [9]: fault in the stage 2 translation */ |
| par |= (1 << 9); |
| /* FST [6:1]: Fault status code */ |
| par |= (esr << 1); |
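| /* (we assume callers pass only the 6-bit fault status code in esr) */ |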
| /* F [0]: translation is aborted */ |
| par |= 1; |
| |
| return par; |
| } |
| |
| static u64 setup_par_completed(struct kvm_vcpu *vcpu, struct kvm_s2_trans *out) |
| { |
| u64 par, vtcr_sh0; |
| |
| /* F [0]: Translation is completed successfully */ |
| par = 0; |
| /* ATTR [63:56] */ |
| par |= out->upper_attr; |
| /* PA [47:12] */ |
| par |= out->output & GENMASK_ULL(47, 12); |
| /* RES1 [11] */ |
| par |= (1UL << 11); |
| /* SH [8:7]: Shareability attribute */ |
| vtcr_sh0 = __vcpu_sys_reg(vcpu, VTCR_EL2) & VTCR_EL2_SH0_MASK; |
| par |= (vtcr_sh0 >> VTCR_EL2_SH0_SHIFT) << 7; |
| |
| return par; |
| } |
| |
| static bool handle_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r, bool write) |
| { |
| u64 par, va; |
| u32 esr; |
| phys_addr_t ipa; |
| struct kvm_s2_trans out; |
| int ret; |
| |
| /* Do the stage-1 translation */ |
| handle_s1e01(vcpu, p, r); |
| par = vcpu_read_sys_reg(vcpu, PAR_EL1); |
| if (par & 1) { |
| /* The stage-1 translation aborted */ |
| return true; |
| } |
| |
| /* Do the stage-2 translation */ |
| va = p->regval; |
| ipa = (par & GENMASK_ULL(47, 12)) | (va & GENMASK_ULL(11, 0)); |
| out.esr = 0; |
| ret = kvm_walk_nested_s2(vcpu, ipa, &out); |
| if (ret < 0) |
| return false; |
| |
| /* Check if the stage-2 PTW is aborted */ |
| if (out.esr) { |
| esr = out.esr; |
| goto s2_trans_abort; |
| } |
| |
| /* Check the access permission */ |
| if ((!write && !out.readable) || (write && !out.writable)) { |
| esr = ESR_ELx_FSC_PERM; |
| esr |= out.level & 0x3; |
| goto s2_trans_abort; |
| } |
| |
| vcpu_write_sys_reg(vcpu, setup_par_completed(vcpu, &out), PAR_EL1); |
| return true; |
| |
| s2_trans_abort: |
| vcpu_write_sys_reg(vcpu, setup_par_aborted(esr), PAR_EL1); |
| return true; |
| } |
| |
| static bool handle_s12r(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| return handle_s12(vcpu, p, r, false); |
| } |
| |
| static bool handle_s12w(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| return handle_s12(vcpu, p, r, true); |
| } |
| |
| static bool handle_alle2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| struct kvm_s2_mmu *mmu = &vcpu->kvm->arch.mmu; |
| u64 vttbr = kvm_get_vttbr(&mmu->el2_vmid, mmu); |
| |
| /* |
| * To emulate invalidating all EL2 regime stage 1 TLB entries, |
| * invalidate EL1&0 regime stage 1 TLB entries with the virtual EL2's |
| * VMID. |
| */ |
| kvm_call_hyp(__kvm_tlb_flush_local_vmid, vttbr); |
| return true; |
| } |
| |
| static bool handle_alle2is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| struct kvm_s2_mmu *mmu = &vcpu->kvm->arch.mmu; |
| u64 vttbr = kvm_get_vttbr(&mmu->el2_vmid, mmu); |
| |
| /* |
| * To emulate invalidating all EL2 regime stage 1 TLB entries for all |
| * PEs, executing TLBI VMALLE1IS is enough. But we reuse the existing |
| * interface for simplicity; invalidating stage 2 entries doesn't |
| * affect correctness. |
| */ |
| kvm_call_hyp(__kvm_tlb_flush_vmid, vttbr); |
| return true; |
| } |
| |
| static bool handle_vae2(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| struct kvm_s2_mmu *mmu = &vcpu->kvm->arch.mmu; |
| u64 vttbr = kvm_get_vttbr(&mmu->el2_vmid, mmu); |
| int sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); |
| |
| /* |
| * Based on the same principle as TLBI ALLE2 instruction emulation, we |
| * emulate TLBI VAE2* instructions by executing corresponding TLBI VAE1* |
| * instructions with the virtual EL2's VMID assigned by the host |
| * hypervisor. |
| */ |
| kvm_call_hyp(__kvm_tlb_vae2, vttbr, p->regval, sys_encoding); |
| return true; |
| } |
| |
| static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| struct kvm_s2_mmu *mmu = &vcpu->kvm->arch.mmu; |
| u64 vttbr = kvm_get_vttbr(&mmu->vmid, mmu); |
| |
| if (vcpu->kvm->arch.mmu.vmid.vmid_gen) { |
| /* |
| * Invalidate the stage 1 and 2 TLB entries for the host OS |
| * in a VM, but only if a VMID has been allocated for it. |
| */ |
| kvm_call_hyp(__kvm_tlb_flush_vmid, vttbr); |
| } |
| |
| spin_lock(&vcpu->kvm->mmu_lock); |
| /* |
| * Clear all mappings in the shadow page tables and invalidate the stage |
| * 1 and 2 TLB entries via kvm_tlb_flush_vmid_ipa(). |
| */ |
| kvm_nested_s2_clear(vcpu->kvm); |
| spin_unlock(&vcpu->kvm->mmu_lock); |
| |
| return true; |
| } |
| |
| static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 vttbr; |
| struct kvm_s2_mmu *mmu; |
| bool ret; |
| |
| spin_lock(&vcpu->kvm->mmu_lock); |
| /* |
| * Clear mappings in the shadow page tables and invalidate the stage |
| * 1 and 2 TLB entries via kvm_tlb_flush_vmid_ipa() for the current |
| * VMID. |
| */ |
| ret = kvm_nested_s2_clear_curr_vmid(vcpu, 0, KVM_PHYS_SIZE); |
| spin_unlock(&vcpu->kvm->mmu_lock); |
| |
| if (!ret) { |
| /* |
| * Invalidate TLB entries explicitly for the case that the |
| * current VMID is for the host OS in the VM; we don't manage |
| * shadow stage 2 page tables for it. |
| */ |
| mmu = &vcpu->kvm->arch.mmu; |
| vttbr = kvm_get_vttbr(&mmu->vmid, mmu); |
| kvm_call_hyp(__kvm_tlb_flush_vmid, vttbr); |
| } |
| return true; |
| } |
| |
| static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 vttbr; |
| struct kvm_s2_mmu *mmu; |
| bool ret; |
| |
| spin_lock(&vcpu->kvm->mmu_lock); |
| /* |
| * Clear a mapping in the shadow page tables and invalidate the stage |
| * 2 TLB entries via kvm_tlb_flush_vmid_ipa() for the current |
| * VMID and the given ipa. |
| */ |
| ret = kvm_nested_s2_clear_curr_vmid(vcpu, p->regval, PAGE_SIZE); |
| spin_unlock(&vcpu->kvm->mmu_lock); |
| |
| if (!ret) { |
| /* |
| * Invalidate TLB entries explicitly for the case that the |
| * current VMID is for the host OS in the VM; we don't manage |
| * shadow stage 2 page tables for it. |
| */ |
| mmu = &vcpu->kvm->arch.mmu; |
| vttbr = kvm_get_vttbr(&mmu->vmid, mmu); |
| kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, vttbr, p->regval); |
| } |
| |
| return true; |
| } |
| |
| static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| u64 virtual_vttbr = __vcpu_sys_reg(vcpu, VTTBR_EL2); |
| u64 vttbr; |
| struct kvm_nested_s2_mmu *nested_mmu; |
| struct kvm_s2_mmu *mmu = &vcpu->kvm->arch.mmu; |
| int sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2); |
| |
| nested_mmu = lookup_nested_mmu(vcpu, virtual_vttbr); |
| if (!nested_mmu) { |
| /* |
| * If we can't find a shadow VMID, then either the virtual |
| * VMID is for the host OS or the nested VM owning the virtual |
| * VMID has never been executed. (Note that we create a shadow |
| * VMID when entering a VM.) For the former, we can flush TLB |
| * entries belonging to the host OS in a VM. For the latter, we |
| * don't have to do anything. Since we can't differentiate |
| * between those cases, just do what we can do for the former. |
| */ |
| mmu = &vcpu->kvm->arch.mmu; |
| } else { |
| mmu = &nested_mmu->mmu; |
| } |
| |
| vttbr = kvm_get_vttbr(&mmu->vmid, mmu); |
| kvm_call_hyp(__kvm_tlb_el1_instr, vttbr, p->regval, sys_encoding); |
| |
| return true; |
| } |
| |
| /* |
| * AT instruction emulation |
| * |
| * We emulate AT instructions executed in the virtual EL2. |
| * The basic strategy for stage-1 translation emulation is to load the |
| * proper context, which depends on the trapped instruction and the virtual |
| * HCR_EL2, into the EL1 virtual memory control registers and then execute |
| * S1E[01] instructions in EL2. See below for more detail. |
| * |
| * For the stage-2 translation, which is necessary for S12E[01] emulation, |
| * we walk the guest hypervisor's stage-2 page table in software. |
| * |
| * The stage-1 translation emulations can be divided into two groups depending |
| * on the translation regime. |
| * |
| * 1. EL2 AT instructions: S1E2x |
| * +-----------------------------------------------------------------------+ |
| * | | Setting for the emulation | |
| * | Virtual HCR_EL2.E2H on trap |-----------------------------------------+ |
| * | | Phys EL1 regs | Phys NV, NV1 | Phys TGE | |
| * |-----------------------------------------------------------------------| |
| * | 0 | vEL2 | (1, 1) | 0 | |
| * | 1 | vEL2 | (0, 0) | 0 | |
| * +-----------------------------------------------------------------------+ |
| * |
| * We emulate the EL2 AT instructions by loading virtual EL2 context |
| * to the EL1 virtual memory control registers and executing corresponding |
| * EL1 AT instructions. |
| * |
| * We set physical NV and NV1 bits to use EL2 page table format for non-VHE |
| * guest hypervisor (i.e. HCR_EL2.E2H == 0). As a VHE guest hypervisor uses the |
| * EL1 page table format, we don't set those bits. |
| * |
| * We clear the physical TGE bit so that the EL2 translation regime is not |
| * used when the host uses the VHE feature. |
| * |
| * |
| * 2. EL0/EL1 AT instructions: S1E[01]x, S12E1x |
| * +----------------------------------------------------------------------+ |
| * | Virtual HCR_EL2 on trap | Setting for the emulation | |
| * |----------------------------------------------------------------------+ |
| * | (vE2H, vTGE) | (vNV, vNV1) | Phys EL1 regs | Phys NV, NV1 | Phys TGE | |
| * |----------------------------------------------------------------------| |
| * | (0, 0)* | (0, 0) | vEL1 | (0, 0) | 0 | |
| * | (0, 0) | (1, 1) | vEL1 | (1, 1) | 0 | |
| * | (1, 1) | (0, 0) | vEL2 | (0, 0) | 0 | |
| * | (1, 1) | (1, 1) | vEL2 | (1, 1) | 0 | |
| * +----------------------------------------------------------------------+ |
| * |
| * *For (0, 0) in the 'Virtual HCR_EL2 on trap' column, it actually means |
| * (1, 1). We keep them as (0, 0) just for readability. |
| * |
| * We set physical EL1 virtual memory control registers depending on |
| * (vE2H, vTGE) pair. When the pair is (0, 0) where AT instructions are |
| * supposed to use EL0/EL1 translation regime, we load the EL1 registers with |
| * the virtual EL1 registers (i.e. EL1 registers from the guest hypervisor's |
| * point of view). When the pair is (1, 1), however, AT instructions are |
| * defined to apply to the EL2 translation regime. To emulate this behavior, |
| * we load the EL1 registers with the virtual EL2 context (i.e. the shadow |
| * registers). |
| * |
| * We respect the virtual NV and NV1 bits for the emulation. When those bits |
| * are set, it means that the guest hypervisor wants to use the EL2 page |
| * table format for the EL1 translation regime. We emulate this by setting |
| * the physical NV and NV1 bits. |
| */ |
| |
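| /* |
| * The positional initializers below follow struct sys_reg_desc order: |
| * access handler, then NULL/0 for the unused reset handler, reg index, |
| * reset value, get_user and set_user fields, and finally the |
| * forward_trap hook that decides whether to forward the trap to the |
| * virtual EL2. |
| */ |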
| #define SYS_INSN_TO_DESC(insn, access_fn, forward_fn) \ |
| { SYS_DESC((insn)), (access_fn), NULL, 0, 0, NULL, NULL, (forward_fn) } |
| static struct sys_reg_desc sys_insn_descs[] = { |
| SYS_INSN_TO_DESC(AT_S1E1R, handle_s1e01, forward_at_traps), |
| SYS_INSN_TO_DESC(AT_S1E1W, handle_s1e01, forward_at_traps), |
| SYS_INSN_TO_DESC(AT_S1E0R, handle_s1e01, forward_at_traps), |
| SYS_INSN_TO_DESC(AT_S1E0W, handle_s1e01, forward_at_traps), |
| SYS_INSN_TO_DESC(AT_S1E1RP, handle_s1e01, forward_at_traps), |
| SYS_INSN_TO_DESC(AT_S1E1WP, handle_s1e01, forward_at_traps), |
| |
| SYS_INSN_TO_DESC(TLBI_VMALLE1IS, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_VAE1IS, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_ASIDE1IS, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_VAAE1IS, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_VALE1IS, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_VAALE1IS, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_VMALLE1, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_VAE1, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_ASIDE1, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_VAAE1, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_VALE1, handle_tlbi_el1, forward_ttlb_traps), |
| SYS_INSN_TO_DESC(TLBI_VAALE1, handle_tlbi_el1, forward_ttlb_traps), |
| |
| SYS_INSN_TO_DESC(AT_S1E2R, handle_s1e2, forward_nv_traps), |
| SYS_INSN_TO_DESC(AT_S1E2W, handle_s1e2, forward_nv_traps), |
| SYS_INSN_TO_DESC(AT_S12E1R, handle_s12r, forward_nv_traps), |
| SYS_INSN_TO_DESC(AT_S12E1W, handle_s12w, forward_nv_traps), |
| SYS_INSN_TO_DESC(AT_S12E0R, handle_s12r, forward_nv_traps), |
| SYS_INSN_TO_DESC(AT_S12E0W, handle_s12w, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_IPAS2E1IS, handle_ipas2e1is, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_IPAS2LE1IS, handle_ipas2e1is, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_ALLE2IS, handle_alle2is, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_VAE2IS, handle_vae2, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_ALLE1IS, handle_alle1is, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_VALE2IS, handle_vae2, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_VMALLS12E1IS, handle_vmalls12e1is, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_IPAS2E1, handle_ipas2e1is, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_IPAS2LE1, handle_ipas2e1is, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_ALLE2, handle_alle2, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_VAE2, handle_vae2, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_ALLE1, handle_alle1is, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_VALE2, handle_vae2, forward_nv_traps), |
| SYS_INSN_TO_DESC(TLBI_VMALLS12E1, handle_vmalls12e1is, forward_nv_traps), |
| }; |
| |
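| /* |
| * Pack an encoding into a single value so the descriptor tables can be |
| * binary searched: Op0 lands in bits [15:14], Op1 in [13:11], CRn in |
| * [10:7], CRm in [6:3] and Op2 in [2:0], the same ordering that |
| * cmp_sys_reg() uses to keep the tables sorted. |
| */ |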
| #define reg_to_match_value(x) \ |
| ({ \ |
| unsigned long val; \ |
| val = (x)->Op0 << 14; \ |
| val |= (x)->Op1 << 11; \ |
| val |= (x)->CRn << 7; \ |
| val |= (x)->CRm << 3; \ |
| val |= (x)->Op2; \ |
| val; \ |
| }) |
| |
| static int match_sys_reg(const void *key, const void *elt) |
| { |
| const unsigned long pval = (unsigned long)key; |
| const struct sys_reg_desc *r = elt; |
| |
| return pval - reg_to_match_value(r); |
| } |
| |
| static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, |
| const struct sys_reg_desc table[], |
| unsigned int num) |
| { |
| unsigned long pval = reg_to_match_value(params); |
| |
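| /* The packed value itself, cast to a pointer, is the bsearch key. */ |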
| return bsearch((void *)pval, table, num, sizeof(table[0]), |
| match_sys_reg); |
| } |
| |
| static void perform_access(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *params, |
| const struct sys_reg_desc *r) |
| { |
| /* |
| * Not having an accessor means that we have configured a trap |
| * that we don't know how to handle. This certainly qualifies |
| * as a gross bug that should be fixed right away. |
| */ |
| BUG_ON(!r->access); |
| |
| /* |
| * Forward this trap to the virtual EL2 if the guest hypervisor has |
| * configured a trap for the current instruction. |
| */ |
| if (nested_virt_in_use(vcpu) && r->forward_trap |
| && unlikely(r->forward_trap(vcpu))) |
| return; |
| |
| /* Skip instruction if instructed so */ |
| if (likely(r->access(vcpu, params, r))) |
| kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
| } |
| |
| static int emulate_sys_instr(struct kvm_vcpu *vcpu, struct sys_reg_params *p) |
| { |
| const struct sys_reg_desc *r; |
| |
| /* Search the system instruction table. */ |
| r = find_reg(p, sys_insn_descs, ARRAY_SIZE(sys_insn_descs)); |
| |
| if (likely(r)) { |
| perform_access(vcpu, p, r); |
| } else { |
| kvm_err("Unsupported guest sys instruction at: %lx\n", |
| *vcpu_pc(vcpu)); |
| print_sys_reg_instr(p); |
| kvm_inject_undefined(vcpu); |
| } |
| return 1; |
| } |
| |
| static bool trap_dbgidr(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (p->is_write) { |
| return ignore_write(vcpu, p); |
| } else { |
| u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); |
| u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
| u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT); |
| |
| p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | |
| (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | |
| (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) | |
| (6 << 16) | (el3 << 14) | (el3 << 12)); |
| return true; |
| } |
| } |
| |
| static bool trap_debug32(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *r) |
| { |
| if (p->is_write) { |
| vcpu_cp14(vcpu, r->reg) = p->regval; |
| vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; |
| } else { |
| p->regval = vcpu_cp14(vcpu, r->reg); |
| } |
| |
| return true; |
| } |
| |
| /* AArch32 debug register mappings |
| * |
| * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0] |
| * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32] |
| * |
| * All control registers and watchpoint value registers are mapped to |
| * the lower 32 bits of their AArch64 equivalents. We share the trap |
| * handlers with the above AArch64 code which checks what mode the |
| * system is in. |
| */ |
| |
| static bool trap_xvr(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *p, |
| const struct sys_reg_desc *rd) |
| { |
| u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; |
| |
| if (p->is_write) { |
| u64 val = *dbg_reg; |
| |
| val &= 0xffffffffUL; |
| val |= p->regval << 32; |
| *dbg_reg = val; |
| |
| vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY; |
| } else { |
| p->regval = *dbg_reg >> 32; |
| } |
| |
| trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); |
| |
| return true; |
| } |
| |
| #define DBG_BCR_BVR_WCR_WVR(n) \ |
| /* DBGBVRn */ \ |
| { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \ |
| /* DBGBCRn */ \ |
| { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \ |
| /* DBGWVRn */ \ |
| { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \ |
| /* DBGWCRn */ \ |
| { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n } |
| |
| #define DBGBXVR(n) \ |
| { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n } |
| |
| /* |
| * Trapped cp14 registers. We generally ignore most of the external |
| * debug, on the principle that they don't really make sense to a |
| * guest. Revisit this one day, should this principle change. |
| */ |
| static const struct sys_reg_desc cp14_regs[] = { |
| /* DBGIDR */ |
| { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, |
| /* DBGDTRRXext */ |
| { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, |
| |
| DBG_BCR_BVR_WCR_WVR(0), |
| /* DBGDSCRint */ |
| { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, |
| DBG_BCR_BVR_WCR_WVR(1), |
| /* DBGDCCINT */ |
| { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, |
| /* DBGDSCRext */ |
| { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, |
| DBG_BCR_BVR_WCR_WVR(2), |
| /* DBGDTR[RT]Xint */ |
| { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, |
| /* DBGDTR[RT]Xext */ |
| { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, |
| DBG_BCR_BVR_WCR_WVR(3), |
| DBG_BCR_BVR_WCR_WVR(4), |
| DBG_BCR_BVR_WCR_WVR(5), |
| /* DBGWFAR */ |
| { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, |
| /* DBGOSECCR */ |
| { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, |
| DBG_BCR_BVR_WCR_WVR(6), |
| /* DBGVCR */ |
| { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, |
| DBG_BCR_BVR_WCR_WVR(7), |
| DBG_BCR_BVR_WCR_WVR(8), |
| DBG_BCR_BVR_WCR_WVR(9), |
| DBG_BCR_BVR_WCR_WVR(10), |
| DBG_BCR_BVR_WCR_WVR(11), |
| DBG_BCR_BVR_WCR_WVR(12), |
| DBG_BCR_BVR_WCR_WVR(13), |
| DBG_BCR_BVR_WCR_WVR(14), |
| DBG_BCR_BVR_WCR_WVR(15), |
| |
| /* DBGDRAR (32bit) */ |
| { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, |
| |
| DBGBXVR(0), |
| /* DBGOSLAR */ |
| { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, |
| DBGBXVR(1), |
| /* DBGOSLSR */ |
| { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 }, |
| DBGBXVR(2), |
| DBGBXVR(3), |
| /* DBGOSDLR */ |
| { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, |
| DBGBXVR(4), |
| /* DBGPRCR */ |
| { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, |
| DBGBXVR(5), |
| DBGBXVR(6), |
| DBGBXVR(7), |
| DBGBXVR(8), |
| DBGBXVR(9), |
| DBGBXVR(10), |
| DBGBXVR(11), |
| DBGBXVR(12), |
| DBGBXVR(13), |
| DBGBXVR(14), |
| DBGBXVR(15), |
| |
| /* DBGDSAR (32bit) */ |
| { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, |
| |
| /* DBGDEVID2 */ |
| { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, |
| /* DBGDEVID1 */ |
| { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, |
| /* DBGDEVID */ |
| { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, |
| /* DBGCLAIMSET */ |
| { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, |
| /* DBGCLAIMCLR */ |
| { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, |
| /* DBGAUTHSTATUS */ |
| { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, |
| }; |
| |
| /* Trapped cp14 64bit registers */ |
| static const struct sys_reg_desc cp14_64_regs[] = { |
| /* DBGDRAR (64bit) */ |
| { Op1( 0), CRm( 1), .access = trap_raz_wi }, |
| |
| /* DBGDSAR (64bit) */ |
| { Op1( 0), CRm( 2), .access = trap_raz_wi }, |
| }; |
| |
| /* Macro to expand the PMEVCNTRn register */ |
| #define PMU_PMEVCNTR(n) \ |
| /* PMEVCNTRn */ \ |
| { Op1(0), CRn(0b1110), \ |
| CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ |
| access_pmu_evcntr } |
| |
| /* Macro to expand the PMEVTYPERn register */ |
| #define PMU_PMEVTYPER(n) \ |
| /* PMEVTYPERn */ \ |
| { Op1(0), CRn(0b1110), \ |
| CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \ |
| access_pmu_evtyper } |
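| /* |
| * In both macros the counter index n is split across the encoding: |
| * CRm[1:0] holds n[4:3] and Op2 holds n[2:0], matching the architected |
| * PMEVCNTR<n>/PMEVTYPER<n> encodings. |
| */ |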
| |
| /* |
| * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, |
| * depending on the way they are accessed (as a 32bit or a 64bit |
| * register). |
| */ |
| static const struct sys_reg_desc cp15_regs[] = { |
| { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, |
| { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, |
| { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, |
| { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, |
| { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, |
| { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, |
| { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, |
| { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR }, |
| { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR }, |
| { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR }, |
| { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR }, |
| |
| /* |
| * DC{C,I,CI}SW operations: |
| */ |
| { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, |
| { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, |
| { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, |
| |
| /* PMU */ |
| { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, |
| { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten }, |
| { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten }, |
| { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs }, |
| { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc }, |
| { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, |
| { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, |
| { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, |
| { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr }, |
| { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper }, |
| { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr }, |
| { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr }, |
| { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten }, |
| { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten }, |
| { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs }, |
| |
| { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, |
| { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, |
| { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, |
| { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, |
| |
| /* ICC_SRE */ |
| { Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre }, |
| |
| { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, |
| |
| /* CNTP_TVAL */ |
| { Op1( 0), CRn(14), CRm( 2), Op2( 0), access_arch_timer }, |
| /* CNTP_CTL */ |
| { Op1( 0), CRn(14), CRm( 2), Op2( 1), access_arch_timer }, |
| |
| /* PMEVCNTRn */ |
| PMU_PMEVCNTR(0), |
| PMU_PMEVCNTR(1), |
| PMU_PMEVCNTR(2), |
| PMU_PMEVCNTR(3), |
| PMU_PMEVCNTR(4), |
| PMU_PMEVCNTR(5), |
| PMU_PMEVCNTR(6), |
| PMU_PMEVCNTR(7), |
| PMU_PMEVCNTR(8), |
| PMU_PMEVCNTR(9), |
| PMU_PMEVCNTR(10), |
| PMU_PMEVCNTR(11), |
| PMU_PMEVCNTR(12), |
| PMU_PMEVCNTR(13), |
| PMU_PMEVCNTR(14), |
| PMU_PMEVCNTR(15), |
| PMU_PMEVCNTR(16), |
| PMU_PMEVCNTR(17), |
| PMU_PMEVCNTR(18), |
| PMU_PMEVCNTR(19), |
| PMU_PMEVCNTR(20), |
| PMU_PMEVCNTR(21), |
| PMU_PMEVCNTR(22), |
| PMU_PMEVCNTR(23), |
| PMU_PMEVCNTR(24), |
| PMU_PMEVCNTR(25), |
| PMU_PMEVCNTR(26), |
| PMU_PMEVCNTR(27), |
| PMU_PMEVCNTR(28), |
| PMU_PMEVCNTR(29), |
| PMU_PMEVCNTR(30), |
| /* PMEVTYPERn */ |
| PMU_PMEVTYPER(0), |
| PMU_PMEVTYPER(1), |
| PMU_PMEVTYPER(2), |
| PMU_PMEVTYPER(3), |
| PMU_PMEVTYPER(4), |
| PMU_PMEVTYPER(5), |
| PMU_PMEVTYPER(6), |
| PMU_PMEVTYPER(7), |
| PMU_PMEVTYPER(8), |
| PMU_PMEVTYPER(9), |
| PMU_PMEVTYPER(10), |
| PMU_PMEVTYPER(11), |
| PMU_PMEVTYPER(12), |
| PMU_PMEVTYPER(13), |
| PMU_PMEVTYPER(14), |
| PMU_PMEVTYPER(15), |
| PMU_PMEVTYPER(16), |
| PMU_PMEVTYPER(17), |
| PMU_PMEVTYPER(18), |
| PMU_PMEVTYPER(19), |
| PMU_PMEVTYPER(20), |
| PMU_PMEVTYPER(21), |
| PMU_PMEVTYPER(22), |
| PMU_PMEVTYPER(23), |
| PMU_PMEVTYPER(24), |
| PMU_PMEVTYPER(25), |
| PMU_PMEVTYPER(26), |
| PMU_PMEVTYPER(27), |
| PMU_PMEVTYPER(28), |
| PMU_PMEVTYPER(29), |
| PMU_PMEVTYPER(30), |
| /* PMCCFILTR */ |
| { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper }, |
| }; |
| |
| static const struct sys_reg_desc cp15_64_regs[] = { |
| { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, |
| { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr }, |
| { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */ |
| { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, |
| { Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */ |
| { Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */ |
| { Op1( 2), CRn( 0), CRm(14), Op2( 0), access_arch_timer }, |
| }; |
| |
| /* Target specific emulation tables */ |
| static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS]; |
| |
| void kvm_register_target_sys_reg_table(unsigned int target, |
| struct kvm_sys_reg_target_table *table) |
| { |
| target_tables[target] = table; |
| } |
| |
| /* Get specific register table for this target. */ |
| static const struct sys_reg_desc *get_target_table(unsigned target, |
| bool mode_is_64, |
| size_t *num) |
| { |
| struct kvm_sys_reg_target_table *table; |
| |
| table = target_tables[target]; |
| if (mode_is_64) { |
| *num = table->table64.num; |
| return table->table64.table; |
| } else { |
| *num = table->table32.num; |
| return table->table32.table; |
| } |
| } |
| |
| int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) |
| { |
| kvm_inject_undefined(vcpu); |
| return 1; |
| } |
| |
| /* |
| * emulate_cp -- tries to match a sys_reg access in a handling table, and |
| * calls the corresponding trap handler. |
| * |
| * @params: pointer to the descriptor of the access |
| * @table: array of trap descriptors |
| * @num: size of the trap descriptor array |
| * |
| * Return 0 if the access has been handled, and -1 if not. |
| */ |
| static int emulate_cp(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *params, |
| const struct sys_reg_desc *table, |
| size_t num) |
| { |
| const struct sys_reg_desc *r; |
| |
| if (!table) |
| return -1; /* Not handled */ |
| |
| r = find_reg(params, table, num); |
| |
| if (r) { |
| perform_access(vcpu, params, r); |
| return 0; |
| } |
| |
| /* Not handled */ |
| return -1; |
| } |
| |
| static void unhandled_cp_access(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *params) |
| { |
| u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); |
| int cp = -1; |
| |
| switch (hsr_ec) { |
| case ESR_ELx_EC_CP15_32: |
| case ESR_ELx_EC_CP15_64: |
| cp = 15; |
| break; |
| case ESR_ELx_EC_CP14_MR: |
| case ESR_ELx_EC_CP14_64: |
| cp = 14; |
| break; |
| default: |
| WARN_ON(1); |
| } |
| |
| kvm_err("Unsupported guest CP%d access at: %08lx\n", |
| cp, *vcpu_pc(vcpu)); |
| print_sys_reg_instr(params); |
| kvm_inject_undefined(vcpu); |
| } |
| |
| /** |
| * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access |
| * @vcpu: The VCPU pointer |
| * @global: The global trap descriptor table |
| * @nr_global: The number of entries in @global |
| * @target_specific: The target-specific trap descriptor table |
| * @nr_specific: The number of entries in @target_specific |
| */ |
| static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, |
| const struct sys_reg_desc *global, |
| size_t nr_global, |
| const struct sys_reg_desc *target_specific, |
| size_t nr_specific) |
| { |
| struct sys_reg_params params; |
| u32 hsr = kvm_vcpu_get_hsr(vcpu); |
| int Rt = kvm_vcpu_sys_get_rt(vcpu); |
| int Rt2 = (hsr >> 10) & 0x1f; |
| |
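| /* |
| * MCRR/MRRC trap ISS layout: Opc1 [19:16], Rt2 [14:10], Rt [9:5], |
| * CRm [4:1] and the Direction bit [0]; there is no CRn or Opc2. |
| */ |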
| params.is_aarch32 = true; |
| params.is_32bit = false; |
| params.CRm = (hsr >> 1) & 0xf; |
| params.is_write = ((hsr & 1) == 0); |
| |
| params.Op0 = 0; |
| params.Op1 = (hsr >> 16) & 0xf; |
| params.Op2 = 0; |
| params.CRn = 0; |
| |
| /* |
| * Make a 64-bit value out of Rt and Rt2. As we use the same trap |
| * backends between AArch32 and AArch64, we get away with it. |
| */ |
| if (params.is_write) { |
| params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; |
| params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; |
| } |
| |
| /* |
| * Try to emulate the coprocessor access using the target |
| * specific table first, and using the global table afterwards. |
| * If either of the tables contains a handler, handle the |
| * potential register operation in the case of a read and return |
| * with success. |
| */ |
| if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific) || |
| !emulate_cp(vcpu, ¶ms, global, nr_global)) { |
| /* Split up the value between registers for the read side */ |
| if (!params.is_write) { |
| vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); |
| vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); |
| } |
| |
| return 1; |
| } |
| |
| unhandled_cp_access(vcpu, ¶ms); |
| return 1; |
| } |
| |
| /** |
| * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access |
| * @vcpu: The VCPU pointer |
| * @global: The global trap descriptor table |
| * @nr_global: The number of entries in @global |
| * @target_specific: The target-specific trap descriptor table |
| * @nr_specific: The number of entries in @target_specific |
| */ |
| static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, |
| const struct sys_reg_desc *global, |
| size_t nr_global, |
| const struct sys_reg_desc *target_specific, |
| size_t nr_specific) |
| { |
| struct sys_reg_params params; |
| u32 hsr = kvm_vcpu_get_hsr(vcpu); |
| int Rt = kvm_vcpu_sys_get_rt(vcpu); |
| |
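| /* |
| * MCR/MRC trap ISS layout: Opc2 [19:17], Opc1 [16:14], CRn [13:10], |
| * Rt [9:5], CRm [4:1] and the Direction bit [0]. |
| */ |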
| params.is_aarch32 = true; |
| params.is_32bit = true; |
| params.CRm = (hsr >> 1) & 0xf; |
| params.regval = vcpu_get_reg(vcpu, Rt); |
| params.is_write = ((hsr & 1) == 0); |
| params.CRn = (hsr >> 10) & 0xf; |
| params.Op0 = 0; |
| params.Op1 = (hsr >> 14) & 0x7; |
| params.Op2 = (hsr >> 17) & 0x7; |
| |
| if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific) || |
| !emulate_cp(vcpu, ¶ms, global, nr_global)) { |
| if (!params.is_write) |
| vcpu_set_reg(vcpu, Rt, params.regval); |
| return 1; |
| } |
| |
| unhandled_cp_access(vcpu, ¶ms); |
| return 1; |
| } |
| |
| int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) |
| { |
| const struct sys_reg_desc *target_specific; |
| size_t num; |
| |
| target_specific = get_target_table(vcpu->arch.target, false, &num); |
| return kvm_handle_cp_64(vcpu, |
| cp15_64_regs, ARRAY_SIZE(cp15_64_regs), |
| target_specific, num); |
| } |
| |
| int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) |
| { |
| const struct sys_reg_desc *target_specific; |
| size_t num; |
| |
| target_specific = get_target_table(vcpu->arch.target, false, &num); |
| return kvm_handle_cp_32(vcpu, |
| cp15_regs, ARRAY_SIZE(cp15_regs), |
| target_specific, num); |
| } |
| |
| int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) |
| { |
| return kvm_handle_cp_64(vcpu, |
| cp14_64_regs, ARRAY_SIZE(cp14_64_regs), |
| NULL, 0); |
| } |
| |
| int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) |
| { |
| return kvm_handle_cp_32(vcpu, |
| cp14_regs, ARRAY_SIZE(cp14_regs), |
| NULL, 0); |
| } |
| |
| static int emulate_sys_reg(struct kvm_vcpu *vcpu, |
| struct sys_reg_params *params) |
| { |
| size_t num; |
| const struct sys_reg_desc *table, *r; |
| |
| table = get_target_table(vcpu->arch.target, true, &num); |
| |
| /* Search target-specific then generic table. */ |
| r = find_reg(params, table, num); |
| if (!r) |
| r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); |
| |
| if (likely(r)) { |
| perform_access(vcpu, params, r); |
| } else { |
| kvm_err("Unsupported guest sys_reg access at: %lx\n", |
| *vcpu_pc(vcpu)); |
| print_sys_reg_instr(params); |
| kvm_inject_undefined(vcpu); |
| } |
| return 1; |
| } |
| |
| static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, |
| const struct sys_reg_desc *table, size_t num) |
| { |
| unsigned long i; |
| |
| for (i = 0; i < num; i++) |
| if (table[i].reset) |
| table[i].reset(vcpu, &table[i]); |
| } |
| |
| /** |
| * kvm_handle_sys -- handles a system instruction or mrs/msr instruction |
| * trap on guest execution |
| * @vcpu: The VCPU pointer |
| * @run: The kvm_run struct |
| */ |
| int kvm_handle_sys(struct kvm_vcpu *vcpu, struct kvm_run *run) |
| { |
| struct sys_reg_params params; |
| unsigned long esr = kvm_vcpu_get_hsr(vcpu); |
| int Rt = kvm_vcpu_sys_get_rt(vcpu); |
| int ret; |
| |
| trace_kvm_handle_sys(esr); |
| |
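| /* |
| * MSR/MRS trap ISS layout: Op0 [21:20], Op2 [19:17], Op1 [16:14], |
| * CRn [13:10], Rt [9:5], CRm [4:1] and the Direction bit [0]. |
| */ |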
| params.is_aarch32 = false; |
| params.is_32bit = false; |
| params.Op0 = (esr >> 20) & 3; |
| params.Op1 = (esr >> 14) & 0x7; |
| params.CRn = (esr >> 10) & 0xf; |
| params.CRm = (esr >> 1) & 0xf; |
| params.Op2 = (esr >> 17) & 0x7; |
| params.regval = vcpu_get_reg(vcpu, Rt); |
| params.is_write = !(esr & 1); |
| |
| if (params.Op0 == 1) { |
| /* System instructions */ |
| ret = emulate_sys_instr(vcpu, ¶ms); |
| } else { |
| /* MRS/MSR instructions */ |
| ret = emulate_sys_reg(vcpu, ¶ms); |
| if (!params.is_write) |
| vcpu_set_reg(vcpu, Rt, params.regval); |
| } |
| |
| return ret; |
| } |
| |
| /****************************************************************************** |
| * Userspace API |
| *****************************************************************************/ |
| |
| static bool index_to_params(u64 id, struct sys_reg_params *params) |
| { |
| switch (id & KVM_REG_SIZE_MASK) { |
| case KVM_REG_SIZE_U64: |
| /* Any unused index bits mean it's not valid. */ |
| if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK |
| | KVM_REG_ARM_COPROC_MASK |
| | KVM_REG_ARM64_SYSREG_OP0_MASK |
| | KVM_REG_ARM64_SYSREG_OP1_MASK |
| | KVM_REG_ARM64_SYSREG_CRN_MASK |
| | KVM_REG_ARM64_SYSREG_CRM_MASK |
| | KVM_REG_ARM64_SYSREG_OP2_MASK)) |
| return false; |
| params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) |
| >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); |
| params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) |
| >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); |
| params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) |
| >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); |
| params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) |
| >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); |
| params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) |
| >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); |
| return true; |
| default: |
| return false; |
| } |
| } |
| |
| const struct sys_reg_desc *find_reg_by_id(u64 id, |
| struct sys_reg_params *params, |
| const struct sys_reg_desc table[], |
| unsigned int num) |
| { |
| if (!index_to_params(id, params)) |
| return NULL; |
| |
| return find_reg(params, table, num); |
| } |
| |
| /* Decode an index value, and find the sys_reg_desc entry. */ |
| static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, |
| u64 id) |
| { |
| size_t num; |
| const struct sys_reg_desc *table, *r; |
| struct sys_reg_params params; |
| |
| /* We only do sys_reg for now. */ |
| if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) |
| return NULL; |
| |
| table = get_target_table(vcpu->arch.target, true, &num); |
| r = find_reg_by_id(id, ¶ms, table, num); |
| if (!r) |
| r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); |
| |
| /* Not saved in the sys_reg array and not otherwise accessible? */ |
| if (r && !(r->reg || r->get_user)) |
| r = NULL; |
| |
| return r; |
| } |
| |
| /* |
| * These are the invariant sys_reg registers: we let the guest see the |
| * host versions of these, so they're part of the guest state. |
| * |
| * A future CPU may provide a mechanism to present different values to |
| * the guest, or a future kvm may trap them. |
| */ |
| |
| #define FUNCTION_INVARIANT(reg) \ |
| static void get_##reg(struct kvm_vcpu *v, \ |
| const struct sys_reg_desc *r) \ |
| { \ |
| ((struct sys_reg_desc *)r)->val = read_sysreg(reg); \ |
| } |
| |
| FUNCTION_INVARIANT(midr_el1) |
| FUNCTION_INVARIANT(ctr_el0) |
| FUNCTION_INVARIANT(revidr_el1) |
| FUNCTION_INVARIANT(clidr_el1) |
| FUNCTION_INVARIANT(aidr_el1) |
| |
| /* ->val is filled in by kvm_sys_reg_table_init() */ |
| static struct sys_reg_desc invariant_sys_regs[] = { |
| { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 }, |
| { SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 }, |
| { SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 }, |
| { SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 }, |
| { SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 }, |
| }; |
| |
| static int reg_from_user(u64 *val, const void __user *uaddr, u64 id) |
| { |
| if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) |
| return -EFAULT; |
| return 0; |
| } |
| |
| static int reg_to_user(void __user *uaddr, const u64 *val, u64 id) |
| { |
| if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) |
| return -EFAULT; |
| return 0; |
| } |
| |
| static int get_invariant_sys_reg(u64 id, void __user *uaddr) |
| { |
| struct sys_reg_params params; |
| const struct sys_reg_desc *r; |
| |
| r = find_reg_by_id(id, ¶ms, invariant_sys_regs, |
| ARRAY_SIZE(invariant_sys_regs)); |
| if (!r) |
| return -ENOENT; |
| |
| return reg_to_user(uaddr, &r->val, id); |
| } |
| |
| static int set_invariant_sys_reg(u64 id, void __user *uaddr) |
| { |
| struct sys_reg_params params; |
| const struct sys_reg_desc *r; |
| int err; |
| u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ |
| |
| r = find_reg_by_id(id, ¶ms, invariant_sys_regs, |
| ARRAY_SIZE(invariant_sys_regs)); |
| if (!r) |
| return -ENOENT; |
| |
| err = reg_from_user(&val, uaddr, id); |
| if (err) |
| return err; |
| |
| /* This is what we mean by invariant: you can't change it. */ |
| if (r->val != val) |
| return -EINVAL; |
| |
| return 0; |
| } |
| |
| static bool is_valid_cache(u32 val) |
| { |
| u32 level, ctype; |
| |
| if (val >= CSSELR_MAX) |
| return false; |
| |
| /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ |
| level = (val >> 1); |
| ctype = (cache_levels >> (level * 3)) & 7; |
| |
| switch (ctype) { |
| case 0: /* No cache */ |
| return false; |
| case 1: /* Instruction cache only */ |
| return (val & 1); |
| case 2: /* Data cache only */ |
| case 4: /* Unified cache */ |
| return !(val & 1); |
| case 3: /* Separate instruction and data caches */ |
| return true; |
| default: /* Reserved: we can't know instruction or data. */ |
| return false; |
| } |
| } |
| |
| static int demux_c15_get(u64 id, void __user *uaddr) |
| { |
| u32 val; |
| u32 __user *uval = uaddr; |
| |
| /* Fail if we have unknown bits set. */ |
| if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK |
| | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) |
| return -ENOENT; |
| |
| switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { |
| case KVM_REG_ARM_DEMUX_ID_CCSIDR: |
| if (KVM_REG_SIZE(id) != 4) |
| return -ENOENT; |
| val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) |
| >> KVM_REG_ARM_DEMUX_VAL_SHIFT; |
| if (!is_valid_cache(val)) |
| return -ENOENT; |
| |
| return put_user(get_ccsidr(val), uval); |
| default: |
| return -ENOENT; |
| } |
| } |
| |
| static int demux_c15_set(u64 id, void __user *uaddr) |
| { |
| u32 val, newval; |
| u32 __user *uval = uaddr; |
| |
| /* Fail if we have unknown bits set. */ |
| if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK |
| | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) |
| return -ENOENT; |
| |
| switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { |
| case KVM_REG_ARM_DEMUX_ID_CCSIDR: |
| if (KVM_REG_SIZE(id) != 4) |
| return -ENOENT; |
| val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) |
| >> KVM_REG_ARM_DEMUX_VAL_SHIFT; |
| if (!is_valid_cache(val)) |
| return -ENOENT; |
| |
| if (get_user(newval, uval)) |
| return -EFAULT; |
| |
| /* This is also invariant: you can't change it. */ |
| if (newval != get_ccsidr(val)) |
| return -EINVAL; |
| return 0; |
| default: |
| return -ENOENT; |
| } |
| } |
| |
| int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
| { |
| const struct sys_reg_desc *r; |
| void __user *uaddr = (void __user *)(unsigned long)reg->addr; |
| |
| if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) |
| return demux_c15_get(reg->id, uaddr); |
| |
| if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) |
| return -ENOENT; |
| |
| r = index_to_sys_reg_desc(vcpu, reg->id); |
| if (!r) |
| return get_invariant_sys_reg(reg->id, uaddr); |
| |
| if (r->get_user) |
| return (r->get_user)(vcpu, r, reg, uaddr); |
| |
| return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id); |
| } |
| |
| int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
| { |
| const struct sys_reg_desc *r; |
| void __user *uaddr = (void __user *)(unsigned long)reg->addr; |
| |
| if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) |
| return demux_c15_set(reg->id, uaddr); |
| |
| if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) |
| return -ENOENT; |
| |
| r = index_to_sys_reg_desc(vcpu, reg->id); |
| if (!r) |
| return set_invariant_sys_reg(reg->id, uaddr); |
| |
| if (r->set_user) |
| return (r->set_user)(vcpu, r, reg, uaddr); |
| |
| return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id); |
| } |
| |
| static unsigned int num_demux_regs(void) |
| { |
| unsigned int i, count = 0; |
| |
| for (i = 0; i < CSSELR_MAX; i++) |
| if (is_valid_cache(i)) |
| count++; |
| |
| return count; |
| } |
| |
| static int write_demux_regids(u64 __user *uindices) |
| { |
| u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; |
| unsigned int i; |
| |
| val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; |
| for (i = 0; i < CSSELR_MAX; i++) { |
| if (!is_valid_cache(i)) |
| continue; |
| if (put_user(val | i, uindices)) |
| return -EFAULT; |
| uindices++; |
| } |
| return 0; |
| } |
| |
| static u64 sys_reg_to_index(const struct sys_reg_desc *reg) |
| { |
| return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | |
| KVM_REG_ARM64_SYSREG | |
| (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | |
| (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | |
| (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | |
| (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | |
| (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); |
| } |
| |
| static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) |
| { |
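| /* A NULL cursor means the caller is only counting registers. */ |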
| if (!*uind) |
| return true; |
| |
| if (put_user(sys_reg_to_index(reg), *uind)) |
| return false; |
| |
| (*uind)++; |
| return true; |
| } |
| |
| static int walk_one_sys_reg(const struct sys_reg_desc *rd, |
| u64 __user **uind, |
| unsigned int *total) |
| { |
| /* |
| * Ignore registers we trap but don't save, |
| * and for which no custom user accessor is provided. |
| */ |
| if (!(rd->reg || rd->get_user)) |
| return 0; |
| |
| if (!copy_reg_to_user(rd, uind)) |
| return -EFAULT; |
| |
| (*total)++; |
| return 0; |
| } |
| |
| /* Assumed ordered tables, see kvm_sys_reg_table_init. */ |
| static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) |
| { |
| const struct sys_reg_desc *i1, *i2, *end1, *end2; |
| unsigned int total = 0; |
| size_t num; |
| int err; |
| |
| /* We check for duplicates here, to allow arch-specific overrides. */ |
| i1 = get_target_table(vcpu->arch.target, true, &num); |
| end1 = i1 + num; |
| i2 = sys_reg_descs; |
| end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); |
| |
| BUG_ON(i1 == end1 || i2 == end2); |
| |
| /* Walk carefully, as both tables may refer to the same register. */ |
| while (i1 || i2) { |
| int cmp = cmp_sys_reg(i1, i2); |
| /* target-specific overrides generic entry. */ |
| if (cmp <= 0) |
| err = walk_one_sys_reg(i1, &uind, &total); |
| else |
| err = walk_one_sys_reg(i2, &uind, &total); |
| |
| if (err) |
| return err; |
| |
| if (cmp <= 0 && ++i1 == end1) |
| i1 = NULL; |
| if (cmp >= 0 && ++i2 == end2) |
| i2 = NULL; |
| } |
| return total; |
| } |
| |
| unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) |
| { |
| return ARRAY_SIZE(invariant_sys_regs) |
| + num_demux_regs() |
| + walk_sys_regs(vcpu, (u64 __user *)NULL); |
| } |
| |
| int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) |
| { |
| unsigned int i; |
| int err; |
| |
| /* First give them all the invariant registers' indices. */ |
| for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { |
| if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) |
| return -EFAULT; |
| uindices++; |
| } |
| |
| err = walk_sys_regs(vcpu, uindices); |
| if (err < 0) |
| return err; |
| uindices += err; |
| |
| return write_demux_regids(uindices); |
| } |
| |
| static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n) |
| { |
| unsigned int i; |
| |
| for (i = 1; i < n; i++) { |
| if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) { |
| kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1); |
| return 1; |
| } |
| } |
| |
| return 0; |
| } |
| |
| void kvm_sys_reg_table_init(void) |
| { |
| unsigned int i; |
| struct sys_reg_desc clidr; |
| |
| /* Make sure tables are unique and in order. */ |
| BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs))); |
| BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs))); |
| BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs))); |
| BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs))); |
| BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs))); |
| BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs))); |
| BUG_ON(check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs))); |
| |
| /* We abuse the reset function to overwrite the table itself. */ |
| for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) |
| invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]); |
| |
| /* |
| * CLIDR format is awkward, so clean it up. See ARM B4.1.20: |
| * |
| * If software reads the Cache Type fields from Ctype1 |
| * upwards, once it has seen a value of 0b000, no caches |
| * exist at further-out levels of the hierarchy. So, for |
| * example, if Ctype3 is the first Cache Type field with a |
| * value of 0b000, the values of Ctype4 to Ctype7 must be |
| * ignored. |
| */ |
| get_clidr_el1(NULL, &clidr); /* Ugly... */ |
| cache_levels = clidr.val; |
| for (i = 0; i < 7; i++) |
| if (((cache_levels >> (i*3)) & 7) == 0) |
| break; |
| /* Clear all higher bits. */ |
| cache_levels &= (1 << (i*3))-1; |
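| /* |
| * e.g. if Ctype3 (i == 2) was the first zero field, only bits [5:0] |
| * (Ctype1 and Ctype2) survive in cache_levels. |
| */ |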
| } |
| |
| /** |
| * kvm_reset_sys_regs - sets system registers to reset value |
| * @vcpu: The VCPU pointer |
| * |
| * This function finds the right table above and sets the registers on the |
| * virtual CPU struct to their architecturally defined reset values. |
| */ |
| void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) |
| { |
| size_t num; |
| const struct sys_reg_desc *table; |
| |
| /* Catch someone adding a register without putting in a reset entry. */ |
| memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); |
| |
| /* Generic chip reset first (so target could override). */ |
| reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); |
| |
| table = get_target_table(vcpu->arch.target, true, &num); |
| reset_sys_reg_descs(vcpu, table, num); |
| |
| for (num = 1; num < NR_SYS_REGS; num++) |
| if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) |
| panic("Didn't reset __vcpu_sys_reg(%zi)", num); |
| } |