KVM: arm64: Allow cache clean+invalidate of tagged memory

When the guest is allowed to use MTE, cleaning and invalidating a
stage-2 mapping to the PoC must cover the allocation tags as well as
the data.

Plumb a 'tagged' parameter through __clean_dcache_guest_page() and
the dcache_clean_inval_poc() callback of kvm_pgtable_mm_ops, so that
the stage-2 walkers can use the tag-aware dcache_tag_clean_inval_poc()
when the VM has MTE enabled.

Since the page-table code only has access to the VM's kvm_arch
structure through the s2 MMU, introduce a kvm_arch_has_mte() helper
and reimplement kvm_has_mte() in terms of it.

Signed-off-by: Marc Zyngier <maz@kernel.org>
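---
Reviewer note, not part of the commit message: the sketch below is a
minimal illustration of how a stage-2 walker is expected to drive the
new callback signature, mirroring the pgtable.c hunks in this patch.
The function name stage2_clean_sketch() is made up for illustration
and does not exist in the tree.

	static void stage2_clean_sketch(struct kvm_pgtable *pgt, void *va,
					size_t granule)
	{
		struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;

		/* Nothing to do if the CMO callback isn't wired up */
		if (!mm_ops->dcache_clean_inval_poc)
			return;

		/* MTE VMs must have their allocation tags maintained too */
		mm_ops->dcache_clean_inval_poc(va, granule,
					       kvm_arch_has_mte(pgt->mmu->arch));
	}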
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index ec3e690..f461152 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1090,9 +1090,11 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
-#define kvm_has_mte(kvm) \
+#define kvm_arch_has_mte(arch) \
(system_supports_mte() && \
- test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
+ test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(arch)->flags))
+
+#define kvm_has_mte(kvm)	kvm_arch_has_mte(&(kvm)->arch)
#define kvm_supports_32bit_el0() \
(system_supports_32bit_el0() && \
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 27e63c1..818f662 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -195,6 +195,9 @@ struct kvm;
#define kvm_flush_dcache_to_poc(a,l) \
dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
+#define kvm_flush_dcache_tag_to_poc(a,l) \
+ dcache_tag_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
+
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
u64 cache_bits = SCTLR_ELx_M | SCTLR_ELx_C;
@@ -208,7 +211,7 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_read_sys_reg(vcpu, reg) & cache_bits) == cache_bits;
}
-static inline void __clean_dcache_guest_page(void *va, size_t size)
+static inline void __clean_dcache_guest_page(void *va, size_t size, bool tagged)
{
/*
* With FWB, we ensure that the guest always accesses memory using
@@ -219,7 +222,10 @@ static inline void __clean_dcache_guest_page(void *va, size_t size)
if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
return;
- kvm_flush_dcache_to_poc(va, size);
+ if (tagged)
+ kvm_flush_dcache_tag_to_poc(va, size);
+ else
+ kvm_flush_dcache_to_poc(va, size);
}
static inline void __invalidate_icache_guest_page(void *va, size_t size)
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index dc3c072..cc78c62 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -116,7 +116,8 @@ static inline bool kvm_level_supports_block_mapping(u32 level)
* @virt_to_phys: Convert a virtual address mapped in the current
* context into a physical address.
* @dcache_clean_inval_poc: Clean and invalidate the data cache to the PoC
- * for the specified memory address range.
+ * for the specified memory address range,
+ * taking into account whether it is tagged.
* @icache_inval_pou: Invalidate the instruction cache to the PoU
* for the specified memory address range.
*/
@@ -130,7 +131,8 @@ struct kvm_pgtable_mm_ops {
int (*page_count)(void *addr);
void* (*phys_to_virt)(phys_addr_t phys);
phys_addr_t (*virt_to_phys)(void *addr);
- void (*dcache_clean_inval_poc)(void *addr, size_t size);
+ void (*dcache_clean_inval_poc)(void *addr, size_t size,
+ bool tagged);
void (*icache_inval_pou)(void *addr, size_t size);
};
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 2e9ec4a..e02c369 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -217,9 +217,9 @@ static void guest_s2_put_page(void *addr)
hyp_put_page(&current_vm->pool, addr);
}
-static void clean_dcache_guest_page(void *va, size_t size)
+static void clean_dcache_guest_page(void *va, size_t size, bool tagged)
{
- __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
+ __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size, tagged);
hyp_fixmap_unmap();
}
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 5282cb9..56c16454 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -859,7 +859,8 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
/* Perform CMOs before installation of the guest stage-2 PTE */
if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
- granule);
+ granule,
+ kvm_arch_has_mte(pgt->mmu->arch));
if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
@@ -1039,7 +1040,8 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
if (need_flush && mm_ops->dcache_clean_inval_poc)
mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
- kvm_granule_size(ctx->level));
+ kvm_granule_size(ctx->level),
+ kvm_arch_has_mte(mmu->arch));
if (childp)
mm_ops->put_page(childp);
@@ -1212,7 +1214,8 @@ static int stage2_flush_walker(const struct kvm_pgtable_visit_ctx *ctx,
if (mm_ops->dcache_clean_inval_poc)
mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
- kvm_granule_size(ctx->level));
+ kvm_granule_size(ctx->level),
+ kvm_arch_has_mte(pgt->mmu->arch));
return 0;
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 3b9d4d24..afdfe59 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -182,9 +182,9 @@ static void *kvm_host_va(phys_addr_t phys)
return __va(phys);
}
-static void clean_dcache_guest_page(void *va, size_t size)
+static void clean_dcache_guest_page(void *va, size_t size, bool tagged)
{
- __clean_dcache_guest_page(va, size);
+ __clean_dcache_guest_page(va, size, tagged);
}
static void invalidate_icache_guest_page(void *va, size_t size)
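
Note, not part of the patch: kvm_flush_dcache_tag_to_poc() relies on
dcache_tag_clean_inval_poc(), which is assumed to be provided
elsewhere in the series. As a sketch of the assumed semantics only,
by analogy with dcache_clean_inval_poc(), such a helper would walk
the range by cache line using DC CIGDVAC, which cleans and
invalidates the allocation tags together with the data:

	/*
	 * Sketch only: the real helper is expected to be implemented in
	 * assembly next to dcache_clean_inval_poc().
	 */
	static inline void dcache_tag_clean_inval_poc_sketch(unsigned long start,
							     unsigned long end)
	{
		/* Assume 64-byte lines; real code reads DminLine from CTR_EL0 */
		unsigned long line = 64, addr;

		for (addr = start & ~(line - 1); addr < end; addr += line)
			asm volatile("dc cigdvac, %0" : : "r" (addr) : "memory");
		dsb(ish);	/* ensure completion of the maintenance */
	}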