From fb42dbc44b45250821aa17cdec55081cff44c6bf Mon Sep 17 00:00:00 2001 From: hjx_gitff <1435901016@qq.com> Date: Mon, 10 Mar 2025 08:47:32 -0400 Subject: [PATCH 1/4] improve vcpu check and cvm timer update --- arch/arm64/include/asm/kvm_tmm.h | 2 +- include/kvm/arm_arch_timer.h | 4 ++++ include/linux/kvm_host.h | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index e66d5e6a128f..ed6a8ef13932 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -54,7 +54,7 @@ struct cvm { struct cvm_tec { u64 tec; bool tec_created; - void *tec_run; + void *run; }; struct cvm_ttt_addr { diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 1a35cda7bc72..b527172e8916 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -174,4 +174,8 @@ static inline bool has_cntpoff(void) return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)); } +#ifdef CONFIG_HISI_VIRTCCA_HOST +/* Needed for S-EL2 */ +void kvm_cvm_timers_update(struct kvm_vcpu *vcpu); +#endif #endif diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 5caed9d00cad..a7ff2a856e6c 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -551,7 +551,7 @@ static __always_inline void guest_state_exit_irqoff(void) static __always_inline bool vcpu_is_tec(struct kvm_vcpu *vcpu) { - return false; + return (vcpu->arch.tec.run != NULL); } #endif -- Gitee From 368bdf0e4ba6d148b75b76e0ace45432637be95d Mon Sep 17 00:00:00 2001 From: hjx_gitff <1435901016@qq.com> Date: Mon, 10 Mar 2025 08:48:06 -0400 Subject: [PATCH 2/4] revert virtcca base files --- arch/arm64/include/asm/kvm_tmi.h | 414 ---------- arch/arm64/kvm/tmi.c | 171 ----- arch/arm64/kvm/virtcca_cvm.c | 1237 ------------------------------ 3 files changed, 1822 deletions(-) delete mode 100644 arch/arm64/include/asm/kvm_tmi.h delete mode 100644 arch/arm64/kvm/tmi.c delete mode 100644 arch/arm64/kvm/virtcca_cvm.c diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h deleted file mode 100644 index cab2b4cdd0f2..000000000000 --- a/arch/arm64/include/asm/kvm_tmi.h +++ /dev/null @@ -1,414 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2024, The Linux Foundation. All rights reserved. - */ -#ifndef __TMM_TMI_H -#define __TMM_TMI_H -#ifdef CONFIG_HISI_VIRTCCA_HOST -#include -#include -#include -#include -#include - -#define NO_NUMA 0 /* numa bitmap */ - -#define TMM_TTT_LEVEL_2 2 -#define TMM_TTT_LEVEL_3 3 - -/* TMI error codes. 
*/ -#define TMI_SUCCESS 0 -#define TMI_ERROR_INPUT 1 -#define TMI_ERROR_MEMORY 2 -#define TMI_ERROR_ALIAS 3 -#define TMI_ERROR_IN_USE 4 -#define TMI_ERROR_CVM_STATE 5 -#define TMI_ERROR_OWNER 6 -#define TMI_ERROR_TEC 7 -#define TMI_ERROR_TTT_WALK 8 -#define TMI_ERROR_TTT_ENTRY 9 -#define TMI_ERROR_NOT_SUPPORTED 10 -#define TMI_ERROR_INTERNAL 11 -#define TMI_ERROR_CVM_POWEROFF 12 -#define TMI_ERROR_TTT_CREATED 13 - -#define TMI_RETURN_STATUS(ret) ((ret) & 0xFF) -#define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF) - -#define TMI_FEATURE_REGISTER_0_S2SZ GENMASK(7, 0) -#define TMI_FEATURE_REGISTER_0_LPA2 BIT(8) -#define TMI_FEATURE_REGISTER_0_SVE_EN BIT(9) -#define TMI_FEATURE_REGISTER_0_SVE_VL GENMASK(13, 10) -#define TMI_FEATURE_REGISTER_0_NUM_BPS GENMASK(17, 14) -#define TMI_FEATURE_REGISTER_0_NUM_WPS GENMASK(21, 18) -#define TMI_FEATURE_REGISTER_0_PMU_EN BIT(22) -#define TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS GENMASK(27, 23) -#define TMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(28) -#define TMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(29) - -#define TMI_CVM_PARAM_FLAG_LPA2 BIT(0) -#define TMI_CVM_PARAM_FLAG_SVE BIT(1) -#define TMI_CVM_PARAM_FLAG_PMU BIT(2) - -#define TMI_NOT_RUNNABLE 0 -#define TMI_RUNNABLE 1 - -/* - * The number of GPRs (starting from X0) that are - * configured by the host when a TEC is created. - */ -#define TEC_CREATE_NR_GPRS (8U) - -struct tmi_tec_params { - uint64_t gprs[TEC_CREATE_NR_GPRS]; - uint64_t pc; - uint64_t flags; - uint64_t ram_size; -}; - -struct tmi_smmu_ste_params { - uint64_t ns_src; /* non-secure STE source address */ - uint64_t sid; /* stream id */ - uint64_t smmu_id; /* smmu id */ -}; - -struct tmi_smmu_cfg_params { - uint64_t smmu_id; /* smmu id */ - uint64_t ioaddr; /* smmu base address */ - uint8_t strtab_base_RA_bit : 1; /* Read-Allocate hint */ - uint8_t q_base_RA_WA_bit : 1; /* Write-Allocate hint*/ - uint8_t is_cmd_queue : 1; /* Whether to configure command queue */ -}; - -#define TMI_SMMU_CMD_QUEUE 1 -#define TMI_SMMU_EVT_QUEUE 2 -struct tmi_smmu_queue_params { - uint64_t smmu_base_addr; /* smmu base address */ - uint64_t size; /* queue size */ - uint64_t smmu_id; /* smmu id */ - uint64_t type; /* cmdq or evtq */ -}; - -#define MAX_DEV_PER_PORT 256 -struct tmi_dev_delegate_params { - /* BDF of PCIe root bus, F=0. BD are used to calculate APB base and port number. */ - uint16_t root_bd; - uint16_t num_dev; /* number of attachable devices */ - uint32_t _reserved; /* padding for 64-bit alignment */ - uint16_t devs[MAX_DEV_PER_PORT]; /* BDF of each attachable device */ -}; - -#define TEC_ENTRY_FLAG_EMUL_MMIO (1UL << 0U) -#define TEC_ENTRY_FLAG_INJECT_SEA (1UL << 1U) -#define TEC_ENTRY_FLAG_TRAP_WFI (1UL << 2U) -#define TEC_ENTRY_FLAG_TRAP_WFE (1UL << 3U) - -#define TMI_EXIT_SYNC 0 -#define TMI_EXIT_IRQ 1 -#define TMI_EXIT_FIQ 2 -#define TMI_EXIT_PSCI 3 -#define TMI_EXIT_HOST_CALL 5 -#define TMI_EXIT_SERROR 6 - -/* - * The number of GPRs (starting from X0) per voluntary exit context. - * Per SMCCC. - */ - #define TEC_EXIT_NR_GPRS (31U) - -/* Maximum number of Interrupt Controller List Registers. 
*/ -#define TEC_GIC_NUM_LRS (16U) - -struct tmi_tec_entry { - uint64_t flags; - uint64_t gprs[TEC_EXIT_NR_GPRS]; - uint64_t gicv3_lrs[TEC_GIC_NUM_LRS]; - uint64_t gicv3_hcr; -}; - -struct tmi_tec_exit { - uint64_t exit_reason; - uint64_t esr; - uint64_t far; - uint64_t hpfar; - uint64_t gprs[TEC_EXIT_NR_GPRS]; - uint64_t gicv3_hcr; - uint64_t gicv3_lrs[TEC_GIC_NUM_LRS]; - uint64_t gicv3_misr; - uint64_t gicv3_vmcr; - uint64_t cntv_ctl; - uint64_t cntv_cval; - uint64_t cntp_ctl; - uint64_t cntp_cval; - uint64_t imm; - uint64_t pmu_ovf_status; -}; - -struct tmi_tec_run { - struct tmi_tec_entry tec_entry; - struct tmi_tec_exit tec_exit; -}; - -#define TMI_FNUM_MIN_VALUE U(0x150) -#define TMI_FNUM_MAX_VALUE U(0x18F) - -/****************************************************************************** - * Bit definitions inside the function id as per the SMC calling convention - ******************************************************************************/ -#define FUNCID_TYPE_SHIFT 31 -#define FUNCID_CC_SHIFT 30 -#define FUNCID_OEN_SHIFT 24 -#define FUNCID_NUM_SHIFT 0 - -#define FUNCID_TYPE_MASK 0x1 -#define FUNCID_CC_MASK 0x1 -#define FUNCID_OEN_MASK 0x3f -#define FUNCID_NUM_MASK 0xffff - -#define FUNCID_TYPE_WIDTH 1 -#define FUNCID_CC_WIDTH 1 -#define FUNCID_OEN_WIDTH 6 -#define FUNCID_NUM_WIDTH 16 - -#define SMC_64 1 -#define SMC_32 0 -#define SMC_TYPE_FAST 1 -#define SMC_TYPE_STD 0 - -/***************************************************************************** - * Owning entity number definitions inside the function id as per the SMC - * calling convention - *****************************************************************************/ -#define OEN_ARM_START 0 -#define OEN_ARM_END 0 -#define OEN_CPU_START 1 -#define OEN_CPU_END 1 -#define OEN_SIP_START 2 -#define OEN_SIP_END 2 -#define OEN_OEM_START 3 -#define OEN_OEM_END 3 -#define OEN_STD_START 4 /* Standard Calls */ -#define OEN_STD_END 4 -#define OEN_TAP_START 48 /* Trusted Applications */ -#define OEN_TAP_END 49 -#define OEN_TOS_START 50 /* Trusted OS */ -#define OEN_TOS_END 63 -#define OEN_LIMIT 64 - -/* Get TMI fastcall std FID from function number */ -#define TMI_FID(smc_cc, func_num) \ - ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \ - ((smc_cc) << FUNCID_CC_SHIFT) | \ - (OEN_STD_START << FUNCID_OEN_SHIFT) | \ - ((func_num) << FUNCID_NUM_SHIFT)) - -#define U(_x) (_x##U) - -#define TMI_NO_MEASURE_CONTENT U(0) -#define TMI_MEASURE_CONTENT U(1) - -#define CVM_IPA_MAX_VAL (1UL << 48) - -/* - * SMC_TMM_INIT_COMPLETE is the only function in the TMI that originates from - * the CVM world and is handled by the SPMD. The remaining functions are - * always invoked by the Normal world, forward by SPMD and handled by the - * TMM. 
- */ -#define TMI_FNUM_VERSION_REQ U(0x260) -#define TMI_FNUM_MEM_INFO_SHOW U(0x261) -#define TMI_FNUM_DATA_CREATE U(0x262) -#define TMI_FNUM_DATA_DESTROY U(0x263) -#define TMI_FNUM_CVM_ACTIVATE U(0x264) -#define TMI_FNUM_CVM_CREATE U(0x265) -#define TMI_FNUM_CVM_DESTROY U(0x266) -#define TMI_FNUM_TEC_CREATE U(0x267) -#define TMI_FNUM_TEC_DESTROY U(0x268) -#define TMI_FNUM_TEC_ENTER U(0x269) -#define TMI_FNUM_TTT_CREATE U(0x26A) -#define TMI_FNUM_PSCI_COMPLETE U(0x26B) -#define TMI_FNUM_FEATURES U(0x26C) -#define TMI_FNUM_TTT_MAP_RANGE U(0x26D) -#define TMI_FNUM_TTT_UNMAP_RANGE U(0x26E) -#define TMI_FNUM_INF_TEST U(0x270) - -#define TMI_FNUM_SMMU_QUEUE_CREATE U(0x277) -#define TMI_FNUM_SMMU_QUEUE_WRITE U(0x278) -#define TMI_FNUM_SMMU_STE_CREATE U(0x279) -#define TMI_FNUM_MMIO_MAP U(0x27A) -#define TMI_FNUM_MMIO_UNMAP U(0x27B) -#define TMI_FNUM_MMIO_WRITE U(0x27C) -#define TMI_FNUM_MMIO_READ U(0x27D) -#define TMI_FNUM_DEV_DELEGATE U(0x27E) -#define TMI_FNUM_DEV_ATTACH U(0x27F) -#define TMI_FNUM_HANDLE_S_EVTQ U(0x280) -#define TMI_FNUM_SMMU_DEVICE_RESET U(0x281) -#define TMI_FNUM_SMMU_WRITE U(0x282) -#define TMI_FNUM_SMMU_READ U(0x283) -#define TMI_FNUM_SMMU_PCIE_CORE_CHECK U(0x284) -#define TMI_FNUM_DEV_TTT_CREATE U(0x285) - -/* TMI SMC64 PIDs handled by the SPMD */ -#define TMI_TMM_VERSION_REQ TMI_FID(SMC_64, TMI_FNUM_VERSION_REQ) -#define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE) -#define TMI_TMM_DATA_DESTROY TMI_FID(SMC_64, TMI_FNUM_DATA_DESTROY) -#define TMI_TMM_CVM_ACTIVATE TMI_FID(SMC_64, TMI_FNUM_CVM_ACTIVATE) -#define TMI_TMM_CVM_CREATE TMI_FID(SMC_64, TMI_FNUM_CVM_CREATE) -#define TMI_TMM_CVM_DESTROY TMI_FID(SMC_64, TMI_FNUM_CVM_DESTROY) -#define TMI_TMM_TEC_CREATE TMI_FID(SMC_64, TMI_FNUM_TEC_CREATE) -#define TMI_TMM_TEC_DESTROY TMI_FID(SMC_64, TMI_FNUM_TEC_DESTROY) -#define TMI_TMM_TEC_ENTER TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER) -#define TMI_TMM_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_TTT_CREATE) -#define TMI_TMM_PSCI_COMPLETE TMI_FID(SMC_64, TMI_FNUM_PSCI_COMPLETE) -#define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES) -#define TMI_TMM_MEM_INFO_SHOW TMI_FID(SMC_64, TMI_FNUM_MEM_INFO_SHOW) -#define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE) -#define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE) -#define TMI_TMM_INF_TEST TMI_FID(SMC_64, TMI_FNUM_INF_TEST) - -#define TMI_TMM_SMMU_QUEUE_CREATE TMI_FID(SMC_64, TMI_FNUM_SMMU_QUEUE_CREATE) -#define TMI_TMM_SMMU_QUEUE_WRITE TMI_FID(SMC_64, TMI_FNUM_SMMU_QUEUE_WRITE) -#define TMI_TMM_SMMU_STE_CREATE TMI_FID(SMC_64, TMI_FNUM_SMMU_STE_CREATE) -#define TMI_TMM_MMIO_MAP TMI_FID(SMC_64, TMI_FNUM_MMIO_MAP) -#define TMI_TMM_MMIO_UNMAP TMI_FID(SMC_64, TMI_FNUM_MMIO_UNMAP) -#define TMI_TMM_MMIO_WRITE TMI_FID(SMC_64, TMI_FNUM_MMIO_WRITE) -#define TMI_TMM_MMIO_READ TMI_FID(SMC_64, TMI_FNUM_MMIO_READ) -#define TMI_TMM_DEV_DELEGATE TMI_FID(SMC_64, TMI_FNUM_DEV_DELEGATE) -#define TMI_TMM_DEV_ATTACH TMI_FID(SMC_64, TMI_FNUM_DEV_ATTACH) -#define TMI_TMM_HANDLE_S_EVTQ TMI_FID(SMC_64, TMI_FNUM_HANDLE_S_EVTQ) -#define TMI_TMM_SMMU_DEVICE_RESET TMI_FID(SMC_64, TMI_FNUM_SMMU_DEVICE_RESET) -#define TMI_TMM_SMMU_WRITE TMI_FID(SMC_64, TMI_FNUM_SMMU_WRITE) -#define TMI_TMM_SMMU_READ TMI_FID(SMC_64, TMI_FNUM_SMMU_READ) -#define TMI_TMM_SMMU_PCIE_CORE_CHECK TMI_FID(SMC_64, TMI_FNUM_SMMU_PCIE_CORE_CHECK) -#define TMI_TMM_DEV_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_DEV_TTT_CREATE) - -#define TMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16) -#define TMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF) - 
-#define TMI_ABI_VERSION_MAJOR U(0x2) - -/* KVM_CAP_ARM_TMM on VM fd */ -#define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0 -#define KVM_CAP_ARM_TMM_CREATE_RD 1 -#define KVM_CAP_ARM_TMM_POPULATE_CVM 2 -#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3 - -#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0 -#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1 - -#define KVM_CAP_ARM_TMM_RPV_SIZE 64 - -/* List of configuration items accepted for KVM_CAP_ARM_TMM_CONFIG_CVM_HOST */ -#define KVM_CAP_ARM_TMM_CFG_RPV 0 -#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1 -#define KVM_CAP_ARM_TMM_CFG_SVE 2 -#define KVM_CAP_ARM_TMM_CFG_DBG 3 -#define KVM_CAP_ARM_TMM_CFG_PMU 4 - -DECLARE_STATIC_KEY_FALSE(virtcca_cvm_is_available); - -struct kvm_cap_arm_tmm_config_item { - __u32 cfg; - union { - /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */ - struct { - __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE]; - }; - - /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */ - struct { - __u32 hash_algo; - }; - - /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */ - struct { - __u32 sve_vq; - }; - - /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */ - struct { - __u32 num_brps; - __u32 num_wrps; - }; - - /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */ - struct { - __u32 num_pmu_cntrs; - }; - /* Fix the size of the union */ - __u8 reserved[256]; - }; -}; - -#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) -struct kvm_cap_arm_tmm_populate_region_args { - __u64 populate_ipa_base1; - __u64 populate_ipa_size1; - __u64 populate_ipa_base2; - __u64 populate_ipa_size2; - __u32 flags; - __u32 reserved[3]; -}; - -static inline bool tmm_is_addr_ttt_level_aligned(uint64_t addr, int level) -{ - uint64_t mask = (1 << (12 + 9 * (3 - level))) - 1; - - return (addr & mask) == 0; -} - -#define ID_AA64PFR0_SEL2_MASK ULL(0xf) -#define ID_AA64PFR0_SEL2_SHIFT 36 - -static inline bool is_armv8_4_sel2_present(void) -{ - return ((read_sysreg(id_aa64pfr0_el1) >> ID_AA64PFR0_SEL2_SHIFT) & - ID_AA64PFR0_SEL2_MASK) == 1UL; -} - -u64 tmi_version(void); -u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level); -u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level); -u64 tmi_cvm_activate(u64 rd); -u64 tmi_cvm_create(u64 params_ptr, u64 numa_set); -u64 tmi_cvm_destroy(u64 rd); -u64 tmi_tec_create(u64 numa_set, u64 rd, u64 mpidr, u64 params_ptr); -u64 tmi_tec_destroy(u64 tec); -u64 tmi_tec_enter(u64 tec, u64 run_ptr); -u64 tmi_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level); -u64 tmi_psci_complete(u64 calling_tec, u64 target_tec); -u64 tmi_features(u64 index); -u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node); -u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id); -u64 tmi_mem_info_show(u64 mem_info_addr); - -u64 tmi_dev_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level); -u64 tmi_smmu_queue_create(u64 params_ptr); -u64 tmi_smmu_queue_write(uint64_t cmd0, uint64_t cmd1, u64 smmu_id); -u64 tmi_smmu_ste_create(u64 params_ptr); -u64 tmi_mmio_map(u64 rd, u64 map_addr, u64 level, u64 ttte); -u64 tmi_mmio_unmap(u64 rd, u64 map_addr, u64 level); -u64 tmi_mmio_write(u64 addr, u64 val, u64 bits, u64 dev_num); -u64 tmi_mmio_read(u64 addr, u64 bits, u64 dev_num); -u64 tmi_dev_delegate(u64 params); -u64 tmi_dev_attach(u64 vdev, u64 rd, u64 smmu_id); -u64 tmi_handle_s_evtq(u64 smmu_id); -u64 tmi_smmu_device_reset(u64 params); -u64 tmi_smmu_pcie_core_check(u64 smmu_base); -u64 tmi_smmu_write(u64 smmu_base, u64 reg_offset, u64 val, u64 bits); -u64 tmi_smmu_read(u64 smmu_base, u64 reg_offset, u64 bits); - -u64 mmio_va_to_pa(void *addr); -void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu); 
-int kvm_load_user_data(struct kvm *kvm, unsigned long arg); -unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu, - unsigned long target_affinity, unsigned long lowest_affinity_level); -int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu, - bool serror_pending, bool ext_dabt_pending); -int kvm_init_cvm_vm(struct kvm *kvm); -int kvm_enable_virtcca_cvm(struct kvm *kvm); -int kvm_cvm_map_ipa(struct kvm *kvm, phys_addr_t ipa, kvm_pfn_t pfn, - unsigned long map_size, enum kvm_pgtable_prot prot, int ret); -void virtcca_cvm_set_secure_flag(void *vdev, void *info); -#endif -#endif diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c deleted file mode 100644 index 43595e9373e2..000000000000 --- a/arch/arm64/kvm/tmi.c +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, The Linux Foundation. All rights reserved. - */ -#include -#include -#include - -/** - * mmio_va_to_pa - To convert the virtual address of the mmio space - * to a physical address, it is necessary to implement this interface - * because the kernel insterface __pa has an error when converting the - * physical address of the virtual address of the mmio space - * @addr: MMIO virtual address - */ -u64 mmio_va_to_pa(void *addr) -{ - uint64_t pa, par_el1; - - asm volatile( - "AT S1E1W, %0\n" - ::"r"((uint64_t)(addr)) - ); - isb(); - asm volatile( - "mrs %0, par_el1\n" - : "=r"(par_el1) - ); - - pa = ((uint64_t)(addr) & (PAGE_SIZE - 1)) | - (par_el1 & ULL(0x000ffffffffff000)); - - if (par_el1 & UL(1 << 0)) - return (uint64_t)(addr); - else - return pa; -} -EXPORT_SYMBOL(mmio_va_to_pa); - -u64 tmi_version(void) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_VERSION_REQ, &res); - return res.a1; -} - -u64 tmi_data_create(u64 numa_set, u64 rd, u64 map_addr, u64 src, u64 level) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_DATA_CREATE, numa_set, rd, map_addr, src, level, &res); - return res.a1; -} - -u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_DATA_DESTROY, rd, map_addr, level, &res); - return res.a1; -} - -u64 tmi_cvm_activate(u64 rd) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_CVM_ACTIVATE, rd, &res); - return res.a1; -} - -u64 tmi_cvm_create(u64 params_ptr, u64 numa_set) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, params_ptr, numa_set, &res); - return res.a1; -} - -u64 tmi_cvm_destroy(u64 rd) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_CVM_DESTROY, rd, &res); - return res.a1; -} - -u64 tmi_tec_create(u64 numa_set, u64 rd, u64 mpidr, u64 params_ptr) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TEC_CREATE, numa_set, rd, mpidr, params_ptr, &res); - return res.a1; -} - -u64 tmi_tec_destroy(u64 tec) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TEC_DESTROY, tec, &res); - return res.a1; -} - -u64 tmi_tec_enter(u64 tec, u64 run_ptr) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TEC_ENTER, tec, run_ptr, &res); - return res.a1; -} - -u64 tmi_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TTT_CREATE, numa_set, rd, map_addr, level, &res); - return res.a1; -} - -u64 tmi_psci_complete(u64 calling_tec, u64 target_tec) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_PSCI_COMPLETE, calling_tec, target_tec, &res); - return res.a1; -} - -u64 tmi_features(u64 index) -{ - struct arm_smccc_res res; - - 
arm_smccc_1_1_smc(TMI_TMM_FEATURES, index, &res); - return res.a1; -} - -u64 tmi_mem_info_show(u64 mem_info_addr) -{ - struct arm_smccc_res res; - u64 pa_addr = __pa(mem_info_addr); - - arm_smccc_1_1_smc(TMI_TMM_MEM_INFO_SHOW, pa_addr, &res); - return res.a1; -} -EXPORT_SYMBOL_GPL(tmi_mem_info_show); - -u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_RANGE, rd, map_addr, size, cur_node, target_node, &res); - return res.a1; -} - -u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_RANGE, rd, map_addr, size, node_id, &res); - return res.a1; -} - -u64 tmi_tmm_inf_test(u64 x1, u64 x2, u64 x3, u64 x4, u64 x5) -{ - struct arm_smccc_res res; - u64 vttbr_el2_pa = __pa(x2); - u64 cvm_params_pa = __pa(x3); - u64 tec_params_pa = __pa(x4); - - arm_smccc_1_1_smc(TMI_TMM_INF_TEST, x1, vttbr_el2_pa, cvm_params_pa, tec_params_pa, x5, &res); - return res.a1; -} diff --git a/arch/arm64/kvm/virtcca_cvm.c b/arch/arm64/kvm/virtcca_cvm.c deleted file mode 100644 index cfea9cfe70a6..000000000000 --- a/arch/arm64/kvm/virtcca_cvm.c +++ /dev/null @@ -1,1237 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, The Linux Foundation. All rights reserved. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* Protects access to cvm_vmid_bitmap */ -static DEFINE_SPINLOCK(cvm_vmid_lock); -static unsigned long *cvm_vmid_bitmap; -DEFINE_STATIC_KEY_FALSE(virtcca_cvm_is_available); -#define SIMD_PAGE_SIZE 0x3000 - -int kvm_enable_virtcca_cvm(struct kvm *kvm) -{ - if (!static_key_enabled(&virtcca_cvm_is_available)) - return -EFAULT; - - kvm->arch.is_virtcca_cvm = true; - return 0; -} - -static int cvm_vmid_init(void) -{ - unsigned int vmid_count = 1 << kvm_get_vmid_bits(); - - cvm_vmid_bitmap = bitmap_zalloc(vmid_count, GFP_KERNEL); - if (!cvm_vmid_bitmap) { - kvm_err("%s: Couldn't allocate cvm vmid bitmap\n", __func__); - return -ENOMEM; - } - return 0; -} - -static unsigned long tmm_feat_reg0; - -static bool tmm_supports(unsigned long feature) -{ - return !!u64_get_bits(tmm_feat_reg0, feature); -} - -bool kvm_cvm_supports_sve(void) -{ - return tmm_supports(TMI_FEATURE_REGISTER_0_SVE_EN); -} - -bool kvm_cvm_supports_pmu(void) -{ - return tmm_supports(TMI_FEATURE_REGISTER_0_PMU_EN); -} - -u32 kvm_cvm_ipa_limit(void) -{ - return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_S2SZ); -} - -u32 kvm_cvm_get_num_brps(void) -{ - return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_BPS); -} - -u32 kvm_cvm_get_num_wrps(void) -{ - return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_WPS); -} - -static int cvm_vmid_reserve(void) -{ - int ret; - unsigned int vmid_count = 1 << kvm_get_vmid_bits(); - - spin_lock(&cvm_vmid_lock); - ret = bitmap_find_free_region(cvm_vmid_bitmap, vmid_count, 0); - spin_unlock(&cvm_vmid_lock); - - return ret; -} - -static void cvm_vmid_release(unsigned int vmid) -{ - spin_lock(&cvm_vmid_lock); - bitmap_release_region(cvm_vmid_bitmap, vmid, 0); - spin_unlock(&cvm_vmid_lock); -} - -static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) -{ - u64 shift = ARM64_HW_PGTABLE_LEVEL_SHIFT(pgt->start_level - 1); - u64 mask = BIT(pgt->ia_bits) - 1; - - return (addr & mask) >> shift; -} - -static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level) -{ - struct kvm_pgtable pgt = { - 
.ia_bits = ia_bits, - .start_level = start_level, - }; - return __kvm_pgd_page_idx(&pgt, -1ULL) + 1; -} - -/* - * the configurable physical numa range in QEMU is 0-127, - * but in real scenarios, 0-63 is sufficient. - */ -static u64 kvm_get_host_numa_set_by_vcpu(u64 vcpu, struct kvm *kvm) -{ - int64_t i; - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_numa_info *numa_info = &cvm->numa_info; - - for (i = 0; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) { - if (test_bit(vcpu, (unsigned long *)numa_info->numa_nodes[i].cpu_id)) - return numa_info->numa_nodes[i].host_numa_nodes[0]; - } - return NO_NUMA; -} - -static u64 kvm_get_first_binded_numa_set(struct kvm *kvm) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_numa_info *numa_info = &cvm->numa_info; - - if (numa_info->numa_cnt > 0) - return numa_info->numa_nodes[0].host_numa_nodes[0]; - return NO_NUMA; -} - -int kvm_arm_create_cvm(struct kvm *kvm) -{ - int ret; - struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; - unsigned int pgd_sz; - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - /* get affine host numa set by default vcpu 0 */ - u64 numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm); - - if (!kvm_is_virtcca_cvm(kvm) || virtcca_cvm_state(kvm) != CVM_STATE_NONE) - return 0; - - if (!cvm->params) { - ret = -EFAULT; - goto out; - } - - ret = cvm_vmid_reserve(); - if (ret < 0) - goto out; - - cvm->cvm_vmid = ret; - - pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level); - - cvm->params->ttt_level_start = kvm->arch.mmu.pgt->start_level; - cvm->params->ttt_num_start = pgd_sz; - cvm->params->s2sz = VTCR_EL2_IPA(kvm->arch.vtcr); - cvm->params->vmid = cvm->cvm_vmid; - cvm->params->ns_vtcr = kvm->arch.vtcr; - cvm->params->vttbr_el2 = kvm->arch.mmu.pgd_phys; - memcpy(cvm->params->rpv, &cvm->cvm_vmid, sizeof(cvm->cvm_vmid)); - cvm->rd = tmi_cvm_create(__pa(cvm->params), numa_set); - if (!cvm->rd) { - kvm_err("KVM creates cVM failed: %d\n", cvm->cvm_vmid); - ret = -ENOMEM; - goto out; - } - - WRITE_ONCE(cvm->state, CVM_STATE_NEW); - ret = 0; -out: - kfree(cvm->params); - cvm->params = NULL; - if (ret < 0) { - kfree(cvm); - kvm->arch.virtcca_cvm = NULL; - } - return ret; -} - -void kvm_destroy_cvm(struct kvm *kvm) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - uint32_t cvm_vmid; -#ifdef CONFIG_HISI_VIRTCCA_CODA - struct arm_smmu_domain *arm_smmu_domain; - struct list_head smmu_domain_group_list; -#endif - - if (!cvm) - return; - -#ifdef CONFIG_HISI_VIRTCCA_CODA - /* Unmap the cvm with arm smmu domain */ - kvm_get_arm_smmu_domain(kvm, &smmu_domain_group_list); - list_for_each_entry(arm_smmu_domain, &smmu_domain_group_list, node) { - if (arm_smmu_domain->kvm && arm_smmu_domain->kvm == kvm) - arm_smmu_domain->kvm = NULL; - } -#endif - - cvm_vmid = cvm->cvm_vmid; - kfree(cvm->params); - cvm->params = NULL; - - if (virtcca_cvm_state(kvm) == CVM_STATE_NONE) - return; - - cvm_vmid_release(cvm_vmid); - - WRITE_ONCE(cvm->state, CVM_STATE_DYING); - - if (!tmi_cvm_destroy(cvm->rd)) - kvm_info("KVM has destroyed cVM: %d\n", cvm->cvm_vmid); - - cvm->is_mapped = false; - kfree(cvm); - kvm->arch.virtcca_cvm = NULL; -} - -static int kvm_cvm_ttt_create(struct virtcca_cvm *cvm, - unsigned long addr, - int level, - u64 numa_set) -{ - addr = ALIGN_DOWN(addr, cvm_ttt_level_mapsize(level - 1)); - return tmi_ttt_create(numa_set, cvm->rd, addr, level); -} - -int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct virtcca_cvm *cvm, - unsigned long ipa, - int level, - int max_level, - struct kvm_mmu_memory_cache *mc) -{ - int ret = 
0; - if (WARN_ON(level == max_level)) - return 0; - - while (level++ < max_level) { - u64 numa_set = kvm_get_first_binded_numa_set(kvm); - - ret = kvm_cvm_ttt_create(cvm, ipa, level, numa_set); - if (ret) - return -ENXIO; - } - - return 0; -} - -static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct virtcca_cvm *cvm, - unsigned long ipa, int level, struct page *src_page, u64 numa_set) -{ - phys_addr_t src_phys = 0; - int ret; - - if (src_page) - src_phys = page_to_phys(src_page); - ret = tmi_data_create(numa_set, cvm->rd, ipa, src_phys, level); - - if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) { - /* Create missing RTTs and retry */ - int level_fault = TMI_RETURN_INDEX(ret); - - ret = kvm_cvm_create_ttt_levels(kvm, cvm, ipa, level_fault, - level, NULL); - if (ret) - goto err; - ret = tmi_data_create(numa_set, cvm->rd, ipa, src_phys, level); - } - if (ret) - goto err; - - return 0; - -err: - kvm_err("Cvm create protected data page fail:%d\n", ret); - return ret; -} - -static u64 cvm_granule_size(u32 level) -{ - return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level)); -} - -static bool is_data_create_region(phys_addr_t ipa_base, - struct kvm_cap_arm_tmm_populate_region_args *args) -{ - if ((ipa_base >= args->populate_ipa_base1 && - ipa_base < args->populate_ipa_base1 + args->populate_ipa_size1) || - (ipa_base >= args->populate_ipa_base2 && - ipa_base < args->populate_ipa_base2 + args->populate_ipa_size2)) - return true; - return false; -} - -int kvm_cvm_populate_par_region(struct kvm *kvm, u64 numa_set, - phys_addr_t ipa_base, phys_addr_t ipa_end, - struct kvm_cap_arm_tmm_populate_region_args *args) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_memory_slot *memslot; - gfn_t base_gfn, end_gfn; - int idx; - phys_addr_t ipa; - int ret = 0; - int level = TMM_TTT_LEVEL_3; - unsigned long map_size = cvm_granule_size(level); - - base_gfn = gpa_to_gfn(ipa_base); - end_gfn = gpa_to_gfn(ipa_end); - - idx = srcu_read_lock(&kvm->srcu); - memslot = gfn_to_memslot(kvm, base_gfn); - if (!memslot) { - ret = -EFAULT; - goto out; - } - - /* We require the region to be contained within a single memslot */ - if (memslot->base_gfn + memslot->npages < end_gfn) { - ret = -EINVAL; - goto out; - } - - mmap_read_lock(current->mm); - - ipa = ipa_base; - while (ipa < ipa_end) { - struct page *page = NULL; - kvm_pfn_t pfn = 0; - - /* - * FIXME: This causes over mapping, but there's no good - * solution here with the ABI as it stands - */ - ipa = ALIGN_DOWN(ipa, map_size); - - if (is_data_create_region(ipa, args)) { - pfn = gfn_to_pfn_memslot(memslot, gpa_to_gfn(ipa)); - if (is_error_pfn(pfn)) { - ret = -EFAULT; - break; - } - - page = pfn_to_page(pfn); - } - - ret = kvm_cvm_create_protected_data_page(kvm, cvm, ipa, level, page, numa_set); - if (ret) - goto err_release_pfn; - - ipa += map_size; - if (pfn) - kvm_release_pfn_dirty(pfn); -err_release_pfn: - if (ret) { - if (pfn) - kvm_release_pfn_clean(pfn); - break; - } - } - - mmap_read_unlock(current->mm); -out: - srcu_read_unlock(&kvm->srcu, idx); - return ret; -} - -int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu) -{ - int ret = 0; - int i; - u64 numa_set; - struct tmi_tec_params *params_ptr = NULL; - struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu); - u64 mpidr = kvm_vcpu_get_mpidr_aff(vcpu); - struct virtcca_cvm *cvm = vcpu->kvm->arch.virtcca_cvm; - struct virtcca_cvm_tec *tec = &vcpu->arch.tec; - - mutex_lock(&vcpu->kvm->lock); - tec->tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!tec->tec_run) { - ret = -ENOMEM; - goto 
tec_free; - } - params_ptr = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!params_ptr) { - ret = -ENOMEM; - goto tec_free; - } - - for (i = 0; i < TEC_CREATE_NR_GPRS; ++i) - params_ptr->gprs[i] = vcpu_regs->regs[i]; - - params_ptr->pc = vcpu_regs->pc; - - if (vcpu->vcpu_id == 0) - params_ptr->flags = TMI_RUNNABLE; - else - params_ptr->flags = TMI_NOT_RUNNABLE; - params_ptr->ram_size = cvm->ram_size; - numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm); - tec->tec = tmi_tec_create(numa_set, cvm->rd, mpidr, __pa(params_ptr)); - - tec->tec_created = true; - kfree(params_ptr); - mutex_unlock(&vcpu->kvm->lock); - return ret; - -tec_free: - kfree(tec->tec_run); - kfree(params_ptr); - mutex_unlock(&vcpu->kvm->lock); - return ret; -} - -static int config_cvm_hash_algo(struct tmi_cvm_params *params, - struct kvm_cap_arm_tmm_config_item *cfg) -{ - switch (cfg->hash_algo) { - case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256: - if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_256)) - return -EINVAL; - break; - case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512: - if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_512)) - return -EINVAL; - break; - default: - return -EINVAL; - } - params->measurement_algo = cfg->hash_algo; - return 0; -} - -static int config_cvm_sve(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct tmi_cvm_params *params; - int max_sve_vq; - - params = cvm->params; - max_sve_vq = u64_get_bits(tmm_feat_reg0, - TMI_FEATURE_REGISTER_0_SVE_VL); - - if (!kvm_cvm_supports_sve()) - return -EINVAL; - - if (cfg->sve_vq > max_sve_vq) - return -EINVAL; - - params->sve_vl = cfg->sve_vq; - params->flags |= TMI_CVM_PARAM_FLAG_SVE; - - return 0; -} - -static int config_cvm_pmu(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct tmi_cvm_params *params; - int max_pmu_num_ctrs; - - params = cvm->params; - max_pmu_num_ctrs = u64_get_bits(tmm_feat_reg0, - TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS); - - if (!kvm_cvm_supports_pmu()) - return -EINVAL; - - if (cfg->num_pmu_cntrs > max_pmu_num_ctrs) - return -EINVAL; - - params->pmu_num_cnts = cfg->num_pmu_cntrs; - params->flags |= TMI_CVM_PARAM_FLAG_PMU; - - return 0; -} - -static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_cap_arm_tmm_config_item cfg; - int r = 0; - - if (virtcca_cvm_state(kvm) != CVM_STATE_NONE) - return -EBUSY; - - if (copy_from_user(&cfg, (void __user *)cap->args[1], sizeof(cfg))) - return -EFAULT; - - switch (cfg.cfg) { - case KVM_CAP_ARM_TMM_CFG_SVE: - r = config_cvm_sve(kvm, &cfg); - break; - case KVM_CAP_ARM_TMM_CFG_PMU: - r = config_cvm_pmu(kvm, &cfg); - break; - case KVM_CAP_ARM_TMM_CFG_HASH_ALGO: - r = config_cvm_hash_algo(cvm->params, &cfg); - break; - default: - r = -EINVAL; - } - - return r; -} - -int kvm_cvm_map_range(struct kvm *kvm) -{ - int ret; - u64 curr_numa_set; - int idx; - u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_numa_info *numa_info = &cvm->numa_info; - gpa_t gpa; - - curr_numa_set = kvm_get_first_binded_numa_set(kvm); - gpa = round_up(cvm->dtb_end, l2_granule); - for (idx = 0; idx < numa_info->numa_cnt; idx++) { - struct kvm_numa_node *numa_node = &numa_info->numa_nodes[idx]; - - if (idx) - gpa = numa_node->ipa_start; - if (gpa >= numa_node->ipa_start && - gpa < numa_node->ipa_start + numa_node->ipa_size) { - ret = 
tmi_ttt_map_range(cvm->rd, gpa, - numa_node->ipa_size - gpa + numa_node->ipa_start, - curr_numa_set, numa_node->host_numa_nodes[0]); - if (ret) { - kvm_err("tmi_ttt_map_range failed: %d.\n", ret); - return ret; - } - } - } - /* Vfio driver will pin memory in advance, - * if the ram already mapped, activate cvm - * does not need to map twice - */ - cvm->is_mapped = true; - return ret; -} - -static int kvm_activate_cvm(struct kvm *kvm) -{ -#ifdef CONFIG_HISI_VIRTCCA_CODA - int ret; - struct arm_smmu_domain *arm_smmu_domain; - struct list_head smmu_domain_group_list; -#endif - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - - if (virtcca_cvm_state(kvm) != CVM_STATE_NEW) - return -EINVAL; - - if (!cvm->is_mapped && kvm_cvm_map_range(kvm)) - return -EFAULT; - -#ifdef CONFIG_HISI_VIRTCCA_CODA - kvm_get_arm_smmu_domain(kvm, &smmu_domain_group_list); - list_for_each_entry(arm_smmu_domain, &smmu_domain_group_list, node) { - if (arm_smmu_domain) { - ret = virtcca_tmi_dev_attach(arm_smmu_domain, kvm); - if (ret) - return ret; - } - } -#endif - - if (tmi_cvm_activate(cvm->rd)) { - kvm_err("tmi_cvm_activate failed!\n"); - return -ENXIO; - } - - WRITE_ONCE(cvm->state, CVM_STATE_ACTIVE); - kvm_info("cVM%d is activated!\n", cvm->cvm_vmid); - return 0; -} - -static int kvm_populate_ram_region(struct kvm *kvm, u64 map_size, - phys_addr_t ipa_base, phys_addr_t ipa_end, - struct kvm_cap_arm_tmm_populate_region_args *args) -{ - phys_addr_t gpa; - u64 numa_set = kvm_get_first_binded_numa_set(kvm); - - for (gpa = ipa_base; gpa < ipa_end; gpa += map_size) { - if (kvm_cvm_populate_par_region(kvm, numa_set, gpa, gpa + map_size, args)) { - kvm_err("kvm_cvm_populate_par_region failed: %d\n", -EFAULT); - return -EFAULT; - } - } - return 0; -} - -static int kvm_populate_ipa_cvm_range(struct kvm *kvm, - struct kvm_cap_arm_tmm_populate_region_args *args) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); - phys_addr_t ipa_base1, ipa_end2; - - if (virtcca_cvm_state(kvm) != CVM_STATE_NEW) - return -EINVAL; - if (!IS_ALIGNED(args->populate_ipa_base1, PAGE_SIZE) || - !IS_ALIGNED(args->populate_ipa_size1, PAGE_SIZE) || - !IS_ALIGNED(args->populate_ipa_base2, PAGE_SIZE) || - !IS_ALIGNED(args->populate_ipa_size2, PAGE_SIZE)) - return -EINVAL; - - if (args->populate_ipa_base1 < cvm->loader_start || - args->populate_ipa_base2 < args->populate_ipa_base1 + args->populate_ipa_size1 || - cvm->dtb_end < args->populate_ipa_base2 + args->populate_ipa_size2) - return -EINVAL; - - if (args->flags & ~TMI_MEASURE_CONTENT) - return -EINVAL; - ipa_base1 = round_down(args->populate_ipa_base1, l2_granule); - ipa_end2 = round_up(args->populate_ipa_base2 + args->populate_ipa_size2, l2_granule); - - return kvm_populate_ram_region(kvm, l2_granule, ipa_base1, ipa_end2, args); -} - -int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) -{ - int r = 0; - - mutex_lock(&kvm->lock); - switch (cap->args[0]) { - case KVM_CAP_ARM_TMM_CONFIG_CVM_HOST: - r = kvm_tmm_config_cvm(kvm, cap); - break; - case KVM_CAP_ARM_TMM_CREATE_RD: - r = kvm_arm_create_cvm(kvm); - break; - case KVM_CAP_ARM_TMM_POPULATE_CVM: { - struct kvm_cap_arm_tmm_populate_region_args args; - void __user *argp = u64_to_user_ptr(cap->args[1]); - - if (copy_from_user(&args, argp, sizeof(args))) { - r = -EFAULT; - break; - } - r = kvm_populate_ipa_cvm_range(kvm, &args); - break; - } - case KVM_CAP_ARM_TMM_ACTIVATE_CVM: - r = kvm_activate_cvm(kvm); - break; - default: - r = -EINVAL; - break; - } - mutex_unlock(&kvm->lock); 
- - return r; -} - -void kvm_destroy_tec(struct kvm_vcpu *vcpu) -{ - struct virtcca_cvm_tec *tec = &vcpu->arch.tec; - - if (!vcpu_is_tec(vcpu)) - return; - - if (tmi_tec_destroy(tec->tec) != 0) - kvm_err("%s vcpu id : %d failed!\n", __func__, vcpu->vcpu_id); - - tec->tec = 0; - kfree(tec->tec_run); -} - -static int tmi_check_version(void) -{ - u64 res; - int version_major; - int version_minor; - - res = tmi_version(); - if (res == SMCCC_RET_NOT_SUPPORTED) - return -ENXIO; - - version_major = TMI_ABI_VERSION_GET_MAJOR(res); - version_minor = TMI_ABI_VERSION_GET_MINOR(res); - - if (version_major != TMI_ABI_VERSION_MAJOR) { - kvm_err("Unsupported TMI_ABI (version %d %d)\n", version_major, - version_minor); - return -ENXIO; - } - - kvm_info("TMI ABI version %d,%d\n", version_major, version_minor); - return 0; -} - -int kvm_tec_enter(struct kvm_vcpu *vcpu) -{ - struct tmi_tec_run *run; - struct virtcca_cvm_tec *tec = &vcpu->arch.tec; - struct virtcca_cvm *cvm = vcpu->kvm->arch.virtcca_cvm; - - if (READ_ONCE(cvm->state) != CVM_STATE_ACTIVE) - return -EINVAL; - - run = tec->tec_run; - /* set/clear TWI TWE flags */ - if (vcpu->arch.hcr_el2 & HCR_TWI) - run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFI; - else - run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFI; - - if (vcpu->arch.hcr_el2 & HCR_TWE) - run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFE; - else - run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFE; - - return tmi_tec_enter(tec->tec, __pa(run)); -} - -int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target) -{ - int ret; - struct virtcca_cvm_tec *calling_tec = &calling->arch.tec; - struct virtcca_cvm_tec *target_tec = &target->arch.tec; - - ret = tmi_psci_complete(calling_tec->tec, target_tec->tec); - if (ret) - return -EINVAL; - return 0; -} - -int kvm_init_tmm(void) -{ - int ret; - - if (PAGE_SIZE != SZ_4K) - return 0; - - if (tmi_check_version()) - return 0; - - ret = cvm_vmid_init(); - if (ret) - return ret; - - tmm_feat_reg0 = tmi_features(0); - kvm_info("TMM feature0: 0x%lx\n", tmm_feat_reg0); - - static_branch_enable(&virtcca_cvm_is_available); - - return 0; -} - -static bool is_numa_ipa_range_valid(struct kvm_numa_info *numa_info) -{ - unsigned long i; - struct kvm_numa_node *numa_node, *prev_numa_node; - - prev_numa_node = NULL; - for (i = 0; i < numa_info->numa_cnt; i++) { - numa_node = &numa_info->numa_nodes[i]; - if (numa_node->ipa_start + numa_node->ipa_size < numa_node->ipa_start) - return false; - if (prev_numa_node && - numa_node->ipa_start < prev_numa_node->ipa_start + prev_numa_node->ipa_size) - return false; - prev_numa_node = numa_node; - } - if (numa_node->ipa_start + numa_node->ipa_size > CVM_IPA_MAX_VAL) - return false; - return true; -} - -int kvm_load_user_data(struct kvm *kvm, unsigned long arg) -{ - struct kvm_user_data user_data; - void __user *argp = (void __user *)arg; - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_numa_info *numa_info; - - if (!kvm_is_virtcca_cvm(kvm)) - return -EFAULT; - - if (copy_from_user(&user_data, argp, sizeof(user_data))) - return -EINVAL; - - numa_info = &user_data.numa_info; - if (numa_info->numa_cnt > MAX_NUMA_NODE) - return -EINVAL; - - if (numa_info->numa_cnt > 0) { - unsigned long i, total_size = 0; - struct kvm_numa_node *numa_node = &numa_info->numa_nodes[0]; - unsigned long ipa_end = numa_node->ipa_start + numa_node->ipa_size; - - if (!is_numa_ipa_range_valid(numa_info)) - return -EINVAL; - if (user_data.loader_start < numa_node->ipa_start || - user_data.dtb_end > ipa_end) - return -EINVAL; - for 
(i = 0; i < numa_info->numa_cnt; i++) - total_size += numa_info->numa_nodes[i].ipa_size; - if (total_size != user_data.ram_size) - return -EINVAL; - } - - if (user_data.image_end <= user_data.loader_start || - user_data.initrd_start < user_data.image_end || - user_data.dtb_end < user_data.initrd_start || - user_data.ram_size < user_data.dtb_end - user_data.loader_start) - return -EINVAL; - - cvm->loader_start = user_data.loader_start; - cvm->image_end = user_data.image_end; - cvm->initrd_start = user_data.initrd_start; - cvm->dtb_end = user_data.dtb_end; - cvm->ram_size = user_data.ram_size; - memcpy(&cvm->numa_info, numa_info, sizeof(struct kvm_numa_info)); - - return 0; -} - -void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu) -{ - kvm_timer_vcpu_put(vcpu); - kvm_vgic_put(vcpu); - vcpu->cpu = -1; -} - -unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu, - unsigned long target_affinity, unsigned long lowest_affinity_level) -{ - struct kvm_vcpu *target_vcpu; - - if (lowest_affinity_level != 0) - return PSCI_RET_INVALID_PARAMS; - - target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, target_affinity); - if (!target_vcpu) - return PSCI_RET_INVALID_PARAMS; - - cvm_psci_complete(vcpu, target_vcpu); - return PSCI_RET_SUCCESS; -} - -int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu, - bool serror_pending, bool ext_dabt_pending) -{ - struct virtcca_cvm_tec *tec = &vcpu->arch.tec; - - if (serror_pending) - return -EINVAL; - - if (ext_dabt_pending) { - if (!(((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags & - TEC_ENTRY_FLAG_EMUL_MMIO)) - return -EINVAL; - - ((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags - &= ~TEC_ENTRY_FLAG_EMUL_MMIO; - ((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags - |= TEC_ENTRY_FLAG_INJECT_SEA; - } - return 0; -} - -int kvm_init_cvm_vm(struct kvm *kvm) -{ - struct tmi_cvm_params *params; - struct virtcca_cvm *cvm; - - if (kvm->arch.virtcca_cvm) { - kvm_info("cvm already create.\n"); - return 0; - } - - cvm = (struct virtcca_cvm *)kzalloc(sizeof(struct virtcca_cvm), GFP_KERNEL_ACCOUNT); - if (!cvm) - return -ENOMEM; - - kvm->arch.virtcca_cvm = cvm; - params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!params) { - kfree(kvm->arch.virtcca_cvm); - kvm->arch.virtcca_cvm = NULL; - return -ENOMEM; - } - - cvm->params = params; - WRITE_ONCE(cvm->state, CVM_STATE_NONE); - - return 0; -} - -#ifdef CONFIG_HISI_VIRTCCA_CODA -/* - * Coda (Confidential Device Assignment) feature - * enable devices to pass directly to confidential virtual machines - */ - -/** - * is_in_virtcca_ram_range - Check if the iova belongs - * to the cvm ram range - * @kvm: The handle of kvm - * @iova: Ipa address - * - * Returns: - * %true if the iova belongs to cvm ram - * %false if the iova is not within the scope of cvm ram - */ -bool is_in_virtcca_ram_range(struct kvm *kvm, uint64_t iova) -{ - if (!is_virtcca_cvm_enable()) - return false; - - struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; - - if (iova >= virtcca_cvm->loader_start && - iova < virtcca_cvm->loader_start + virtcca_cvm->ram_size) - return true; - - return false; -} -EXPORT_SYMBOL_GPL(is_in_virtcca_ram_range); - -/** - * is_virtcca_iova_need_vfio_dma - Whether the vfio need - * to map the dma address - * @kvm: The handle of kvm - * @iova: Ipa address - * - * Returns: - * %true if virtcca cvm ram is nort mapped or - * virtcca_cvm_ram is mapped and the iova does not - * belong to cvm ram range - * %false if virtcca_cvm_ram is mapped and the iova belong - * to cvm ram range - */ -bool is_virtcca_iova_need_vfio_dma(struct kvm 
*kvm, uint64_t iova) -{ - if (!is_virtcca_cvm_enable()) - return false; - - struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; - - if (!virtcca_cvm->is_mapped) - return true; - - return !is_in_virtcca_ram_range(kvm, iova); -} -EXPORT_SYMBOL_GPL(is_virtcca_iova_need_vfio_dma); - -static int kvm_cvm_dev_ttt_create(struct virtcca_cvm *cvm, - unsigned long addr, - int level, - u64 numa_set) -{ - addr = ALIGN_DOWN(addr, cvm_ttt_level_mapsize(level - 1)); - return tmi_dev_ttt_create(numa_set, cvm->rd, addr, level); -} - -/* CVM create ttt level information about device */ -int kvm_cvm_create_dev_ttt_levels(struct kvm *kvm, struct virtcca_cvm *cvm, - unsigned long ipa, - int level, - int max_level, - struct kvm_mmu_memory_cache *mc) -{ - int ret = 0; - - while (level++ < max_level) { - u64 numa_set = kvm_get_first_binded_numa_set(kvm); - - ret = kvm_cvm_dev_ttt_create(cvm, ipa, level, numa_set); - if (ret) - return -ENXIO; - } - - return 0; -} - -/** - * cvm_map_max_level_size - MMIO Map according to largest possible granularity - * @map_start: The start of map address - * @map_end: The end of map address - * @map_size: Map range - * - * Returns: - * %level the map level - * %-ENXIO if no suitable mapping level was found - */ -static int cvm_map_max_level_size(unsigned long map_start, unsigned long map_end, - unsigned long *map_size) -{ - int level = 1; - - *map_size = tmm_granule_size(level); - if (IS_ALIGNED(map_start, *map_size) && - (map_start + *map_size <= map_end)) - return level; - - level++; - *map_size = tmm_granule_size(level); - if (IS_ALIGNED(map_start, *map_size) && - (map_start + *map_size <= map_end)) - return level; - - level++; - *map_size = tmm_granule_size(level); - if (IS_ALIGNED(map_start, *map_size) && - (map_start + *map_size <= map_end)) - return level; - - pr_err("level not allow to map size\n"); - return -ENXIO; -} - -/** - * cvm_map_unmap_ipa_range - Vfio driver map or - * unmap cvm ipa - * @kvm: The handle of kvm - * @ipa_base: Ipa address - * @pa: Physical address - * @map_size: Map range - * @is_map: Map type - * - * Returns: - * %0 if cvm map/unmap address successfully - * %-ENXIO if map/unmap failed - */ -int cvm_map_unmap_ipa_range(struct kvm *kvm, phys_addr_t ipa_base, - phys_addr_t pa, unsigned long map_size, uint32_t is_map) -{ - unsigned long map_start; - unsigned long map_end; - int level; - struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; - phys_addr_t rd = virtcca_cvm->rd; - unsigned long phys = pa; - int ret = 0; - - map_start = ipa_base; - map_end = map_start + map_size; - while (map_start < map_end) { - level = cvm_map_max_level_size(map_start, map_end, &map_size); - if (level < 0) { - ret = -ENXIO; - goto err; - } - if (is_map) - ret = tmi_mmio_map(rd, map_start, level, phys); - else - ret = tmi_mmio_unmap(rd, map_start, level); - - if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) { - /* Create missing TTTs and retry */ - int level_fault = TMI_RETURN_INDEX(ret); - - if (is_map) { - ret = kvm_cvm_create_dev_ttt_levels(kvm, virtcca_cvm, map_start, - level_fault, CVM_TTT_MAX_LEVEL, NULL); - if (ret) - goto err; - ret = tmi_mmio_map(rd, map_start, level, phys); - } else { - ret = tmi_mmio_unmap(rd, map_start, level_fault); - map_size = tmm_granule_size(level_fault); - } - } - - if (ret) - goto err; - - map_start += map_size; - phys += map_size; - } - - return 0; - -err: - if (!tmi_cvm_destroy(rd)) - kvm_info("Vfio map failed, kvm has destroyed cVM: %d\n", virtcca_cvm->cvm_vmid); - return -ENXIO; -} - -/** - * kvm_cvm_map_ipa_mmio - Map the 
mmio address when page fault - * @kvm: The handle of kvm - * @ipa_base: Ipa address - * @pa: Physical address - * @map_size: Map range - * - * Returns: - * %0 if cvm map address successfully - * %-ENXIO if map failed - */ -int kvm_cvm_map_ipa_mmio(struct kvm *kvm, phys_addr_t ipa_base, - phys_addr_t pa, unsigned long map_size) -{ - unsigned long size; - gfn_t gfn; - kvm_pfn_t pfn; - struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; - phys_addr_t rd = virtcca_cvm->rd; - unsigned long ipa = ipa_base; - unsigned long phys = pa; - int ret = 0; - - if (WARN_ON(!IS_ALIGNED(ipa, map_size))) - return -EINVAL; - - for (size = 0; size < map_size; size += PAGE_SIZE) { - ret = tmi_mmio_map(rd, ipa, CVM_TTT_MAX_LEVEL, phys); - if (ret == TMI_ERROR_TTT_CREATED) { - ret = 0; - goto label; - } - if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) { - /* Create missing TTTs and retry */ - int level_fault = TMI_RETURN_INDEX(ret); - - ret = kvm_cvm_create_dev_ttt_levels(kvm, virtcca_cvm, ipa, level_fault, - CVM_TTT_MAX_LEVEL, NULL); - - if (ret) - goto err; - ret = tmi_mmio_map(rd, ipa, CVM_TTT_MAX_LEVEL, phys); - } - - if (ret) - goto err; -label: - if (size + PAGE_SIZE >= map_size) - break; - - ipa += PAGE_SIZE; - gfn = gpa_to_gfn(ipa); - pfn = gfn_to_pfn(kvm, gfn); - kvm_set_pfn_accessed(pfn); - kvm_release_pfn_clean(pfn); - phys = (uint64_t)__pfn_to_phys(pfn); - - } - - return 0; - -err: - if (!tmi_cvm_destroy(rd)) - kvm_info("MMIO map failed, kvm has destroyed cVM: %d\n", virtcca_cvm->cvm_vmid); - return -ENXIO; -} - -/* Page fault map ipa */ -int kvm_cvm_map_ipa(struct kvm *kvm, phys_addr_t ipa, kvm_pfn_t pfn, - unsigned long map_size, enum kvm_pgtable_prot prot, int ret) -{ - if (!is_virtcca_cvm_enable() || !kvm_is_virtcca_cvm(kvm)) - return ret; - - struct page *dst_page = pfn_to_page(pfn); - phys_addr_t dst_phys = page_to_phys(dst_page); - - if (WARN_ON(!(prot & KVM_PGTABLE_PROT_W))) - return -EFAULT; - - if (prot & KVM_PGTABLE_PROT_DEVICE) - return kvm_cvm_map_ipa_mmio(kvm, ipa, dst_phys, map_size); - - return 0; -} - -/* Set device secure flag */ -void virtcca_cvm_set_secure_flag(void *vdev, void *info) -{ - if (!is_virtcca_cvm_enable()) - return; - - if (!is_cc_dev(pci_dev_id(((struct vfio_pci_core_device *)vdev)->pdev))) - return; - - ((struct vfio_device_info *)info)->flags |= VFIO_DEVICE_FLAGS_SECURE; -} -EXPORT_SYMBOL_GPL(virtcca_cvm_set_secure_flag); - -/** - * cvm_arm_smmu_domain_set_kvm - Associate SMMU domain with CVM - * @dev: The Device under the iommu group - * - * Returns: - * %0 if smmu_domain has been associate cvm or associate cvm successfully - * %-ENXIO if the iommu group does not have smmu domain - */ -int cvm_arm_smmu_domain_set_kvm(struct device *dev, void *data) -{ - struct kvm *kvm; - struct iommu_domain *domain; - struct arm_smmu_domain *arm_smmu_domain = NULL; - - domain = iommu_get_domain_for_dev(dev); - if (!domain) - return -ENXIO; - - arm_smmu_domain = to_smmu_domain(domain); - if (arm_smmu_domain->kvm) - return 1; - - kvm = virtcca_arm_smmu_get_kvm(arm_smmu_domain); - if (kvm && kvm_is_virtcca_cvm(kvm)) - arm_smmu_domain->kvm = kvm; - - return 1; -} - -int virtcca_cvm_arm_smmu_domain_set_kvm(void *group) -{ - int ret; - - ret = iommu_group_for_each_dev((struct iommu_group *)group, - (void *)NULL, cvm_arm_smmu_domain_set_kvm); - return ret; -} -#endif -- Gitee From 9703da8e4efc84e6709ae2abd15370cda078cced Mon Sep 17 00:00:00 2001 From: hjx_gitff <1435901016@qq.com> Date: Mon, 10 Mar 2025 08:48:52 -0400 Subject: [PATCH 3/4] add cca base operations --- 
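Notes (illustration only, ignored by git am): a minimal sketch of how a CVM backend is expected to hook into the new cca_base dispatch layer added by this patch. The demo_* stubs, the header paths and the initcall wiring below are hypothetical; the real backends (VirtCCA and ARM CCA) supply their own cca_operations tables and implementations.

/* Hypothetical backend registration, for illustration only. */
#include <linux/init.h>
#include <linux/kvm_host.h>
#include <asm/cca_base.h>	/* struct cca_operations, cca_operations_register() */
#include <asm/cca_type.h>	/* enum cca_cvm_type */

static int demo_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	return -EINVAL;		/* stub: no capabilities handled */
}

static int demo_init_realm_vm(struct kvm *kvm)
{
	return 0;		/* stub: nothing to set up */
}

static struct cca_operations demo_cca_ops = {
	.enable_cap	= demo_enable_cap,
	.init_realm_vm	= demo_init_realm_vm,
	/* Callbacks left NULL are simply skipped by the cca_base wrappers. */
};

static int __init demo_register_cca_ops(void)
{
	/* VIRTCCA_CVM / ARMCCA_CVM come from asm/cca_type.h. */
	return cca_operations_register(VIRTCCA_CVM, &demo_cca_ops);
}
core_initcall(demo_register_cca_ops);

At runtime the wrappers in cca_base.c (kvm_realm_enable_cap(), kvm_rec_enter() and friends) look up g_cca_operations[cca_cvm_type], selected via the cca_cvm_type= early parameter or set_cca_cvm_type(), and forward the call, so common KVM code never references a specific backend directly.
---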
arch/arm64/include/asm/cca_base.h | 66 +++++++++++ arch/arm64/include/asm/cca_type.h | 14 +++ arch/arm64/include/asm/virtcca_cvm_host.h | 8 +- arch/arm64/kernel/virtcca_cvm_host.c | 4 +- arch/arm64/kvm/Makefile | 2 +- arch/arm64/kvm/cca_base.c | 137 ++++++++++++++++++++++ 6 files changed, 227 insertions(+), 4 deletions(-) create mode 100644 arch/arm64/include/asm/cca_base.h create mode 100644 arch/arm64/include/asm/cca_type.h create mode 100644 arch/arm64/kvm/cca_base.c diff --git a/arch/arm64/include/asm/cca_base.h b/arch/arm64/include/asm/cca_base.h new file mode 100644 index 000000000000..395db40b850d --- /dev/null +++ b/arch/arm64/include/asm/cca_base.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. + */ +#ifndef __CCA_BASE_H +#define __CCA_BASE_H + +#include +#include +#include + +#include +#include +#include +#include +#include + +struct cca_operations { + int (*enable_cap) (struct kvm *, struct kvm_enable_cap *); + int (*init_realm_vm) (struct kvm *); + int (*realm_vm_enter) (struct kvm_vcpu *); + int (*realm_vm_exit) (struct kvm_vcpu *, int); + void (*init_sel2_hypervisor) (void); + int (*psci_complete) (struct kvm_vcpu *, struct kvm_vcpu *, unsigned long); + int (*create_vcpu) (struct kvm_vcpu *); + void (*destroy_vcpu) (struct kvm_vcpu *); + void (*destroy_vm) (struct kvm *); + int (*enable_realm) (struct kvm *); + int (*vcpu_set_events) (struct kvm_vcpu *, bool , bool); + struct rec_run *(*get_rec_run) (struct kvm_vcpu *); + u32 (*vgic_nr_lr) (void); +} ____cacheline_aligned; + +struct cca_share_pages_operations { + int (*alloc_shared_pages) (int, gfp_t, unsigned int); + void (*free_shared_pages) (void *, unsigned int); +} ____cacheline_aligned; + +int __init cca_operations_register(enum cca_cvm_type type, struct cca_operations *ops); +int __init cca_share_pages_ops_register(enum cca_cvm_type type, struct cca_share_pages_operations *ops); + +int kvm_get_cvm_type(void); + +int kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); +void kvm_init_rme(void); + +int kvm_rec_enter(struct kvm_vcpu *vcpu); +int handle_rec_exit(struct kvm_vcpu *vcpu, int rec_run_ret); + +int kvm_init_realm_vm(struct kvm *kvm); +void kvm_destroy_realm(struct kvm *kvm); + +int kvm_create_rec(struct kvm_vcpu *vcpu); +void kvm_destroy_rec(struct kvm_vcpu *vcpu); + +int realm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target, unsigned long status); + +int kvm_realm_vcpu_set_events(struct kvm_vcpu *vcpu, bool serror_pending, bool ext_dabt_pending); + +struct rec_run *kvm_get_rec_run(struct kvm_vcpu *vcpu); + +u32 kvm_realm_vgic_nr_lr(void); + +bool _kvm_is_realm(struct kvm *kvm); + +#endif /* __CCA_BASE_H */ diff --git a/arch/arm64/include/asm/cca_type.h b/arch/arm64/include/asm/cca_type.h new file mode 100644 index 000000000000..da9fd6e4c419 --- /dev/null +++ b/arch/arm64/include/asm/cca_type.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. 
+ */ +#ifndef __CCA_TYPE_H +#define __CCA_TYPE_H + +enum cca_cvm_type { + VIRTCCA_CVM, + ARMCCA_CVM, + CCA_CVM_MAX, +}; + +#endif /* __CCA_TYPE_H */ diff --git a/arch/arm64/include/asm/virtcca_cvm_host.h b/arch/arm64/include/asm/virtcca_cvm_host.h index 8913a07d0bd1..dee72b4dfe81 100644 --- a/arch/arm64/include/asm/virtcca_cvm_host.h +++ b/arch/arm64/include/asm/virtcca_cvm_host.h @@ -4,10 +4,12 @@ */ #ifndef __VIRTCCA_CVM_HOST_H #define __VIRTCCA_CVM_HOST_H +#include #ifdef CONFIG_HISI_VIRTCCA_HOST bool is_virtcca_cvm_enable(void); +void set_cca_cvm_type(int type); #else @@ -16,5 +18,7 @@ static inline bool is_virtcca_cvm_enable(void) return false; } -#endif /* CONFIG_HISI_VIRTCCA_GUEST */ -#endif /* __VIRTCCA_CVM_GUEST_H */ +static inline void set_cca_cvm_type(int type) {} + +#endif /* CONFIG_HISI_VIRTCCA_HOST */ +#endif /* __VIRTCCA_CVM_HOST_H */ diff --git a/arch/arm64/kernel/virtcca_cvm_host.c b/arch/arm64/kernel/virtcca_cvm_host.c index 4522782b1342..c70c90542427 100644 --- a/arch/arm64/kernel/virtcca_cvm_host.c +++ b/arch/arm64/kernel/virtcca_cvm_host.c @@ -28,8 +28,10 @@ static int __init setup_virtcca_cvm_host(char *str) if (ret) { pr_warn("Unable to parse cvm_guest.\n"); } else { - if (val) + if (val) { static_branch_enable(&virtcca_cvm_is_enable); + set_cca_cvm_type(VIRTCCA_CVM); + } } return ret; } diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index dacae3142005..fb78e8674c48 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -21,7 +21,7 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o pvsched.o \ vgic/vgic-mmio.o vgic/vgic-mmio-v2.o \ vgic/vgic-mmio-v3.o vgic/vgic-kvm-device.o \ vgic/vgic-its.o vgic/vgic-debug.o \ - rme.o rme-exit.o + rme.o rme-exit.o cca_base.o kvm-$(CONFIG_VIRT_PLAT_DEV) += vgic/shadow_dev.o kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o diff --git a/arch/arm64/kvm/cca_base.c b/arch/arm64/kvm/cca_base.c new file mode 100644 index 000000000000..1aa5490f8d6b --- /dev/null +++ b/arch/arm64/kvm/cca_base.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2024. Huawei Technologies Co., Ltd. All rights reserved. 
+ */ +#include +#include +#include +#include +#include + +#include +#include +#include + +static int cca_cvm_type; +static struct cca_operations *g_cca_operations[CCA_CVM_MAX]; + +/* please use 'cca_cvm_type=$type' to enable cca cvm feature */ +static int __init setup_cca_cvm_type(char *str) +{ + int ret; + unsigned int val; + + if (!str) + return 0; + + ret = kstrtouint(str, 10, &val); + if (ret) { + pr_warn("Unable to parse cca cvm_type.\n"); + } else { + if (val) + cca_cvm_type = val - 1; + } + return ret; +} +early_param("cca_cvm_type", setup_cca_cvm_type); + +int __init cca_operations_register(enum cca_cvm_type type, struct cca_operations *ops) +{ + if (type >= CCA_CVM_MAX) + return -EINVAL; + + g_cca_operations[type] = ops; + return 0; +} + +int kvm_get_cvm_type(void) +{ + return cca_cvm_type; +} + +void set_cca_cvm_type(int type) +{ + cca_cvm_type = type; +} +EXPORT_SYMBOL_GPL(set_cca_cvm_type); + +int kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +{ + if (g_cca_operations[cca_cvm_type]->enable_cap) + return g_cca_operations[cca_cvm_type]->enable_cap(kvm, cap); + return 0; +} + +int kvm_init_realm_vm(struct kvm *kvm) +{ + if (g_cca_operations[cca_cvm_type]->init_realm_vm) + return g_cca_operations[cca_cvm_type]->init_realm_vm(kvm); + return 0; +} + +int kvm_rec_enter(struct kvm_vcpu *vcpu) +{ + if (g_cca_operations[cca_cvm_type]->realm_vm_enter) + return g_cca_operations[cca_cvm_type]->realm_vm_enter(vcpu); + return 0; +} + +int handle_rec_exit(struct kvm_vcpu *vcpu, int rec_run_ret) +{ + if (g_cca_operations[cca_cvm_type]->realm_vm_exit) + return g_cca_operations[cca_cvm_type]->realm_vm_exit(vcpu, rec_run_ret); + return 0; +} + +void kvm_destroy_realm(struct kvm *kvm) +{ + if (g_cca_operations[cca_cvm_type]->destroy_vm) + g_cca_operations[cca_cvm_type]->destroy_vm(kvm); +} + +int kvm_create_rec(struct kvm_vcpu *vcpu) +{ + if (g_cca_operations[cca_cvm_type]->create_vcpu) + return g_cca_operations[cca_cvm_type]->create_vcpu(vcpu); + return 0; +} + +void kvm_destroy_rec(struct kvm_vcpu *vcpu) +{ + if (g_cca_operations[cca_cvm_type]->destroy_vcpu) + g_cca_operations[cca_cvm_type]->destroy_vcpu(vcpu); +} + +void kvm_init_rme(void) +{ + if (g_cca_operations[cca_cvm_type]->init_sel2_hypervisor) + g_cca_operations[cca_cvm_type]->init_sel2_hypervisor(); +} + +int realm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target, unsigned long status) +{ + if (g_cca_operations[cca_cvm_type]->psci_complete) + return g_cca_operations[cca_cvm_type]->psci_complete(calling, target, status); + return 0; +} + +int kvm_realm_vcpu_set_events(struct kvm_vcpu *vcpu, bool serror_pending, bool ext_dabt_pending) +{ + if (g_cca_operations[cca_cvm_type]->vcpu_set_events) + return g_cca_operations[cca_cvm_type]->vcpu_set_events(vcpu, serror_pending, ext_dabt_pending); + return 0; +} + +struct rec_run *kvm_get_rec_run(struct kvm_vcpu *vcpu) +{ + if (g_cca_operations[cca_cvm_type]->get_rec_run) + return g_cca_operations[cca_cvm_type]->get_rec_run(vcpu); + return NULL; +} + +u32 kvm_realm_vgic_nr_lr(void) +{ + if (g_cca_operations[cca_cvm_type]->vgic_nr_lr) + return g_cca_operations[cca_cvm_type]->vgic_nr_lr(); + return 0; +} -- Gitee From 507d7badef709e91b1485cb0c42e781efcb7a5ae Mon Sep 17 00:00:00 2001 From: hjx_gitff <1435901016@qq.com> Date: Mon, 10 Mar 2025 08:49:27 -0400 Subject: [PATCH 4/4] adapt realm operation by cca_base --- arch/arm64/include/asm/cca_base.h | 2 - arch/arm64/include/asm/kvm_emulate.h | 11 +++++ arch/arm64/include/asm/kvm_rme.h | 20 ++++---- 
arch/arm64/kvm/guest.c | 27 ++--------- arch/arm64/kvm/inject_fault.c | 2 +- arch/arm64/kvm/mmio.c | 4 +- arch/arm64/kvm/mmu.c | 8 ++-- arch/arm64/kvm/pmu-emul.c | 3 +- arch/arm64/kvm/reset.c | 2 +- arch/arm64/kvm/rme-exit.c | 2 +- arch/arm64/kvm/rme.c | 70 ++++++++++++++++++++++++---- arch/arm64/kvm/vgic/vgic-v3.c | 2 +- arch/arm64/kvm/vgic/vgic.c | 10 ++-- 13 files changed, 103 insertions(+), 60 deletions(-) diff --git a/arch/arm64/include/asm/cca_base.h b/arch/arm64/include/asm/cca_base.h index 395db40b850d..6fd435c4dea2 100644 --- a/arch/arm64/include/asm/cca_base.h +++ b/arch/arm64/include/asm/cca_base.h @@ -61,6 +61,4 @@ struct rec_run *kvm_get_rec_run(struct kvm_vcpu *vcpu); u32 kvm_realm_vgic_nr_lr(void); -bool _kvm_is_realm(struct kvm *kvm); - #endif /* __CCA_BASE_H */ diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 4b155fa39d2a..2cc8007c522f 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -20,6 +20,7 @@ #include #include #include +#include #define CURRENT_EL_SP_EL0_VECTOR 0x0 #define CURRENT_EL_SP_ELx_VECTOR 0x200 @@ -650,6 +651,11 @@ static inline bool kvm_is_realm(struct kvm *kvm) return false; } +static inline bool _kvm_is_realm(struct kvm *kvm) +{ + return kvm_is_realm(kvm) && (kvm_get_cvm_type() == ARMCCA_CVM); +} + static inline enum realm_state kvm_realm_state(struct kvm *kvm) { return READ_ONCE(kvm->arch.realm.state); @@ -677,6 +683,11 @@ static inline bool vcpu_is_rec(struct kvm_vcpu *vcpu) return false; } +static inline bool _vcpu_is_rec(struct kvm_vcpu *vcpu) +{ + return vcpu_is_rec(vcpu) && (kvm_get_cvm_type() == ARMCCA_CVM); +} + static inline bool kvm_arm_vcpu_rec_finalized(struct kvm_vcpu *vcpu) { return vcpu->arch.rec.mpidr != INVALID_HWID; diff --git a/arch/arm64/include/asm/kvm_rme.h b/arch/arm64/include/asm/kvm_rme.h index 4b2e21d5f37e..ad33e465a06d 100644 --- a/arch/arm64/include/asm/kvm_rme.h +++ b/arch/arm64/include/asm/kvm_rme.h @@ -81,9 +81,9 @@ struct realm_rec { struct rec_run *run; }; -void kvm_init_rme(void); +void _kvm_init_rme(void); u32 kvm_realm_ipa_limit(void); -u32 kvm_realm_vgic_nr_lr(void); +u32 _kvm_realm_vgic_nr_lr(void); u8 kvm_realm_max_pmu_counters(void); unsigned int kvm_realm_sve_max_vl(void); @@ -91,15 +91,15 @@ u64 kvm_realm_reset_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val); bool kvm_rme_supports_sve(void); -int kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); -int kvm_init_realm_vm(struct kvm *kvm); -void kvm_destroy_realm(struct kvm *kvm); +int _kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); +int _kvm_init_realm_vm(struct kvm *kvm); +void _kvm_destroy_realm(struct kvm *kvm); void kvm_realm_destroy_rtts(struct kvm *kvm, u32 ia_bits); -int kvm_create_rec(struct kvm_vcpu *vcpu); -void kvm_destroy_rec(struct kvm_vcpu *vcpu); +int _kvm_create_rec(struct kvm_vcpu *vcpu); +void _kvm_destroy_rec(struct kvm_vcpu *vcpu); -int kvm_rec_enter(struct kvm_vcpu *vcpu); -int handle_rec_exit(struct kvm_vcpu *vcpu, int rec_run_status); +int _kvm_rec_enter(struct kvm_vcpu *vcpu); +int _handle_rec_exit(struct kvm_vcpu *vcpu, int rec_run_status); void kvm_realm_unmap_range(struct kvm *kvm, unsigned long ipa, @@ -119,7 +119,7 @@ int realm_set_ipa_state(struct kvm_vcpu *vcpu, unsigned long addr, unsigned long end, unsigned long ripas, unsigned long *top_ipa); -int realm_psci_complete(struct kvm_vcpu *calling, +int _realm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target, unsigned long status); diff 
--git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 295b813308d0..50c0dcb1dedc 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -880,7 +880,7 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) return -EINVAL; - if (kvm_is_realm(vcpu->kvm) && !validate_realm_set_reg(vcpu, reg)) + if (_kvm_is_realm(vcpu->kvm) && !validate_realm_set_reg(vcpu, reg)) return -EINVAL; switch (reg->id & KVM_REG_ARM_COPROC_MASK) { @@ -934,29 +934,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, bool has_esr = events->exception.serror_has_esr; bool ext_dabt_pending = events->exception.ext_dabt_pending; - if (vcpu_is_rec(vcpu)) { - /* Cannot inject SError into a Realm. */ - if (serror_pending) - return -EINVAL; - - /* - * If a data abort is pending, set the flag and let the RMM - * inject an SEA when the REC is scheduled to be run. - */ - if (ext_dabt_pending) { - /* - * Can only inject SEA into a Realm if the previous exit - * was due to a data abort of an Unprotected IPA. - */ - if (!(vcpu->arch.rec.run->enter.flags & REC_ENTER_EMULATED_MMIO)) - return -EINVAL; - - vcpu->arch.rec.run->enter.flags &= ~REC_ENTER_EMULATED_MMIO; - vcpu->arch.rec.run->enter.flags |= REC_ENTER_INJECT_SEA; - } - - return 0; - } + if (vcpu_is_rec(vcpu)) + return kvm_realm_vcpu_set_events(vcpu, serror_pending, ext_dabt_pending); if (serror_pending && has_esr) { if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 1542b2605113..ee19708fc611 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -165,7 +165,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr) */ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) { - if (unlikely(vcpu_is_rec(vcpu))) + if (unlikely(_vcpu_is_rec(vcpu))) vcpu->arch.rec.run->enter.flags |= REC_ENTER_INJECT_SEA; else if (vcpu_el1_is_32bit(vcpu)) inject_abt32(vcpu, false, addr); diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c index 4442e99153a7..f4f355e14e22 100644 --- a/arch/arm64/kvm/mmio.c +++ b/arch/arm64/kvm/mmio.c @@ -139,7 +139,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu) data = vcpu_data_host_to_guest(vcpu, data, len); if (vcpu_is_rec(vcpu)) - vcpu->arch.rec.run->enter.gprs[0] = data; + (kvm_get_rec_run(vcpu))->enter.gprs[0] = data; else vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data); } @@ -149,7 +149,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu) * in the guest. 
*/ if (vcpu_is_rec(vcpu)) - vcpu->arch.rec.run->enter.flags |= REC_ENTER_EMULATED_MMIO; + (kvm_get_rec_run(vcpu))->enter.flags |= REC_ENTER_EMULATED_MMIO; else kvm_incr_pc(vcpu); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 522d37b7774f..1f4ded82901d 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -872,7 +872,7 @@ static int kvm_init_ipa_range(struct kvm *kvm, unsigned long type) u64 mmfr0, mmfr1; u32 phys_shift; - if (kvm_is_realm(kvm)) + if (_kvm_is_realm(kvm)) kvm_ipa_limit = kvm_realm_ipa_limit(); phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); @@ -1527,7 +1527,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (logging_active) { force_pte = true; vma_shift = PAGE_SHIFT; - } else if (kvm_is_realm(kvm)) { + } else if (_kvm_is_realm(kvm)) { // Force PTE level mappings for realms force_pte = true; vma_shift = PAGE_SHIFT; @@ -1630,7 +1630,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, * backed by a THP and thus use block mapping if possible. */ /* FIXME: We shouldn't need to disable this for realms */ - if (vma_pagesize == PAGE_SIZE && !(force_pte || device || kvm_is_realm(kvm))) { + if (vma_pagesize == PAGE_SIZE && !(force_pte || device || _kvm_is_realm(kvm))) { if (fault_status == ESR_ELx_FSC_PERM && fault_granule > PAGE_SIZE) vma_pagesize = fault_granule; @@ -1673,7 +1673,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, */ if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule) ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot); - else if (kvm_is_realm(kvm)) + else if (_kvm_is_realm(kvm)) ret = realm_map_ipa(kvm, fault_ipa, pfn, vma_pagesize, prot, memcache); else ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize, diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index f3376d0594c3..220df5df4acb 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -15,6 +15,7 @@ #include #include #include +#include #define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0) @@ -341,7 +342,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) u64 reg = 0; if (vcpu_is_rec(vcpu)) - return vcpu->arch.rec.run->exit.pmu_ovf_status; + return (kvm_get_rec_run(vcpu))->exit.pmu_ovf_status; if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) { reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 21442efb2c8d..1d3c84f7c71d 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -162,7 +162,7 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu)) return false; - if (kvm_is_realm(vcpu->kvm) && + if (_kvm_is_realm(vcpu->kvm) && !(vcpu_is_rec(vcpu) && kvm_arm_vcpu_rec_finalized(vcpu) && READ_ONCE(vcpu->kvm->arch.realm.state) == REALM_STATE_ACTIVE)) return false; diff --git a/arch/arm64/kvm/rme-exit.c b/arch/arm64/kvm/rme-exit.c index 033f63449487..53b60bbfc20b 100644 --- a/arch/arm64/kvm/rme-exit.c +++ b/arch/arm64/kvm/rme-exit.c @@ -135,7 +135,7 @@ static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu) * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on * proper exit to userspace. 
*/ -int handle_rec_exit(struct kvm_vcpu *vcpu, int rec_run_ret) +int _handle_rec_exit(struct kvm_vcpu *vcpu, int rec_run_ret) { struct realm_rec *rec = &vcpu->arch.rec; u8 esr_ec = ESR_ELx_EC(rec->run->exit.esr); diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c index c358d8f212c3..546dd32dfc5c 100644 --- a/arch/arm64/kvm/rme.c +++ b/arch/arm64/kvm/rme.c @@ -12,6 +12,7 @@ #include #include +#include static unsigned long rmm_feat_reg0; @@ -58,7 +59,7 @@ u32 kvm_realm_ipa_limit(void) return u64_get_bits(rmm_feat_reg0, RMI_FEATURE_REGISTER_0_S2SZ); } -u32 kvm_realm_vgic_nr_lr(void) +u32 _kvm_realm_vgic_nr_lr(void) { return u64_get_bits(rmm_feat_reg0, RMI_FEATURE_REGISTER_0_GICV3_NUM_LRS); } @@ -156,7 +157,7 @@ static void free_delegated_granule(phys_addr_t phys) free_page((unsigned long)phys_to_virt(phys)); } -int realm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target, +int _realm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target, unsigned long status) { int ret; @@ -1271,7 +1272,7 @@ static int kvm_rme_config_realm(struct kvm *kvm, struct kvm_enable_cap *cap) return r; } -int kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) +int _kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) { int r = 0; @@ -1320,7 +1321,7 @@ int kvm_realm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) return r; } -void kvm_destroy_realm(struct kvm *kvm) +void _kvm_destroy_realm(struct kvm *kvm) { struct realm *realm = &kvm->arch.realm; size_t pgd_size = kvm_pgtable_stage2_pgd_size(kvm->arch.vtcr); @@ -1386,7 +1387,7 @@ static void kvm_complete_ripas_change(struct kvm_vcpu *vcpu) } while (top_ipa < top); } -int kvm_rec_enter(struct kvm_vcpu *vcpu) +int _kvm_rec_enter(struct kvm_vcpu *vcpu) { struct realm_rec *rec = &vcpu->arch.rec; @@ -1449,7 +1450,7 @@ static int alloc_rec_aux(struct page **aux_pages, return ret; } -int kvm_create_rec(struct kvm_vcpu *vcpu) +int _kvm_create_rec(struct kvm_vcpu *vcpu) { struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu); unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu); @@ -1529,7 +1530,7 @@ int kvm_create_rec(struct kvm_vcpu *vcpu) return r; } -void kvm_destroy_rec(struct kvm_vcpu *vcpu) +void _kvm_destroy_rec(struct kvm_vcpu *vcpu) { struct realm *realm = &vcpu->kvm->arch.realm; struct realm_rec *rec = &vcpu->arch.rec; @@ -1555,7 +1556,7 @@ void kvm_destroy_rec(struct kvm_vcpu *vcpu) free_delegated_granule(rec_page_phys); } -int kvm_init_realm_vm(struct kvm *kvm) +int _kvm_init_realm_vm(struct kvm *kvm) { struct realm_params *params; @@ -1567,7 +1568,7 @@ int kvm_init_realm_vm(struct kvm *kvm) return 0; } -void kvm_init_rme(void) +void _kvm_init_rme(void) { if (PAGE_SIZE != SZ_4K) /* Only 4k page size on the host is supported */ @@ -1585,3 +1586,54 @@ void kvm_init_rme(void) static_branch_enable(&kvm_rme_is_available); } + +static int realm_vcpu_set_events(struct kvm_vcpu *vcpu, bool serror_pending, bool ext_dabt_pending) +{ + /* Cannot inject SError into a Realm. */ + if (serror_pending) + return -EINVAL; + + /* + * If a data abort is pending, set the flag and let the RMM + * inject an SEA when the REC is scheduled to be run. + */ + if (ext_dabt_pending) { + /* + * Can only inject SEA into a Realm if the previous exit + * was due to a data abort of an Unprotected IPA. 
+ */ + if (!(vcpu->arch.rec.run->enter.flags & REC_ENTER_EMULATED_MMIO)) + return -EINVAL; + + vcpu->arch.rec.run->enter.flags &= ~REC_ENTER_EMULATED_MMIO; + vcpu->arch.rec.run->enter.flags |= REC_ENTER_INJECT_SEA; + } + + return 0; +} + +static struct rec_run *_kvm_get_rec_run(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.rec.run; +} + +static struct cca_operations armcca_operations = { + .enable_cap = _kvm_realm_enable_cap, + .init_realm_vm = _kvm_init_realm_vm, + .realm_vm_enter = _kvm_rec_enter, + .realm_vm_exit = _handle_rec_exit, + .init_sel2_hypervisor = _kvm_init_rme, + .psci_complete = _realm_psci_complete, + .destroy_vm = _kvm_destroy_realm, + .create_vcpu = _kvm_create_rec, + .destroy_vcpu = _kvm_destroy_rec, + .vcpu_set_events = realm_vcpu_set_events, + .get_rec_run = _kvm_get_rec_run, + .vgic_nr_lr = _kvm_realm_vgic_nr_lr, +}; + +static int __init armcca_register(void) +{ + return cca_operations_register(ARMCCA_CVM, &armcca_operations); +} +core_initcall(armcca_register); diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index b45416bdbb65..a0356461b07f 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -772,7 +772,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu) struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; if (vcpu_is_rec(vcpu)) - cpu_if->vgic_vmcr = vcpu->arch.rec.run->exit.gicv3_vmcr; + cpu_if->vgic_vmcr = (kvm_get_rec_run(vcpu))->exit.gicv3_vmcr; WARN_ON(vgic_v4_put(vcpu)); diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index a398dbaa85db..f8158e07888f 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -909,11 +909,12 @@ static inline bool can_access_vgic_from_kernel(void) static inline void vgic_rmm_save_state(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; + struct rec_run *run = kvm_get_rec_run(vcpu); int i; for (i = 0; i < kvm_vcpu_vgic_nr_lr(vcpu); i++) { - cpu_if->vgic_lr[i] = vcpu->arch.rec.run->exit.gicv3_lrs[i]; - vcpu->arch.rec.run->enter.gicv3_lrs[i] = 0; + cpu_if->vgic_lr[i] = run->exit.gicv3_lrs[i]; + run->enter.gicv3_lrs[i] = 0; } } @@ -953,15 +954,16 @@ static inline void vgic_rmm_restore_state(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; int i; + struct rec_run *rec_run = kvm_get_rec_run(vcpu); for (i = 0; i < kvm_vcpu_vgic_nr_lr(vcpu); i++) { - vcpu->arch.rec.run->enter.gicv3_lrs[i] = cpu_if->vgic_lr[i]; + rec_run->enter.gicv3_lrs[i] = cpu_if->vgic_lr[i]; /* * Also populate the rec.run->exit copies so that a late * decision to back out from entering the realm doesn't cause * the state to be lost */ - vcpu->arch.rec.run->exit.gicv3_lrs[i] = cpu_if->vgic_lr[i]; + rec_run->exit.gicv3_lrs[i] = cpu_if->vgic_lr[i]; } } -- Gitee
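
[Editor's note, not part of the series] Patch 3/4 turns the generic realm entry points (kvm_rec_enter(), handle_rec_exit(), kvm_destroy_realm(), ...) into thin wrappers in cca_base.c that dispatch through a per-type struct cca_operations table, selected by cca_cvm_type. The type is chosen either by the `cca_cvm_type=` early parameter (the value is decremented, so `cca_cvm_type=1` selects VIRTCCA_CVM and `cca_cvm_type=2` selects ARMCCA_CVM) or by set_cca_cvm_type(), as done from setup_virtcca_cvm_host(). Patch 4/4 shows the ARM CCA side registering its table via armcca_register()/core_initcall. The fragment below is a minimal sketch of how any other backend would hook in; the callback names and bodies are hypothetical placeholders, and the callback signatures are inferred from their use in cca_base.c rather than taken from the (not fully shown) cca_base.h.

	/*
	 * Illustrative sketch only: registering a hypothetical backend with the
	 * cca_base dispatch layer.  Only struct cca_operations,
	 * cca_operations_register() and the VIRTCCA_CVM enumerator come from
	 * the patches above; everything named example_* is invented here.
	 */
	#include <linux/init.h>
	#include <linux/kvm_host.h>
	#include <asm/cca_base.h>

	static int example_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
	{
		return 0;	/* hypothetical: negotiate backend-specific caps */
	}

	static int example_init_vm(struct kvm *kvm)
	{
		return 0;	/* hypothetical: set up backend-private VM state */
	}

	static struct cca_operations example_operations = {
		.enable_cap	= example_enable_cap,
		.init_realm_vm	= example_init_vm,
		/*
		 * Callbacks left NULL are simply skipped: each wrapper in
		 * cca_base.c tests the individual function pointer before
		 * calling it.
		 */
	};

	static int __init example_cca_register(void)
	{
		/* Same pattern as armcca_register() at the end of rme.c. */
		return cca_operations_register(VIRTCCA_CVM, &example_operations);
	}
	core_initcall(example_cca_register);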