From df2ce7e841046f88c9a7e40ec7b12618f1e61b41 Mon Sep 17 00:00:00 2001
From: gongchangsui
Date: Mon, 24 Feb 2025 02:28:10 -0500
Subject: [PATCH] Revert "kvm: add virtcca cvm host feature"

This reverts commit 67e11ee6347c43a97e8987b03a5b3534cd8095d9.
---
 arch/arm64/configs/openeuler_defconfig |    3 +-
 arch/arm64/include/asm/kvm_emulate.h   |   18 -
 arch/arm64/include/asm/kvm_host.h      |   15 +-
 arch/arm64/include/asm/kvm_tmi.h       |  414 --------
 arch/arm64/include/asm/kvm_tmm.h       |   37 +-
 arch/arm64/include/uapi/asm/kvm.h      |    4 -
 arch/arm64/kvm/Kconfig                 |    6 +-
 arch/arm64/kvm/Makefile                |    3 -
 arch/arm64/kvm/arch_timer.c            |   93 --
 arch/arm64/kvm/arm.c                   |  110 +--
 arch/arm64/kvm/guest.c                 |    5 -
 arch/arm64/kvm/mmio.c                  |   14 +-
 arch/arm64/kvm/mmu.c                   |   10 +-
 arch/arm64/kvm/psci.c                  |   10 +-
 arch/arm64/kvm/reset.c                 |   11 -
 arch/arm64/kvm/tmi.c                   |  171 ----
 arch/arm64/kvm/vgic/vgic-v3.c          |   14 +-
 arch/arm64/kvm/vgic/vgic.c             |   52 +-
 arch/arm64/kvm/virtcca_cvm.c           | 1237 ------------------------
 arch/arm64/kvm/virtcca_cvm_exit.c      |  221 -----
 include/kvm/arm_arch_timer.h           |    4 -
 include/linux/kvm_host.h               |   22 -
 include/uapi/linux/kvm.h               |   13 -
 virt/kvm/kvm_main.c                    |    4 -
 24 files changed, 40 insertions(+), 2451 deletions(-)
 delete mode 100644 arch/arm64/include/asm/kvm_tmi.h
 delete mode 100644 arch/arm64/kvm/tmi.c
 delete mode 100644 arch/arm64/kvm/virtcca_cvm.c
 delete mode 100644 arch/arm64/kvm/virtcca_cvm_exit.c

diff --git a/arch/arm64/configs/openeuler_defconfig b/arch/arm64/configs/openeuler_defconfig
index 495cf74a0922..4e23c7ddc5d1 100644
--- a/arch/arm64/configs/openeuler_defconfig
+++ b/arch/arm64/configs/openeuler_defconfig
@@ -762,8 +762,7 @@ CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y
 CONFIG_KVM_HISI_VIRT=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=y
-CONFIG_HISI_VIRTCCA_HOST=y
-CONFIG_HISI_VIRTCCA_CODA=y
+CONFIG_CVM_HOST=y
 # CONFIG_NVHE_EL2_DEBUG is not set
 CONFIG_KVM_ARM_MULTI_LPI_TRANSLATE_CACHE=y
 CONFIG_ARCH_VCPU_STAT=y
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f0b10cb2c87d..3aa1c7f56f65 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -642,22 +642,4 @@ static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
 	kvm_write_cptr_el2(val);
 }
 
-
-#ifdef CONFIG_HISI_VIRTCCA_HOST
-static inline bool kvm_is_virtcca_cvm(struct kvm *kvm)
-{
-	if (static_branch_unlikely(&virtcca_cvm_is_available))
-		return kvm->arch.is_virtcca_cvm;
-	return false;
-}
-
-static inline enum virtcca_cvm_state virtcca_cvm_state(struct kvm *kvm)
-{
-	struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm;
-
-	if (!virtcca_cvm)
-		return 0;
-	return READ_ONCE(virtcca_cvm->state);
-}
-#endif
 #endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4db5ee87988a..131f178acb1f 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -27,7 +27,7 @@
 #include
 #include
 #include
-#ifdef CONFIG_HISI_VIRTCCA_HOST
+#ifdef CONFIG_CVM_HOST
 #include
 #endif
 
@@ -293,12 +293,9 @@ struct kvm_arch {
 	u64 tlbi_dvmbm;
 #endif
 
-#ifdef CONFIG_HISI_VIRTCCA_HOST
-	union {
-		struct cvm cvm;
-		struct virtcca_cvm *virtcca_cvm;
-	};
-	bool is_virtcca_cvm;
+#ifdef CONFIG_CVM_HOST
+	struct cvm cvm;
+	bool is_cvm;
 #endif
 };
 
@@ -640,8 +637,8 @@ struct kvm_vcpu_arch {
 	cpumask_var_t pre_sched_cpus;
 #endif
 
-#ifdef CONFIG_HISI_VIRTCCA_HOST
-	struct virtcca_cvm_tec tec;
+#ifdef CONFIG_CVM_HOST
+	struct cvm_tec tec;
 #endif
 };
 
diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h
deleted
file mode 100644 index cab2b4cdd0f2..000000000000 --- a/arch/arm64/include/asm/kvm_tmi.h +++ /dev/null @@ -1,414 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (c) 2024, The Linux Foundation. All rights reserved. - */ -#ifndef __TMM_TMI_H -#define __TMM_TMI_H -#ifdef CONFIG_HISI_VIRTCCA_HOST -#include -#include -#include -#include -#include - -#define NO_NUMA 0 /* numa bitmap */ - -#define TMM_TTT_LEVEL_2 2 -#define TMM_TTT_LEVEL_3 3 - -/* TMI error codes. */ -#define TMI_SUCCESS 0 -#define TMI_ERROR_INPUT 1 -#define TMI_ERROR_MEMORY 2 -#define TMI_ERROR_ALIAS 3 -#define TMI_ERROR_IN_USE 4 -#define TMI_ERROR_CVM_STATE 5 -#define TMI_ERROR_OWNER 6 -#define TMI_ERROR_TEC 7 -#define TMI_ERROR_TTT_WALK 8 -#define TMI_ERROR_TTT_ENTRY 9 -#define TMI_ERROR_NOT_SUPPORTED 10 -#define TMI_ERROR_INTERNAL 11 -#define TMI_ERROR_CVM_POWEROFF 12 -#define TMI_ERROR_TTT_CREATED 13 - -#define TMI_RETURN_STATUS(ret) ((ret) & 0xFF) -#define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF) - -#define TMI_FEATURE_REGISTER_0_S2SZ GENMASK(7, 0) -#define TMI_FEATURE_REGISTER_0_LPA2 BIT(8) -#define TMI_FEATURE_REGISTER_0_SVE_EN BIT(9) -#define TMI_FEATURE_REGISTER_0_SVE_VL GENMASK(13, 10) -#define TMI_FEATURE_REGISTER_0_NUM_BPS GENMASK(17, 14) -#define TMI_FEATURE_REGISTER_0_NUM_WPS GENMASK(21, 18) -#define TMI_FEATURE_REGISTER_0_PMU_EN BIT(22) -#define TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS GENMASK(27, 23) -#define TMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(28) -#define TMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(29) - -#define TMI_CVM_PARAM_FLAG_LPA2 BIT(0) -#define TMI_CVM_PARAM_FLAG_SVE BIT(1) -#define TMI_CVM_PARAM_FLAG_PMU BIT(2) - -#define TMI_NOT_RUNNABLE 0 -#define TMI_RUNNABLE 1 - -/* - * The number of GPRs (starting from X0) that are - * configured by the host when a TEC is created. - */ -#define TEC_CREATE_NR_GPRS (8U) - -struct tmi_tec_params { - uint64_t gprs[TEC_CREATE_NR_GPRS]; - uint64_t pc; - uint64_t flags; - uint64_t ram_size; -}; - -struct tmi_smmu_ste_params { - uint64_t ns_src; /* non-secure STE source address */ - uint64_t sid; /* stream id */ - uint64_t smmu_id; /* smmu id */ -}; - -struct tmi_smmu_cfg_params { - uint64_t smmu_id; /* smmu id */ - uint64_t ioaddr; /* smmu base address */ - uint8_t strtab_base_RA_bit : 1; /* Read-Allocate hint */ - uint8_t q_base_RA_WA_bit : 1; /* Write-Allocate hint*/ - uint8_t is_cmd_queue : 1; /* Whether to configure command queue */ -}; - -#define TMI_SMMU_CMD_QUEUE 1 -#define TMI_SMMU_EVT_QUEUE 2 -struct tmi_smmu_queue_params { - uint64_t smmu_base_addr; /* smmu base address */ - uint64_t size; /* queue size */ - uint64_t smmu_id; /* smmu id */ - uint64_t type; /* cmdq or evtq */ -}; - -#define MAX_DEV_PER_PORT 256 -struct tmi_dev_delegate_params { - /* BDF of PCIe root bus, F=0. BD are used to calculate APB base and port number. */ - uint16_t root_bd; - uint16_t num_dev; /* number of attachable devices */ - uint32_t _reserved; /* padding for 64-bit alignment */ - uint16_t devs[MAX_DEV_PER_PORT]; /* BDF of each attachable device */ -}; - -#define TEC_ENTRY_FLAG_EMUL_MMIO (1UL << 0U) -#define TEC_ENTRY_FLAG_INJECT_SEA (1UL << 1U) -#define TEC_ENTRY_FLAG_TRAP_WFI (1UL << 2U) -#define TEC_ENTRY_FLAG_TRAP_WFE (1UL << 3U) - -#define TMI_EXIT_SYNC 0 -#define TMI_EXIT_IRQ 1 -#define TMI_EXIT_FIQ 2 -#define TMI_EXIT_PSCI 3 -#define TMI_EXIT_HOST_CALL 5 -#define TMI_EXIT_SERROR 6 - -/* - * The number of GPRs (starting from X0) per voluntary exit context. - * Per SMCCC. 
- */ - #define TEC_EXIT_NR_GPRS (31U) - -/* Maximum number of Interrupt Controller List Registers. */ -#define TEC_GIC_NUM_LRS (16U) - -struct tmi_tec_entry { - uint64_t flags; - uint64_t gprs[TEC_EXIT_NR_GPRS]; - uint64_t gicv3_lrs[TEC_GIC_NUM_LRS]; - uint64_t gicv3_hcr; -}; - -struct tmi_tec_exit { - uint64_t exit_reason; - uint64_t esr; - uint64_t far; - uint64_t hpfar; - uint64_t gprs[TEC_EXIT_NR_GPRS]; - uint64_t gicv3_hcr; - uint64_t gicv3_lrs[TEC_GIC_NUM_LRS]; - uint64_t gicv3_misr; - uint64_t gicv3_vmcr; - uint64_t cntv_ctl; - uint64_t cntv_cval; - uint64_t cntp_ctl; - uint64_t cntp_cval; - uint64_t imm; - uint64_t pmu_ovf_status; -}; - -struct tmi_tec_run { - struct tmi_tec_entry tec_entry; - struct tmi_tec_exit tec_exit; -}; - -#define TMI_FNUM_MIN_VALUE U(0x150) -#define TMI_FNUM_MAX_VALUE U(0x18F) - -/****************************************************************************** - * Bit definitions inside the function id as per the SMC calling convention - ******************************************************************************/ -#define FUNCID_TYPE_SHIFT 31 -#define FUNCID_CC_SHIFT 30 -#define FUNCID_OEN_SHIFT 24 -#define FUNCID_NUM_SHIFT 0 - -#define FUNCID_TYPE_MASK 0x1 -#define FUNCID_CC_MASK 0x1 -#define FUNCID_OEN_MASK 0x3f -#define FUNCID_NUM_MASK 0xffff - -#define FUNCID_TYPE_WIDTH 1 -#define FUNCID_CC_WIDTH 1 -#define FUNCID_OEN_WIDTH 6 -#define FUNCID_NUM_WIDTH 16 - -#define SMC_64 1 -#define SMC_32 0 -#define SMC_TYPE_FAST 1 -#define SMC_TYPE_STD 0 - -/***************************************************************************** - * Owning entity number definitions inside the function id as per the SMC - * calling convention - *****************************************************************************/ -#define OEN_ARM_START 0 -#define OEN_ARM_END 0 -#define OEN_CPU_START 1 -#define OEN_CPU_END 1 -#define OEN_SIP_START 2 -#define OEN_SIP_END 2 -#define OEN_OEM_START 3 -#define OEN_OEM_END 3 -#define OEN_STD_START 4 /* Standard Calls */ -#define OEN_STD_END 4 -#define OEN_TAP_START 48 /* Trusted Applications */ -#define OEN_TAP_END 49 -#define OEN_TOS_START 50 /* Trusted OS */ -#define OEN_TOS_END 63 -#define OEN_LIMIT 64 - -/* Get TMI fastcall std FID from function number */ -#define TMI_FID(smc_cc, func_num) \ - ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \ - ((smc_cc) << FUNCID_CC_SHIFT) | \ - (OEN_STD_START << FUNCID_OEN_SHIFT) | \ - ((func_num) << FUNCID_NUM_SHIFT)) - -#define U(_x) (_x##U) - -#define TMI_NO_MEASURE_CONTENT U(0) -#define TMI_MEASURE_CONTENT U(1) - -#define CVM_IPA_MAX_VAL (1UL << 48) - -/* - * SMC_TMM_INIT_COMPLETE is the only function in the TMI that originates from - * the CVM world and is handled by the SPMD. The remaining functions are - * always invoked by the Normal world, forward by SPMD and handled by the - * TMM. 
- */ -#define TMI_FNUM_VERSION_REQ U(0x260) -#define TMI_FNUM_MEM_INFO_SHOW U(0x261) -#define TMI_FNUM_DATA_CREATE U(0x262) -#define TMI_FNUM_DATA_DESTROY U(0x263) -#define TMI_FNUM_CVM_ACTIVATE U(0x264) -#define TMI_FNUM_CVM_CREATE U(0x265) -#define TMI_FNUM_CVM_DESTROY U(0x266) -#define TMI_FNUM_TEC_CREATE U(0x267) -#define TMI_FNUM_TEC_DESTROY U(0x268) -#define TMI_FNUM_TEC_ENTER U(0x269) -#define TMI_FNUM_TTT_CREATE U(0x26A) -#define TMI_FNUM_PSCI_COMPLETE U(0x26B) -#define TMI_FNUM_FEATURES U(0x26C) -#define TMI_FNUM_TTT_MAP_RANGE U(0x26D) -#define TMI_FNUM_TTT_UNMAP_RANGE U(0x26E) -#define TMI_FNUM_INF_TEST U(0x270) - -#define TMI_FNUM_SMMU_QUEUE_CREATE U(0x277) -#define TMI_FNUM_SMMU_QUEUE_WRITE U(0x278) -#define TMI_FNUM_SMMU_STE_CREATE U(0x279) -#define TMI_FNUM_MMIO_MAP U(0x27A) -#define TMI_FNUM_MMIO_UNMAP U(0x27B) -#define TMI_FNUM_MMIO_WRITE U(0x27C) -#define TMI_FNUM_MMIO_READ U(0x27D) -#define TMI_FNUM_DEV_DELEGATE U(0x27E) -#define TMI_FNUM_DEV_ATTACH U(0x27F) -#define TMI_FNUM_HANDLE_S_EVTQ U(0x280) -#define TMI_FNUM_SMMU_DEVICE_RESET U(0x281) -#define TMI_FNUM_SMMU_WRITE U(0x282) -#define TMI_FNUM_SMMU_READ U(0x283) -#define TMI_FNUM_SMMU_PCIE_CORE_CHECK U(0x284) -#define TMI_FNUM_DEV_TTT_CREATE U(0x285) - -/* TMI SMC64 PIDs handled by the SPMD */ -#define TMI_TMM_VERSION_REQ TMI_FID(SMC_64, TMI_FNUM_VERSION_REQ) -#define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE) -#define TMI_TMM_DATA_DESTROY TMI_FID(SMC_64, TMI_FNUM_DATA_DESTROY) -#define TMI_TMM_CVM_ACTIVATE TMI_FID(SMC_64, TMI_FNUM_CVM_ACTIVATE) -#define TMI_TMM_CVM_CREATE TMI_FID(SMC_64, TMI_FNUM_CVM_CREATE) -#define TMI_TMM_CVM_DESTROY TMI_FID(SMC_64, TMI_FNUM_CVM_DESTROY) -#define TMI_TMM_TEC_CREATE TMI_FID(SMC_64, TMI_FNUM_TEC_CREATE) -#define TMI_TMM_TEC_DESTROY TMI_FID(SMC_64, TMI_FNUM_TEC_DESTROY) -#define TMI_TMM_TEC_ENTER TMI_FID(SMC_64, TMI_FNUM_TEC_ENTER) -#define TMI_TMM_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_TTT_CREATE) -#define TMI_TMM_PSCI_COMPLETE TMI_FID(SMC_64, TMI_FNUM_PSCI_COMPLETE) -#define TMI_TMM_FEATURES TMI_FID(SMC_64, TMI_FNUM_FEATURES) -#define TMI_TMM_MEM_INFO_SHOW TMI_FID(SMC_64, TMI_FNUM_MEM_INFO_SHOW) -#define TMI_TMM_TTT_MAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_MAP_RANGE) -#define TMI_TMM_TTT_UNMAP_RANGE TMI_FID(SMC_64, TMI_FNUM_TTT_UNMAP_RANGE) -#define TMI_TMM_INF_TEST TMI_FID(SMC_64, TMI_FNUM_INF_TEST) - -#define TMI_TMM_SMMU_QUEUE_CREATE TMI_FID(SMC_64, TMI_FNUM_SMMU_QUEUE_CREATE) -#define TMI_TMM_SMMU_QUEUE_WRITE TMI_FID(SMC_64, TMI_FNUM_SMMU_QUEUE_WRITE) -#define TMI_TMM_SMMU_STE_CREATE TMI_FID(SMC_64, TMI_FNUM_SMMU_STE_CREATE) -#define TMI_TMM_MMIO_MAP TMI_FID(SMC_64, TMI_FNUM_MMIO_MAP) -#define TMI_TMM_MMIO_UNMAP TMI_FID(SMC_64, TMI_FNUM_MMIO_UNMAP) -#define TMI_TMM_MMIO_WRITE TMI_FID(SMC_64, TMI_FNUM_MMIO_WRITE) -#define TMI_TMM_MMIO_READ TMI_FID(SMC_64, TMI_FNUM_MMIO_READ) -#define TMI_TMM_DEV_DELEGATE TMI_FID(SMC_64, TMI_FNUM_DEV_DELEGATE) -#define TMI_TMM_DEV_ATTACH TMI_FID(SMC_64, TMI_FNUM_DEV_ATTACH) -#define TMI_TMM_HANDLE_S_EVTQ TMI_FID(SMC_64, TMI_FNUM_HANDLE_S_EVTQ) -#define TMI_TMM_SMMU_DEVICE_RESET TMI_FID(SMC_64, TMI_FNUM_SMMU_DEVICE_RESET) -#define TMI_TMM_SMMU_WRITE TMI_FID(SMC_64, TMI_FNUM_SMMU_WRITE) -#define TMI_TMM_SMMU_READ TMI_FID(SMC_64, TMI_FNUM_SMMU_READ) -#define TMI_TMM_SMMU_PCIE_CORE_CHECK TMI_FID(SMC_64, TMI_FNUM_SMMU_PCIE_CORE_CHECK) -#define TMI_TMM_DEV_TTT_CREATE TMI_FID(SMC_64, TMI_FNUM_DEV_TTT_CREATE) - -#define TMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16) -#define TMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF) - 
-#define TMI_ABI_VERSION_MAJOR U(0x2) - -/* KVM_CAP_ARM_TMM on VM fd */ -#define KVM_CAP_ARM_TMM_CONFIG_CVM_HOST 0 -#define KVM_CAP_ARM_TMM_CREATE_RD 1 -#define KVM_CAP_ARM_TMM_POPULATE_CVM 2 -#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3 - -#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0 -#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1 - -#define KVM_CAP_ARM_TMM_RPV_SIZE 64 - -/* List of configuration items accepted for KVM_CAP_ARM_TMM_CONFIG_CVM_HOST */ -#define KVM_CAP_ARM_TMM_CFG_RPV 0 -#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1 -#define KVM_CAP_ARM_TMM_CFG_SVE 2 -#define KVM_CAP_ARM_TMM_CFG_DBG 3 -#define KVM_CAP_ARM_TMM_CFG_PMU 4 - -DECLARE_STATIC_KEY_FALSE(virtcca_cvm_is_available); - -struct kvm_cap_arm_tmm_config_item { - __u32 cfg; - union { - /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */ - struct { - __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE]; - }; - - /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */ - struct { - __u32 hash_algo; - }; - - /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */ - struct { - __u32 sve_vq; - }; - - /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */ - struct { - __u32 num_brps; - __u32 num_wrps; - }; - - /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */ - struct { - __u32 num_pmu_cntrs; - }; - /* Fix the size of the union */ - __u8 reserved[256]; - }; -}; - -#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) -struct kvm_cap_arm_tmm_populate_region_args { - __u64 populate_ipa_base1; - __u64 populate_ipa_size1; - __u64 populate_ipa_base2; - __u64 populate_ipa_size2; - __u32 flags; - __u32 reserved[3]; -}; - -static inline bool tmm_is_addr_ttt_level_aligned(uint64_t addr, int level) -{ - uint64_t mask = (1 << (12 + 9 * (3 - level))) - 1; - - return (addr & mask) == 0; -} - -#define ID_AA64PFR0_SEL2_MASK ULL(0xf) -#define ID_AA64PFR0_SEL2_SHIFT 36 - -static inline bool is_armv8_4_sel2_present(void) -{ - return ((read_sysreg(id_aa64pfr0_el1) >> ID_AA64PFR0_SEL2_SHIFT) & - ID_AA64PFR0_SEL2_MASK) == 1UL; -} - -u64 tmi_version(void); -u64 tmi_data_create(u64 data, u64 rd, u64 map_addr, u64 src, u64 level); -u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level); -u64 tmi_cvm_activate(u64 rd); -u64 tmi_cvm_create(u64 params_ptr, u64 numa_set); -u64 tmi_cvm_destroy(u64 rd); -u64 tmi_tec_create(u64 numa_set, u64 rd, u64 mpidr, u64 params_ptr); -u64 tmi_tec_destroy(u64 tec); -u64 tmi_tec_enter(u64 tec, u64 run_ptr); -u64 tmi_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level); -u64 tmi_psci_complete(u64 calling_tec, u64 target_tec); -u64 tmi_features(u64 index); -u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node); -u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id); -u64 tmi_mem_info_show(u64 mem_info_addr); - -u64 tmi_dev_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level); -u64 tmi_smmu_queue_create(u64 params_ptr); -u64 tmi_smmu_queue_write(uint64_t cmd0, uint64_t cmd1, u64 smmu_id); -u64 tmi_smmu_ste_create(u64 params_ptr); -u64 tmi_mmio_map(u64 rd, u64 map_addr, u64 level, u64 ttte); -u64 tmi_mmio_unmap(u64 rd, u64 map_addr, u64 level); -u64 tmi_mmio_write(u64 addr, u64 val, u64 bits, u64 dev_num); -u64 tmi_mmio_read(u64 addr, u64 bits, u64 dev_num); -u64 tmi_dev_delegate(u64 params); -u64 tmi_dev_attach(u64 vdev, u64 rd, u64 smmu_id); -u64 tmi_handle_s_evtq(u64 smmu_id); -u64 tmi_smmu_device_reset(u64 params); -u64 tmi_smmu_pcie_core_check(u64 smmu_base); -u64 tmi_smmu_write(u64 smmu_base, u64 reg_offset, u64 val, u64 bits); -u64 tmi_smmu_read(u64 smmu_base, u64 reg_offset, u64 bits); - -u64 mmio_va_to_pa(void *addr); -void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu); 
-int kvm_load_user_data(struct kvm *kvm, unsigned long arg); -unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu, - unsigned long target_affinity, unsigned long lowest_affinity_level); -int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu, - bool serror_pending, bool ext_dabt_pending); -int kvm_init_cvm_vm(struct kvm *kvm); -int kvm_enable_virtcca_cvm(struct kvm *kvm); -int kvm_cvm_map_ipa(struct kvm *kvm, phys_addr_t ipa, kvm_pfn_t pfn, - unsigned long map_size, enum kvm_pgtable_prot prot, int ret); -void virtcca_cvm_set_secure_flag(void *vdev, void *info); -#endif -#endif diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index a26cade21586..fa84b2688222 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -7,25 +7,8 @@ #include -/* - * There is a conflict with the internal iova of CVM, - * so it is necessary to offset the msi iova. - * According to qemu file(hw/arm/virt.c), 0x0a001000 - 0x0b000000 - * iova is not being used, so it is used as the iova range for msi - * mapping. - */ -#define CVM_MSI_ORIG_IOVA 0x8000000 -#define CVM_MSI_MIN_IOVA 0x0a001000 -#define CVM_MSI_MAX_IOVA 0x0b000000 -#define CVM_MSI_IOVA_OFFSET 0x1000 - -#define CVM_RW_8_BIT 0x8 -#define CVM_RW_16_BIT 0x10 -#define CVM_RW_32_BIT 0x20 -#define CVM_RW_64_BIT 0x40 - -enum virtcca_cvm_state { - CVM_STATE_NONE = 1, +enum cvm_state { + CVM_STATE_NONE, CVM_STATE_NEW, CVM_STATE_ACTIVE, CVM_STATE_DYING @@ -53,7 +36,7 @@ struct tmi_cvm_params { }; struct cvm { - enum virtcca_cvm_state state; + enum cvm_state state; u32 cvm_vmid; u64 rd; u64 loader_start; @@ -65,6 +48,7 @@ struct cvm { bool is_cvm; }; +<<<<<<< HEAD struct virtcca_cvm { enum virtcca_cvm_state state; u32 cvm_vmid; @@ -79,10 +63,12 @@ struct virtcca_cvm { bool is_mapped; /* Whether the cvm RAM memory is mapped */ }; +======= +>>>>>>> parent of 67e11ee6347c... 
kvm: add virtcca cvm host feature /* * struct cvm_tec - Additional per VCPU data for a CVM */ -struct virtcca_cvm_tec { +struct cvm_tec { u64 tec; bool tec_created; void *tec_run; @@ -95,14 +81,17 @@ struct cvm_ttt_addr { int kvm_init_tmm(void); int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap); +int kvm_init_cvm_vm(struct kvm *kvm); void kvm_destroy_cvm(struct kvm *kvm); -int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu); +int kvm_create_tec(struct kvm_vcpu *vcpu); void kvm_destroy_tec(struct kvm_vcpu *vcpu); int kvm_tec_enter(struct kvm_vcpu *vcpu); int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status); int kvm_arm_create_cvm(struct kvm *kvm); void kvm_free_rd(struct kvm *kvm); +int cvm_create_rd(struct kvm *kvm); int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target); +int kvm_arch_tec_init(struct kvm_vcpu *vcpu); void kvm_cvm_unmap_destroy_range(struct kvm *kvm); int kvm_cvm_map_range(struct kvm *kvm); @@ -115,8 +104,8 @@ int kvm_cvm_map_ipa_mmio(struct kvm *kvm, phys_addr_t ipa_base, bool is_in_virtcca_ram_range(struct kvm *kvm, uint64_t iova); bool is_virtcca_iova_need_vfio_dma(struct kvm *kvm, uint64_t iova); -#define CVM_TTT_BLOCK_LEVEL 2 -#define CVM_TTT_MAX_LEVEL 3 +#define CVM_TTT_BLOCK_LEVEL 2 +#define CVM_TTT_MAX_LEVEL 3 #define CVM_MAP_IPA_RAM 1 #define CVM_MAP_IPA_SMMU 2 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 97941e582d83..f7ddd73a8c0f 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -110,7 +110,6 @@ struct kvm_regs { #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ #define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */ -#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */ struct kvm_vcpu_init { __u32 target; @@ -416,9 +415,6 @@ enum { #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 #define KVM_DEV_ARM_ITS_CTRL_RESET 4 -#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256 0 -#define KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512 1 - /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 #define KVM_ARM_VCPU_PMU_V3_IRQ 0 diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 52edbd7f6340..1fa6fba6077c 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -49,11 +49,11 @@ menuconfig KVM If unsure, say N. -config HISI_VIRTCCA_HOST - bool "Enable virtcca cvm host feature" +config CVM_HOST + bool "Enable cvm host feature" depends on KVM help - Support VIRTCCA CVM based on S-EL2 + Support CVM based on S-EL2 If unsure, say N. 
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index eadf41417ffa..952eee572e23 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile @@ -24,9 +24,6 @@ kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o pvsched.o \ kvm-$(CONFIG_VIRT_PLAT_DEV) += vgic/shadow_dev.o kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o -kvm-$(CONFIG_HISI_VIRTCCA_HOST) += tmi.o -kvm-$(CONFIG_HISI_VIRTCCA_HOST) += virtcca_cvm.o -kvm-$(CONFIG_HISI_VIRTCCA_HOST) += virtcca_cvm_exit.o obj-$(CONFIG_KVM_HISI_VIRT) += hisilicon/ always-y := hyp_constants.h hyp-constants.s diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 3f1129d3df6a..43957fce50f6 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -176,77 +175,8 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) } } -#ifdef CONFIG_HISI_VIRTCCA_HOST -static bool cvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx) -{ - return timer_ctx && - ((timer_get_ctl(timer_ctx) & - (ARCH_TIMER_CTRL_IT_MASK | ARCH_TIMER_CTRL_ENABLE)) == ARCH_TIMER_CTRL_ENABLE); -} - -void kvm_cvm_timers_update(struct kvm_vcpu *vcpu) -{ - int i; - u64 cval, now; - bool status, level; - struct arch_timer_context *timer; - struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu; - - for (i = 0; i < NR_KVM_TIMERS; i++) { - timer = &arch_timer->timers[i]; - - if (!timer->loaded) { - if (!cvm_timer_irq_can_fire(timer)) - continue; - cval = timer_get_cval(timer); - now = kvm_phys_timer_read() - timer_get_offset(timer); - level = (cval <= now); - kvm_timer_update_irq(vcpu, level, timer); - } else { - status = timer_get_ctl(timer) & ARCH_TIMER_CTRL_IT_STAT; - level = cvm_timer_irq_can_fire(timer) && status; - if (level != timer->irq.level) - kvm_timer_update_irq(vcpu, level, timer); - } - } -} - -static void set_cvm_timers_loaded(struct kvm_vcpu *vcpu, bool loaded) -{ - int i; - struct arch_timer_cpu *arch_timer = &vcpu->arch.timer_cpu; - - for (i = 0; i < NR_KVM_TIMERS; i++) { - struct arch_timer_context *timer = &arch_timer->timers[i]; - - timer->loaded = loaded; - } -} - -static void kvm_timer_blocking(struct kvm_vcpu *vcpu); -static void kvm_timer_unblocking(struct kvm_vcpu *vcpu); - -static inline void cvm_vcpu_load_timer_callback(struct kvm_vcpu *vcpu) -{ - kvm_cvm_timers_update(vcpu); - kvm_timer_unblocking(vcpu); - set_cvm_timers_loaded(vcpu, true); -} - -static inline void cvm_vcpu_put_timer_callback(struct kvm_vcpu *vcpu) -{ - set_cvm_timers_loaded(vcpu, false); - if (rcuwait_active(kvm_arch_vcpu_get_wait(vcpu))) - kvm_timer_blocking(vcpu); -} -#endif - static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset) { -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (kvm_is_virtcca_cvm(ctxt->vcpu->kvm)) - return; -#endif if (!ctxt->offset.vm_offset) { WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt)); return; @@ -953,13 +883,6 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct timer_map map; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - cvm_vcpu_load_timer_callback(vcpu); - return; - } -#endif - if (unlikely(!timer->enabled)) return; @@ -1058,13 +981,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct timer_map map; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - cvm_vcpu_put_timer_callback(vcpu); - return; - } -#endif - if (unlikely(!timer->enabled)) return; @@ 
-1850,15 +1766,6 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) return -EINVAL; } -#ifdef CONFIG_HISI_VIRTCCA_HOST - /* - * We don't use mapped IRQs for CVM because the TMI doesn't allow - * us setting the LR.HW bit in the VGIC. - */ - if (vcpu_is_tec(vcpu)) - return 0; -#endif - get_timer_map(vcpu, &map); #ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 9a61ccd41b40..a31aaf6b35d7 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include @@ -175,12 +174,6 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, } mutex_unlock(&kvm->slots_lock); break; -#ifdef CONFIG_HISI_VIRTCCA_HOST - case KVM_CAP_ARM_TMM: - if (static_branch_unlikely(&virtcca_cvm_is_available)) - r = kvm_cvm_enable_cap(kvm, cap); - break; -#endif default: r = -EINVAL; break; @@ -202,14 +195,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { int ret; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (kvm_arm_cvm_type(type)) { - ret = kvm_enable_virtcca_cvm(kvm); - if (ret) - return ret; - } -#endif - ret = kvm_sched_affinity_vm_init(kvm); if (ret) return ret; @@ -256,20 +241,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (kvm_arm_cvm_type(type)) { - ret = kvm_init_cvm_vm(kvm); - if (ret) - goto out_free_stage2_pgd; - } -#endif - return 0; -#ifdef CONFIG_HISI_VIRTCCA_HOST -out_free_stage2_pgd: - kvm_free_stage2_pgd(&kvm->arch.mmu); -#endif err_free_cpumask: free_cpumask_var(kvm->arch.supported_cpus); err_unshare_kvm: @@ -304,10 +277,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_unshare_hyp(kvm, kvm + 1); kvm_arm_teardown_hypercalls(kvm); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (kvm_is_virtcca_cvm(kvm)) - kvm_destroy_cvm(kvm); -#endif } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) @@ -379,12 +348,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = system_supports_mte(); break; case KVM_CAP_STEAL_TIME: -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (kvm && kvm_is_virtcca_cvm(kvm)) - r = 0; - else -#endif - r = kvm_arm_pvtime_supported(); + r = kvm_arm_pvtime_supported(); break; case KVM_CAP_ARM_EL1_32BIT: r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1); @@ -424,15 +388,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ARM_VIRT_MSI_BYPASS: r = sdev_enable; break; -#endif -#ifdef CONFIG_HISI_VIRTCCA_HOST - case KVM_CAP_ARM_TMM: - if (!is_armv8_4_sel2_present()) { - r = -ENXIO; - break; - } - r = static_key_enabled(&virtcca_cvm_is_available); - break; #endif default: r = 0; @@ -580,23 +535,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu->cpu = cpu; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - if (single_task_running()) - vcpu_clear_wfx_traps(vcpu); - else - vcpu_set_wfx_traps(vcpu); - } -#endif kvm_vgic_load(vcpu); kvm_timer_vcpu_load(vcpu); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) - kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); - return; - } -#endif if (has_vhe()) kvm_vcpu_load_sysregs_vhe(vcpu); kvm_arch_vcpu_load_fp(vcpu); @@ -627,12 +567,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - kvm_cvm_vcpu_put(vcpu); - return; - } -#endif kvm_arch_vcpu_put_debug_state_flags(vcpu); kvm_arch_vcpu_put_fp(vcpu); if (has_vhe()) @@ -807,9 
+741,6 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) * Tell the rest of the code that there are userspace irqchip * VMs in the wild. */ -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (!kvm_is_virtcca_cvm(kvm)) -#endif static_branch_inc(&userspace_irqchip_in_use); } @@ -1190,12 +1121,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) trace_kvm_entry(*vcpu_pc(vcpu)); guest_timing_enter_irqoff(); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - ret = kvm_tec_enter(vcpu); - else -#endif - ret = kvm_arm_vcpu_enter_exit(vcpu); + ret = kvm_arm_vcpu_enter_exit(vcpu); vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->stat.exits++; @@ -1249,17 +1175,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) local_irq_enable(); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (!vcpu_is_tec(vcpu)) { -#endif - trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); + trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); - /* Exit types that need handling before we can be preempted */ - handle_exit_early(vcpu, ret); + /* Exit types that need handling before we can be preempted */ + handle_exit_early(vcpu, ret); -#ifdef CONFIG_HISI_VIRTCCA_HOST - } -#endif preempt_enable(); /* @@ -1281,12 +1201,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ret = ARM_EXCEPTION_IL; } -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - ret = handle_cvm_exit(vcpu, ret); - else -#endif - ret = handle_exit(vcpu, ret); + ret = handle_exit(vcpu, ret); #ifdef CONFIG_ARCH_VCPU_STAT update_vcpu_stat_time(&vcpu->stat); #endif @@ -1799,11 +1714,6 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) struct kvm_device_attr attr; switch (ioctl) { -#ifdef CONFIG_HISI_VIRTCCA_HOST - case KVM_LOAD_USER_DATA: { - return kvm_load_user_data(kvm, arg); - } -#endif case KVM_CREATE_IRQCHIP: { int ret; if (!vgic_present) @@ -2690,14 +2600,6 @@ static __init int kvm_arm_init(void) in_hyp_mode = is_kernel_in_hyp_mode(); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (is_virtcca_cvm_enable() && in_hyp_mode) { - err = kvm_init_tmm(); - if (err) - return err; - } -#endif - if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || cpus_have_final_cap(ARM64_WORKAROUND_1508412)) kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \ diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 105f4e00ec8b..d3161a683838 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -26,7 +26,6 @@ #include #include #include -#include #include "trace.h" @@ -876,10 +875,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, bool has_esr = events->exception.serror_has_esr; bool ext_dabt_pending = events->exception.ext_dabt_pending; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - return kvm_cvm_vcpu_set_events(vcpu, serror_pending, ext_dabt_pending); -#endif if (serror_pending && has_esr) { if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) return -EINVAL; diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c index bf5de05cb7a8..9b3e0aae5c40 100644 --- a/arch/arm64/kvm/mmio.c +++ b/arch/arm64/kvm/mmio.c @@ -6,7 +6,6 @@ #include #include -#include #include #include "trace.h" @@ -138,12 +137,6 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu) &data); data = vcpu_data_host_to_guest(vcpu, data, len); vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)-> - tec_entry.gprs[0] = data; - } -#endif } /* @@ -213,12 +206,7 @@ int 
io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) run->mmio.phys_addr = fault_ipa; run->mmio.len = len; vcpu->mmio_needed = 1; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_entry.flags |= - TEC_ENTRY_FLAG_EMUL_MMIO; - } -#endif + if (!ret) { /* We handled the access successfully in the kernel. */ if (!is_write) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 68efca6ef24e..afc0924c4f49 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -873,11 +873,13 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t u64 mmfr0, mmfr1; u32 phys_shift; +<<<<<<< HEAD #ifdef CONFIG_HISI_VIRTCCA_CODA if ((type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) && (!kvm_is_virtcca_cvm(kvm))) #else +======= +>>>>>>> parent of 67e11ee6347c... kvm: add virtcca cvm host feature if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) -#endif return -EINVAL; phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); @@ -1420,12 +1422,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, fault_granule = 1UL << ARM64_HW_PGTABLE_LEVEL_SHIFT(fault_level); write_fault = kvm_is_write_fault(vcpu); -#ifdef CONFIG_HISI_VIRTCCA_CODA - if (vcpu_is_tec(vcpu)) { - write_fault = true; - prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W; - } -#endif exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); VM_BUG_ON(write_fault && exec_fault); vcpu->stat.mabt_exit_stat++; diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c index b544418b68ed..1f69b667332b 100644 --- a/arch/arm64/kvm/psci.c +++ b/arch/arm64/kvm/psci.c @@ -12,7 +12,6 @@ #include #include -#include #include #include @@ -80,10 +79,6 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) return PSCI_RET_INVALID_PARAMS; spin_lock(&vcpu->arch.mp_state_lock); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - cvm_psci_complete(source_vcpu, vcpu); -#endif if (!kvm_arm_vcpu_stopped(vcpu)) { if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) ret = PSCI_RET_ALREADY_ON; @@ -146,10 +141,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) /* Ignore other bits of target affinity */ target_affinity &= target_affinity_mask; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - return cvm_psci_vcpu_affinity_info(vcpu, target_affinity, lowest_affinity_level); -#endif + /* * If one or more VCPU matching target affinity are running * then ON else OFF diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index d38e74db97c2..7a65a35ee4ac 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -29,7 +29,6 @@ #include #include #include -#include /* Maximum phys_shift supported for any VM on this host */ static u32 __ro_after_init kvm_ipa_limit; @@ -140,12 +139,6 @@ int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) return -EPERM; return kvm_vcpu_finalize_sve(vcpu); -#ifdef CONFIG_HISI_VIRTCCA_HOST - case KVM_ARM_VCPU_TEC: - if (!kvm_is_virtcca_cvm(vcpu->kvm)) - return -EINVAL; - return kvm_finalize_vcpu_tec(vcpu); -#endif } return -EINVAL; @@ -169,10 +162,6 @@ void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu)); kfree(sve_state); kfree(vcpu->arch.ccsidr); -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - kvm_destroy_tec(vcpu); -#endif } static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c deleted file mode 100644 index 43595e9373e2..000000000000 --- 
a/arch/arm64/kvm/tmi.c +++ /dev/null @@ -1,171 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, The Linux Foundation. All rights reserved. - */ -#include -#include -#include - -/** - * mmio_va_to_pa - To convert the virtual address of the mmio space - * to a physical address, it is necessary to implement this interface - * because the kernel insterface __pa has an error when converting the - * physical address of the virtual address of the mmio space - * @addr: MMIO virtual address - */ -u64 mmio_va_to_pa(void *addr) -{ - uint64_t pa, par_el1; - - asm volatile( - "AT S1E1W, %0\n" - ::"r"((uint64_t)(addr)) - ); - isb(); - asm volatile( - "mrs %0, par_el1\n" - : "=r"(par_el1) - ); - - pa = ((uint64_t)(addr) & (PAGE_SIZE - 1)) | - (par_el1 & ULL(0x000ffffffffff000)); - - if (par_el1 & UL(1 << 0)) - return (uint64_t)(addr); - else - return pa; -} -EXPORT_SYMBOL(mmio_va_to_pa); - -u64 tmi_version(void) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_VERSION_REQ, &res); - return res.a1; -} - -u64 tmi_data_create(u64 numa_set, u64 rd, u64 map_addr, u64 src, u64 level) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_DATA_CREATE, numa_set, rd, map_addr, src, level, &res); - return res.a1; -} - -u64 tmi_data_destroy(u64 rd, u64 map_addr, u64 level) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_DATA_DESTROY, rd, map_addr, level, &res); - return res.a1; -} - -u64 tmi_cvm_activate(u64 rd) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_CVM_ACTIVATE, rd, &res); - return res.a1; -} - -u64 tmi_cvm_create(u64 params_ptr, u64 numa_set) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_CVM_CREATE, params_ptr, numa_set, &res); - return res.a1; -} - -u64 tmi_cvm_destroy(u64 rd) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_CVM_DESTROY, rd, &res); - return res.a1; -} - -u64 tmi_tec_create(u64 numa_set, u64 rd, u64 mpidr, u64 params_ptr) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TEC_CREATE, numa_set, rd, mpidr, params_ptr, &res); - return res.a1; -} - -u64 tmi_tec_destroy(u64 tec) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TEC_DESTROY, tec, &res); - return res.a1; -} - -u64 tmi_tec_enter(u64 tec, u64 run_ptr) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TEC_ENTER, tec, run_ptr, &res); - return res.a1; -} - -u64 tmi_ttt_create(u64 numa_set, u64 rd, u64 map_addr, u64 level) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TTT_CREATE, numa_set, rd, map_addr, level, &res); - return res.a1; -} - -u64 tmi_psci_complete(u64 calling_tec, u64 target_tec) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_PSCI_COMPLETE, calling_tec, target_tec, &res); - return res.a1; -} - -u64 tmi_features(u64 index) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_FEATURES, index, &res); - return res.a1; -} - -u64 tmi_mem_info_show(u64 mem_info_addr) -{ - struct arm_smccc_res res; - u64 pa_addr = __pa(mem_info_addr); - - arm_smccc_1_1_smc(TMI_TMM_MEM_INFO_SHOW, pa_addr, &res); - return res.a1; -} -EXPORT_SYMBOL_GPL(tmi_mem_info_show); - -u64 tmi_ttt_map_range(u64 rd, u64 map_addr, u64 size, u64 cur_node, u64 target_node) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TTT_MAP_RANGE, rd, map_addr, size, cur_node, target_node, &res); - return res.a1; -} - -u64 tmi_ttt_unmap_range(u64 rd, u64 map_addr, u64 size, u64 node_id) -{ - struct arm_smccc_res res; - - arm_smccc_1_1_smc(TMI_TMM_TTT_UNMAP_RANGE, rd, map_addr, size, node_id, 
&res); - return res.a1; -} - -u64 tmi_tmm_inf_test(u64 x1, u64 x2, u64 x3, u64 x4, u64 x5) -{ - struct arm_smccc_res res; - u64 vttbr_el2_pa = __pa(x2); - u64 cvm_params_pa = __pa(x3); - u64 tec_params_pa = __pa(x4); - - arm_smccc_1_1_smc(TMI_TMM_INF_TEST, x1, vttbr_el2_pa, cvm_params_pa, tec_params_pa, x5, &res); - return res.a1; -} diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index dab599e857b5..69ca111e349d 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -10,7 +10,6 @@ #include #include #include -#include #include "vgic.h" @@ -681,10 +680,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info) (unsigned long long)info->vcpu.start); } else if (kvm_get_mode() != KVM_MODE_PROTECTED) { kvm_vgic_global_state.vcpu_base = info->vcpu.start; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (!static_branch_unlikely(&virtcca_cvm_is_available)) -#endif - kvm_vgic_global_state.can_emulate_gicv2 = true; + kvm_vgic_global_state.can_emulate_gicv2 = true; ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); if (ret) { kvm_err("Cannot register GICv2 KVM device.\n"); @@ -764,13 +760,7 @@ void vgic_v3_load(struct kvm_vcpu *vcpu) void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu) { struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) { - cpu_if->vgic_vmcr = - ((struct tmi_tec_run *)vcpu->arch.tec.tec_run)->tec_exit.gicv3_vmcr; - return; - } -#endif + if (likely(cpu_if->vgic_sre)) cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr); } diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index ec110006acf5..2459b0adea08 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -11,7 +11,6 @@ #include #include -#include #include "vgic.h" @@ -898,44 +897,12 @@ static inline bool can_access_vgic_from_kernel(void) return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe(); } -#ifdef CONFIG_HISI_VIRTCCA_HOST -static inline void vgic_tmm_save_state(struct kvm_vcpu *vcpu) -{ - int i; - struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; - struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run; - - for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) { - cpu_if->vgic_lr[i] = tec_run->tec_exit.gicv3_lrs[i]; - tec_run->tec_entry.gicv3_lrs[i] = 0; - } -} - -static inline void vgic_tmm_restore_state(struct kvm_vcpu *vcpu) -{ - int i; - struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3; - struct tmi_tec_run *tec_run = vcpu->arch.tec.tec_run; - - for (i = 0; i < kvm_vgic_global_state.nr_lr; ++i) { - tec_run->tec_entry.gicv3_lrs[i] = cpu_if->vgic_lr[i]; - tec_run->tec_exit.gicv3_lrs[i] = cpu_if->vgic_lr[i]; - } -} -#endif - static inline void vgic_save_state(struct kvm_vcpu *vcpu) { if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_save_state(vcpu); else -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - vgic_tmm_save_state(vcpu); - else -#endif - __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); - + __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); } /* Sync back the hardware VGIC state into our emulation after a guest's run. 
*/ @@ -965,12 +932,7 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu) if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) vgic_v2_restore_state(vcpu); else -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - vgic_tmm_restore_state(vcpu); - else -#endif - __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); + __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); } /* Flush our emulation state into the GIC hardware before entering the guest. */ @@ -1011,10 +973,7 @@ void kvm_vgic_load(struct kvm_vcpu *vcpu) { if (unlikely(!vgic_initialized(vcpu->kvm))) return; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - return; -#endif + if (kvm_vgic_global_state.type == VGIC_V2) vgic_v2_load(vcpu); else @@ -1025,10 +984,7 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu) { if (unlikely(!vgic_initialized(vcpu->kvm))) return; -#ifdef CONFIG_HISI_VIRTCCA_HOST - if (vcpu_is_tec(vcpu)) - return; -#endif + if (kvm_vgic_global_state.type == VGIC_V2) vgic_v2_put(vcpu); else diff --git a/arch/arm64/kvm/virtcca_cvm.c b/arch/arm64/kvm/virtcca_cvm.c deleted file mode 100644 index cfea9cfe70a6..000000000000 --- a/arch/arm64/kvm/virtcca_cvm.c +++ /dev/null @@ -1,1237 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, The Linux Foundation. All rights reserved. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -/* Protects access to cvm_vmid_bitmap */ -static DEFINE_SPINLOCK(cvm_vmid_lock); -static unsigned long *cvm_vmid_bitmap; -DEFINE_STATIC_KEY_FALSE(virtcca_cvm_is_available); -#define SIMD_PAGE_SIZE 0x3000 - -int kvm_enable_virtcca_cvm(struct kvm *kvm) -{ - if (!static_key_enabled(&virtcca_cvm_is_available)) - return -EFAULT; - - kvm->arch.is_virtcca_cvm = true; - return 0; -} - -static int cvm_vmid_init(void) -{ - unsigned int vmid_count = 1 << kvm_get_vmid_bits(); - - cvm_vmid_bitmap = bitmap_zalloc(vmid_count, GFP_KERNEL); - if (!cvm_vmid_bitmap) { - kvm_err("%s: Couldn't allocate cvm vmid bitmap\n", __func__); - return -ENOMEM; - } - return 0; -} - -static unsigned long tmm_feat_reg0; - -static bool tmm_supports(unsigned long feature) -{ - return !!u64_get_bits(tmm_feat_reg0, feature); -} - -bool kvm_cvm_supports_sve(void) -{ - return tmm_supports(TMI_FEATURE_REGISTER_0_SVE_EN); -} - -bool kvm_cvm_supports_pmu(void) -{ - return tmm_supports(TMI_FEATURE_REGISTER_0_PMU_EN); -} - -u32 kvm_cvm_ipa_limit(void) -{ - return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_S2SZ); -} - -u32 kvm_cvm_get_num_brps(void) -{ - return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_BPS); -} - -u32 kvm_cvm_get_num_wrps(void) -{ - return u64_get_bits(tmm_feat_reg0, TMI_FEATURE_REGISTER_0_NUM_WPS); -} - -static int cvm_vmid_reserve(void) -{ - int ret; - unsigned int vmid_count = 1 << kvm_get_vmid_bits(); - - spin_lock(&cvm_vmid_lock); - ret = bitmap_find_free_region(cvm_vmid_bitmap, vmid_count, 0); - spin_unlock(&cvm_vmid_lock); - - return ret; -} - -static void cvm_vmid_release(unsigned int vmid) -{ - spin_lock(&cvm_vmid_lock); - bitmap_release_region(cvm_vmid_bitmap, vmid, 0); - spin_unlock(&cvm_vmid_lock); -} - -static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) -{ - u64 shift = ARM64_HW_PGTABLE_LEVEL_SHIFT(pgt->start_level - 1); - u64 mask = BIT(pgt->ia_bits) - 1; - - return (addr & mask) >> shift; -} - -static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level) -{ - struct kvm_pgtable pgt = { - .ia_bits = ia_bits, - .start_level = 
start_level, - }; - return __kvm_pgd_page_idx(&pgt, -1ULL) + 1; -} - -/* - * the configurable physical numa range in QEMU is 0-127, - * but in real scenarios, 0-63 is sufficient. - */ -static u64 kvm_get_host_numa_set_by_vcpu(u64 vcpu, struct kvm *kvm) -{ - int64_t i; - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_numa_info *numa_info = &cvm->numa_info; - - for (i = 0; i < numa_info->numa_cnt && i < MAX_NUMA_NODE; i++) { - if (test_bit(vcpu, (unsigned long *)numa_info->numa_nodes[i].cpu_id)) - return numa_info->numa_nodes[i].host_numa_nodes[0]; - } - return NO_NUMA; -} - -static u64 kvm_get_first_binded_numa_set(struct kvm *kvm) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_numa_info *numa_info = &cvm->numa_info; - - if (numa_info->numa_cnt > 0) - return numa_info->numa_nodes[0].host_numa_nodes[0]; - return NO_NUMA; -} - -int kvm_arm_create_cvm(struct kvm *kvm) -{ - int ret; - struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; - unsigned int pgd_sz; - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - /* get affine host numa set by default vcpu 0 */ - u64 numa_set = kvm_get_host_numa_set_by_vcpu(0, kvm); - - if (!kvm_is_virtcca_cvm(kvm) || virtcca_cvm_state(kvm) != CVM_STATE_NONE) - return 0; - - if (!cvm->params) { - ret = -EFAULT; - goto out; - } - - ret = cvm_vmid_reserve(); - if (ret < 0) - goto out; - - cvm->cvm_vmid = ret; - - pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level); - - cvm->params->ttt_level_start = kvm->arch.mmu.pgt->start_level; - cvm->params->ttt_num_start = pgd_sz; - cvm->params->s2sz = VTCR_EL2_IPA(kvm->arch.vtcr); - cvm->params->vmid = cvm->cvm_vmid; - cvm->params->ns_vtcr = kvm->arch.vtcr; - cvm->params->vttbr_el2 = kvm->arch.mmu.pgd_phys; - memcpy(cvm->params->rpv, &cvm->cvm_vmid, sizeof(cvm->cvm_vmid)); - cvm->rd = tmi_cvm_create(__pa(cvm->params), numa_set); - if (!cvm->rd) { - kvm_err("KVM creates cVM failed: %d\n", cvm->cvm_vmid); - ret = -ENOMEM; - goto out; - } - - WRITE_ONCE(cvm->state, CVM_STATE_NEW); - ret = 0; -out: - kfree(cvm->params); - cvm->params = NULL; - if (ret < 0) { - kfree(cvm); - kvm->arch.virtcca_cvm = NULL; - } - return ret; -} - -void kvm_destroy_cvm(struct kvm *kvm) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - uint32_t cvm_vmid; -#ifdef CONFIG_HISI_VIRTCCA_CODA - struct arm_smmu_domain *arm_smmu_domain; - struct list_head smmu_domain_group_list; -#endif - - if (!cvm) - return; - -#ifdef CONFIG_HISI_VIRTCCA_CODA - /* Unmap the cvm with arm smmu domain */ - kvm_get_arm_smmu_domain(kvm, &smmu_domain_group_list); - list_for_each_entry(arm_smmu_domain, &smmu_domain_group_list, node) { - if (arm_smmu_domain->kvm && arm_smmu_domain->kvm == kvm) - arm_smmu_domain->kvm = NULL; - } -#endif - - cvm_vmid = cvm->cvm_vmid; - kfree(cvm->params); - cvm->params = NULL; - - if (virtcca_cvm_state(kvm) == CVM_STATE_NONE) - return; - - cvm_vmid_release(cvm_vmid); - - WRITE_ONCE(cvm->state, CVM_STATE_DYING); - - if (!tmi_cvm_destroy(cvm->rd)) - kvm_info("KVM has destroyed cVM: %d\n", cvm->cvm_vmid); - - cvm->is_mapped = false; - kfree(cvm); - kvm->arch.virtcca_cvm = NULL; -} - -static int kvm_cvm_ttt_create(struct virtcca_cvm *cvm, - unsigned long addr, - int level, - u64 numa_set) -{ - addr = ALIGN_DOWN(addr, cvm_ttt_level_mapsize(level - 1)); - return tmi_ttt_create(numa_set, cvm->rd, addr, level); -} - -int kvm_cvm_create_ttt_levels(struct kvm *kvm, struct virtcca_cvm *cvm, - unsigned long ipa, - int level, - int max_level, - struct kvm_mmu_memory_cache *mc) -{ - int ret = 0; - if (WARN_ON(level == max_level)) 
- return 0; - - while (level++ < max_level) { - u64 numa_set = kvm_get_first_binded_numa_set(kvm); - - ret = kvm_cvm_ttt_create(cvm, ipa, level, numa_set); - if (ret) - return -ENXIO; - } - - return 0; -} - -static int kvm_cvm_create_protected_data_page(struct kvm *kvm, struct virtcca_cvm *cvm, - unsigned long ipa, int level, struct page *src_page, u64 numa_set) -{ - phys_addr_t src_phys = 0; - int ret; - - if (src_page) - src_phys = page_to_phys(src_page); - ret = tmi_data_create(numa_set, cvm->rd, ipa, src_phys, level); - - if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) { - /* Create missing RTTs and retry */ - int level_fault = TMI_RETURN_INDEX(ret); - - ret = kvm_cvm_create_ttt_levels(kvm, cvm, ipa, level_fault, - level, NULL); - if (ret) - goto err; - ret = tmi_data_create(numa_set, cvm->rd, ipa, src_phys, level); - } - if (ret) - goto err; - - return 0; - -err: - kvm_err("Cvm create protected data page fail:%d\n", ret); - return ret; -} - -static u64 cvm_granule_size(u32 level) -{ - return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level)); -} - -static bool is_data_create_region(phys_addr_t ipa_base, - struct kvm_cap_arm_tmm_populate_region_args *args) -{ - if ((ipa_base >= args->populate_ipa_base1 && - ipa_base < args->populate_ipa_base1 + args->populate_ipa_size1) || - (ipa_base >= args->populate_ipa_base2 && - ipa_base < args->populate_ipa_base2 + args->populate_ipa_size2)) - return true; - return false; -} - -int kvm_cvm_populate_par_region(struct kvm *kvm, u64 numa_set, - phys_addr_t ipa_base, phys_addr_t ipa_end, - struct kvm_cap_arm_tmm_populate_region_args *args) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_memory_slot *memslot; - gfn_t base_gfn, end_gfn; - int idx; - phys_addr_t ipa; - int ret = 0; - int level = TMM_TTT_LEVEL_3; - unsigned long map_size = cvm_granule_size(level); - - base_gfn = gpa_to_gfn(ipa_base); - end_gfn = gpa_to_gfn(ipa_end); - - idx = srcu_read_lock(&kvm->srcu); - memslot = gfn_to_memslot(kvm, base_gfn); - if (!memslot) { - ret = -EFAULT; - goto out; - } - - /* We require the region to be contained within a single memslot */ - if (memslot->base_gfn + memslot->npages < end_gfn) { - ret = -EINVAL; - goto out; - } - - mmap_read_lock(current->mm); - - ipa = ipa_base; - while (ipa < ipa_end) { - struct page *page = NULL; - kvm_pfn_t pfn = 0; - - /* - * FIXME: This causes over mapping, but there's no good - * solution here with the ABI as it stands - */ - ipa = ALIGN_DOWN(ipa, map_size); - - if (is_data_create_region(ipa, args)) { - pfn = gfn_to_pfn_memslot(memslot, gpa_to_gfn(ipa)); - if (is_error_pfn(pfn)) { - ret = -EFAULT; - break; - } - - page = pfn_to_page(pfn); - } - - ret = kvm_cvm_create_protected_data_page(kvm, cvm, ipa, level, page, numa_set); - if (ret) - goto err_release_pfn; - - ipa += map_size; - if (pfn) - kvm_release_pfn_dirty(pfn); -err_release_pfn: - if (ret) { - if (pfn) - kvm_release_pfn_clean(pfn); - break; - } - } - - mmap_read_unlock(current->mm); -out: - srcu_read_unlock(&kvm->srcu, idx); - return ret; -} - -int kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu) -{ - int ret = 0; - int i; - u64 numa_set; - struct tmi_tec_params *params_ptr = NULL; - struct user_pt_regs *vcpu_regs = vcpu_gp_regs(vcpu); - u64 mpidr = kvm_vcpu_get_mpidr_aff(vcpu); - struct virtcca_cvm *cvm = vcpu->kvm->arch.virtcca_cvm; - struct virtcca_cvm_tec *tec = &vcpu->arch.tec; - - mutex_lock(&vcpu->kvm->lock); - tec->tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!tec->tec_run) { - ret = -ENOMEM; - goto tec_free; - } - params_ptr = 
kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!params_ptr) { - ret = -ENOMEM; - goto tec_free; - } - - for (i = 0; i < TEC_CREATE_NR_GPRS; ++i) - params_ptr->gprs[i] = vcpu_regs->regs[i]; - - params_ptr->pc = vcpu_regs->pc; - - if (vcpu->vcpu_id == 0) - params_ptr->flags = TMI_RUNNABLE; - else - params_ptr->flags = TMI_NOT_RUNNABLE; - params_ptr->ram_size = cvm->ram_size; - numa_set = kvm_get_host_numa_set_by_vcpu(vcpu->vcpu_id, vcpu->kvm); - tec->tec = tmi_tec_create(numa_set, cvm->rd, mpidr, __pa(params_ptr)); - - tec->tec_created = true; - kfree(params_ptr); - mutex_unlock(&vcpu->kvm->lock); - return ret; - -tec_free: - kfree(tec->tec_run); - kfree(params_ptr); - mutex_unlock(&vcpu->kvm->lock); - return ret; -} - -static int config_cvm_hash_algo(struct tmi_cvm_params *params, - struct kvm_cap_arm_tmm_config_item *cfg) -{ - switch (cfg->hash_algo) { - case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA256: - if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_256)) - return -EINVAL; - break; - case KVM_CAP_ARM_RME_MEASUREMENT_ALGO_SHA512: - if (!tmm_supports(TMI_FEATURE_REGISTER_0_HASH_SHA_512)) - return -EINVAL; - break; - default: - return -EINVAL; - } - params->measurement_algo = cfg->hash_algo; - return 0; -} - -static int config_cvm_sve(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct tmi_cvm_params *params; - int max_sve_vq; - - params = cvm->params; - max_sve_vq = u64_get_bits(tmm_feat_reg0, - TMI_FEATURE_REGISTER_0_SVE_VL); - - if (!kvm_cvm_supports_sve()) - return -EINVAL; - - if (cfg->sve_vq > max_sve_vq) - return -EINVAL; - - params->sve_vl = cfg->sve_vq; - params->flags |= TMI_CVM_PARAM_FLAG_SVE; - - return 0; -} - -static int config_cvm_pmu(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct tmi_cvm_params *params; - int max_pmu_num_ctrs; - - params = cvm->params; - max_pmu_num_ctrs = u64_get_bits(tmm_feat_reg0, - TMI_FEATURE_REGISTER_0_PMU_NUM_CTRS); - - if (!kvm_cvm_supports_pmu()) - return -EINVAL; - - if (cfg->num_pmu_cntrs > max_pmu_num_ctrs) - return -EINVAL; - - params->pmu_num_cnts = cfg->num_pmu_cntrs; - params->flags |= TMI_CVM_PARAM_FLAG_PMU; - - return 0; -} - -static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_cap_arm_tmm_config_item cfg; - int r = 0; - - if (virtcca_cvm_state(kvm) != CVM_STATE_NONE) - return -EBUSY; - - if (copy_from_user(&cfg, (void __user *)cap->args[1], sizeof(cfg))) - return -EFAULT; - - switch (cfg.cfg) { - case KVM_CAP_ARM_TMM_CFG_SVE: - r = config_cvm_sve(kvm, &cfg); - break; - case KVM_CAP_ARM_TMM_CFG_PMU: - r = config_cvm_pmu(kvm, &cfg); - break; - case KVM_CAP_ARM_TMM_CFG_HASH_ALGO: - r = config_cvm_hash_algo(cvm->params, &cfg); - break; - default: - r = -EINVAL; - } - - return r; -} - -int kvm_cvm_map_range(struct kvm *kvm) -{ - int ret; - u64 curr_numa_set; - int idx; - u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_numa_info *numa_info = &cvm->numa_info; - gpa_t gpa; - - curr_numa_set = kvm_get_first_binded_numa_set(kvm); - gpa = round_up(cvm->dtb_end, l2_granule); - for (idx = 0; idx < numa_info->numa_cnt; idx++) { - struct kvm_numa_node *numa_node = &numa_info->numa_nodes[idx]; - - if (idx) - gpa = numa_node->ipa_start; - if (gpa >= numa_node->ipa_start && - gpa < numa_node->ipa_start + numa_node->ipa_size) { - ret = tmi_ttt_map_range(cvm->rd, 
gpa, - numa_node->ipa_size - gpa + numa_node->ipa_start, - curr_numa_set, numa_node->host_numa_nodes[0]); - if (ret) { - kvm_err("tmi_ttt_map_range failed: %d.\n", ret); - return ret; - } - } - } - /* Vfio driver will pin memory in advance, - * if the ram already mapped, activate cvm - * does not need to map twice - */ - cvm->is_mapped = true; - return ret; -} - -static int kvm_activate_cvm(struct kvm *kvm) -{ -#ifdef CONFIG_HISI_VIRTCCA_CODA - int ret; - struct arm_smmu_domain *arm_smmu_domain; - struct list_head smmu_domain_group_list; -#endif - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - - if (virtcca_cvm_state(kvm) != CVM_STATE_NEW) - return -EINVAL; - - if (!cvm->is_mapped && kvm_cvm_map_range(kvm)) - return -EFAULT; - -#ifdef CONFIG_HISI_VIRTCCA_CODA - kvm_get_arm_smmu_domain(kvm, &smmu_domain_group_list); - list_for_each_entry(arm_smmu_domain, &smmu_domain_group_list, node) { - if (arm_smmu_domain) { - ret = virtcca_tmi_dev_attach(arm_smmu_domain, kvm); - if (ret) - return ret; - } - } -#endif - - if (tmi_cvm_activate(cvm->rd)) { - kvm_err("tmi_cvm_activate failed!\n"); - return -ENXIO; - } - - WRITE_ONCE(cvm->state, CVM_STATE_ACTIVE); - kvm_info("cVM%d is activated!\n", cvm->cvm_vmid); - return 0; -} - -static int kvm_populate_ram_region(struct kvm *kvm, u64 map_size, - phys_addr_t ipa_base, phys_addr_t ipa_end, - struct kvm_cap_arm_tmm_populate_region_args *args) -{ - phys_addr_t gpa; - u64 numa_set = kvm_get_first_binded_numa_set(kvm); - - for (gpa = ipa_base; gpa < ipa_end; gpa += map_size) { - if (kvm_cvm_populate_par_region(kvm, numa_set, gpa, gpa + map_size, args)) { - kvm_err("kvm_cvm_populate_par_region failed: %d\n", -EFAULT); - return -EFAULT; - } - } - return 0; -} - -static int kvm_populate_ipa_cvm_range(struct kvm *kvm, - struct kvm_cap_arm_tmm_populate_region_args *args) -{ - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); - phys_addr_t ipa_base1, ipa_end2; - - if (virtcca_cvm_state(kvm) != CVM_STATE_NEW) - return -EINVAL; - if (!IS_ALIGNED(args->populate_ipa_base1, PAGE_SIZE) || - !IS_ALIGNED(args->populate_ipa_size1, PAGE_SIZE) || - !IS_ALIGNED(args->populate_ipa_base2, PAGE_SIZE) || - !IS_ALIGNED(args->populate_ipa_size2, PAGE_SIZE)) - return -EINVAL; - - if (args->populate_ipa_base1 < cvm->loader_start || - args->populate_ipa_base2 < args->populate_ipa_base1 + args->populate_ipa_size1 || - cvm->dtb_end < args->populate_ipa_base2 + args->populate_ipa_size2) - return -EINVAL; - - if (args->flags & ~TMI_MEASURE_CONTENT) - return -EINVAL; - ipa_base1 = round_down(args->populate_ipa_base1, l2_granule); - ipa_end2 = round_up(args->populate_ipa_base2 + args->populate_ipa_size2, l2_granule); - - return kvm_populate_ram_region(kvm, l2_granule, ipa_base1, ipa_end2, args); -} - -int kvm_cvm_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap) -{ - int r = 0; - - mutex_lock(&kvm->lock); - switch (cap->args[0]) { - case KVM_CAP_ARM_TMM_CONFIG_CVM_HOST: - r = kvm_tmm_config_cvm(kvm, cap); - break; - case KVM_CAP_ARM_TMM_CREATE_RD: - r = kvm_arm_create_cvm(kvm); - break; - case KVM_CAP_ARM_TMM_POPULATE_CVM: { - struct kvm_cap_arm_tmm_populate_region_args args; - void __user *argp = u64_to_user_ptr(cap->args[1]); - - if (copy_from_user(&args, argp, sizeof(args))) { - r = -EFAULT; - break; - } - r = kvm_populate_ipa_cvm_range(kvm, &args); - break; - } - case KVM_CAP_ARM_TMM_ACTIVATE_CVM: - r = kvm_activate_cvm(kvm); - break; - default: - r = -EINVAL; - break; - } - mutex_unlock(&kvm->lock); - - return r; -} - -void 
kvm_destroy_tec(struct kvm_vcpu *vcpu) -{ - struct virtcca_cvm_tec *tec = &vcpu->arch.tec; - - if (!vcpu_is_tec(vcpu)) - return; - - if (tmi_tec_destroy(tec->tec) != 0) - kvm_err("%s vcpu id : %d failed!\n", __func__, vcpu->vcpu_id); - - tec->tec = 0; - kfree(tec->tec_run); -} - -static int tmi_check_version(void) -{ - u64 res; - int version_major; - int version_minor; - - res = tmi_version(); - if (res == SMCCC_RET_NOT_SUPPORTED) - return -ENXIO; - - version_major = TMI_ABI_VERSION_GET_MAJOR(res); - version_minor = TMI_ABI_VERSION_GET_MINOR(res); - - if (version_major != TMI_ABI_VERSION_MAJOR) { - kvm_err("Unsupported TMI_ABI (version %d %d)\n", version_major, - version_minor); - return -ENXIO; - } - - kvm_info("TMI ABI version %d,%d\n", version_major, version_minor); - return 0; -} - -int kvm_tec_enter(struct kvm_vcpu *vcpu) -{ - struct tmi_tec_run *run; - struct virtcca_cvm_tec *tec = &vcpu->arch.tec; - struct virtcca_cvm *cvm = vcpu->kvm->arch.virtcca_cvm; - - if (READ_ONCE(cvm->state) != CVM_STATE_ACTIVE) - return -EINVAL; - - run = tec->tec_run; - /* set/clear TWI TWE flags */ - if (vcpu->arch.hcr_el2 & HCR_TWI) - run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFI; - else - run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFI; - - if (vcpu->arch.hcr_el2 & HCR_TWE) - run->tec_entry.flags |= TEC_ENTRY_FLAG_TRAP_WFE; - else - run->tec_entry.flags &= ~TEC_ENTRY_FLAG_TRAP_WFE; - - return tmi_tec_enter(tec->tec, __pa(run)); -} - -int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target) -{ - int ret; - struct virtcca_cvm_tec *calling_tec = &calling->arch.tec; - struct virtcca_cvm_tec *target_tec = &target->arch.tec; - - ret = tmi_psci_complete(calling_tec->tec, target_tec->tec); - if (ret) - return -EINVAL; - return 0; -} - -int kvm_init_tmm(void) -{ - int ret; - - if (PAGE_SIZE != SZ_4K) - return 0; - - if (tmi_check_version()) - return 0; - - ret = cvm_vmid_init(); - if (ret) - return ret; - - tmm_feat_reg0 = tmi_features(0); - kvm_info("TMM feature0: 0x%lx\n", tmm_feat_reg0); - - static_branch_enable(&virtcca_cvm_is_available); - - return 0; -} - -static bool is_numa_ipa_range_valid(struct kvm_numa_info *numa_info) -{ - unsigned long i; - struct kvm_numa_node *numa_node, *prev_numa_node; - - prev_numa_node = NULL; - for (i = 0; i < numa_info->numa_cnt; i++) { - numa_node = &numa_info->numa_nodes[i]; - if (numa_node->ipa_start + numa_node->ipa_size < numa_node->ipa_start) - return false; - if (prev_numa_node && - numa_node->ipa_start < prev_numa_node->ipa_start + prev_numa_node->ipa_size) - return false; - prev_numa_node = numa_node; - } - if (numa_node->ipa_start + numa_node->ipa_size > CVM_IPA_MAX_VAL) - return false; - return true; -} - -int kvm_load_user_data(struct kvm *kvm, unsigned long arg) -{ - struct kvm_user_data user_data; - void __user *argp = (void __user *)arg; - struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; - struct kvm_numa_info *numa_info; - - if (!kvm_is_virtcca_cvm(kvm)) - return -EFAULT; - - if (copy_from_user(&user_data, argp, sizeof(user_data))) - return -EINVAL; - - numa_info = &user_data.numa_info; - if (numa_info->numa_cnt > MAX_NUMA_NODE) - return -EINVAL; - - if (numa_info->numa_cnt > 0) { - unsigned long i, total_size = 0; - struct kvm_numa_node *numa_node = &numa_info->numa_nodes[0]; - unsigned long ipa_end = numa_node->ipa_start + numa_node->ipa_size; - - if (!is_numa_ipa_range_valid(numa_info)) - return -EINVAL; - if (user_data.loader_start < numa_node->ipa_start || - user_data.dtb_end > ipa_end) - return -EINVAL; - for (i = 0; i < 
numa_info->numa_cnt; i++) - total_size += numa_info->numa_nodes[i].ipa_size; - if (total_size != user_data.ram_size) - return -EINVAL; - } - - if (user_data.image_end <= user_data.loader_start || - user_data.initrd_start < user_data.image_end || - user_data.dtb_end < user_data.initrd_start || - user_data.ram_size < user_data.dtb_end - user_data.loader_start) - return -EINVAL; - - cvm->loader_start = user_data.loader_start; - cvm->image_end = user_data.image_end; - cvm->initrd_start = user_data.initrd_start; - cvm->dtb_end = user_data.dtb_end; - cvm->ram_size = user_data.ram_size; - memcpy(&cvm->numa_info, numa_info, sizeof(struct kvm_numa_info)); - - return 0; -} - -void kvm_cvm_vcpu_put(struct kvm_vcpu *vcpu) -{ - kvm_timer_vcpu_put(vcpu); - kvm_vgic_put(vcpu); - vcpu->cpu = -1; -} - -unsigned long cvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu, - unsigned long target_affinity, unsigned long lowest_affinity_level) -{ - struct kvm_vcpu *target_vcpu; - - if (lowest_affinity_level != 0) - return PSCI_RET_INVALID_PARAMS; - - target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, target_affinity); - if (!target_vcpu) - return PSCI_RET_INVALID_PARAMS; - - cvm_psci_complete(vcpu, target_vcpu); - return PSCI_RET_SUCCESS; -} - -int kvm_cvm_vcpu_set_events(struct kvm_vcpu *vcpu, - bool serror_pending, bool ext_dabt_pending) -{ - struct virtcca_cvm_tec *tec = &vcpu->arch.tec; - - if (serror_pending) - return -EINVAL; - - if (ext_dabt_pending) { - if (!(((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags & - TEC_ENTRY_FLAG_EMUL_MMIO)) - return -EINVAL; - - ((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags - &= ~TEC_ENTRY_FLAG_EMUL_MMIO; - ((struct tmi_tec_run *)tec->tec_run)->tec_entry.flags - |= TEC_ENTRY_FLAG_INJECT_SEA; - } - return 0; -} - -int kvm_init_cvm_vm(struct kvm *kvm) -{ - struct tmi_cvm_params *params; - struct virtcca_cvm *cvm; - - if (kvm->arch.virtcca_cvm) { - kvm_info("cvm already create.\n"); - return 0; - } - - cvm = (struct virtcca_cvm *)kzalloc(sizeof(struct virtcca_cvm), GFP_KERNEL_ACCOUNT); - if (!cvm) - return -ENOMEM; - - kvm->arch.virtcca_cvm = cvm; - params = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); - if (!params) { - kfree(kvm->arch.virtcca_cvm); - kvm->arch.virtcca_cvm = NULL; - return -ENOMEM; - } - - cvm->params = params; - WRITE_ONCE(cvm->state, CVM_STATE_NONE); - - return 0; -} - -#ifdef CONFIG_HISI_VIRTCCA_CODA -/* - * Coda (Confidential Device Assignment) feature - * enable devices to pass directly to confidential virtual machines - */ - -/** - * is_in_virtcca_ram_range - Check if the iova belongs - * to the cvm ram range - * @kvm: The handle of kvm - * @iova: Ipa address - * - * Returns: - * %true if the iova belongs to cvm ram - * %false if the iova is not within the scope of cvm ram - */ -bool is_in_virtcca_ram_range(struct kvm *kvm, uint64_t iova) -{ - if (!is_virtcca_cvm_enable()) - return false; - - struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; - - if (iova >= virtcca_cvm->loader_start && - iova < virtcca_cvm->loader_start + virtcca_cvm->ram_size) - return true; - - return false; -} -EXPORT_SYMBOL_GPL(is_in_virtcca_ram_range); - -/** - * is_virtcca_iova_need_vfio_dma - Whether the vfio need - * to map the dma address - * @kvm: The handle of kvm - * @iova: Ipa address - * - * Returns: - * %true if virtcca cvm ram is nort mapped or - * virtcca_cvm_ram is mapped and the iova does not - * belong to cvm ram range - * %false if virtcca_cvm_ram is mapped and the iova belong - * to cvm ram range - */ -bool is_virtcca_iova_need_vfio_dma(struct kvm *kvm, 
uint64_t iova) -{ - if (!is_virtcca_cvm_enable()) - return false; - - struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; - - if (!virtcca_cvm->is_mapped) - return true; - - return !is_in_virtcca_ram_range(kvm, iova); -} -EXPORT_SYMBOL_GPL(is_virtcca_iova_need_vfio_dma); - -static int kvm_cvm_dev_ttt_create(struct virtcca_cvm *cvm, - unsigned long addr, - int level, - u64 numa_set) -{ - addr = ALIGN_DOWN(addr, cvm_ttt_level_mapsize(level - 1)); - return tmi_dev_ttt_create(numa_set, cvm->rd, addr, level); -} - -/* CVM create ttt level information about device */ -int kvm_cvm_create_dev_ttt_levels(struct kvm *kvm, struct virtcca_cvm *cvm, - unsigned long ipa, - int level, - int max_level, - struct kvm_mmu_memory_cache *mc) -{ - int ret = 0; - - while (level++ < max_level) { - u64 numa_set = kvm_get_first_binded_numa_set(kvm); - - ret = kvm_cvm_dev_ttt_create(cvm, ipa, level, numa_set); - if (ret) - return -ENXIO; - } - - return 0; -} - -/** - * cvm_map_max_level_size - MMIO Map according to largest possible granularity - * @map_start: The start of map address - * @map_end: The end of map address - * @map_size: Map range - * - * Returns: - * %level the map level - * %-ENXIO if no suitable mapping level was found - */ -static int cvm_map_max_level_size(unsigned long map_start, unsigned long map_end, - unsigned long *map_size) -{ - int level = 1; - - *map_size = tmm_granule_size(level); - if (IS_ALIGNED(map_start, *map_size) && - (map_start + *map_size <= map_end)) - return level; - - level++; - *map_size = tmm_granule_size(level); - if (IS_ALIGNED(map_start, *map_size) && - (map_start + *map_size <= map_end)) - return level; - - level++; - *map_size = tmm_granule_size(level); - if (IS_ALIGNED(map_start, *map_size) && - (map_start + *map_size <= map_end)) - return level; - - pr_err("level not allow to map size\n"); - return -ENXIO; -} - -/** - * cvm_map_unmap_ipa_range - Vfio driver map or - * unmap cvm ipa - * @kvm: The handle of kvm - * @ipa_base: Ipa address - * @pa: Physical address - * @map_size: Map range - * @is_map: Map type - * - * Returns: - * %0 if cvm map/unmap address successfully - * %-ENXIO if map/unmap failed - */ -int cvm_map_unmap_ipa_range(struct kvm *kvm, phys_addr_t ipa_base, - phys_addr_t pa, unsigned long map_size, uint32_t is_map) -{ - unsigned long map_start; - unsigned long map_end; - int level; - struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; - phys_addr_t rd = virtcca_cvm->rd; - unsigned long phys = pa; - int ret = 0; - - map_start = ipa_base; - map_end = map_start + map_size; - while (map_start < map_end) { - level = cvm_map_max_level_size(map_start, map_end, &map_size); - if (level < 0) { - ret = -ENXIO; - goto err; - } - if (is_map) - ret = tmi_mmio_map(rd, map_start, level, phys); - else - ret = tmi_mmio_unmap(rd, map_start, level); - - if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) { - /* Create missing TTTs and retry */ - int level_fault = TMI_RETURN_INDEX(ret); - - if (is_map) { - ret = kvm_cvm_create_dev_ttt_levels(kvm, virtcca_cvm, map_start, - level_fault, CVM_TTT_MAX_LEVEL, NULL); - if (ret) - goto err; - ret = tmi_mmio_map(rd, map_start, level, phys); - } else { - ret = tmi_mmio_unmap(rd, map_start, level_fault); - map_size = tmm_granule_size(level_fault); - } - } - - if (ret) - goto err; - - map_start += map_size; - phys += map_size; - } - - return 0; - -err: - if (!tmi_cvm_destroy(rd)) - kvm_info("Vfio map failed, kvm has destroyed cVM: %d\n", virtcca_cvm->cvm_vmid); - return -ENXIO; -} - -/** - * kvm_cvm_map_ipa_mmio - Map the mmio 
address when page fault - * @kvm: The handle of kvm - * @ipa_base: Ipa address - * @pa: Physical address - * @map_size: Map range - * - * Returns: - * %0 if cvm map address successfully - * %-ENXIO if map failed - */ -int kvm_cvm_map_ipa_mmio(struct kvm *kvm, phys_addr_t ipa_base, - phys_addr_t pa, unsigned long map_size) -{ - unsigned long size; - gfn_t gfn; - kvm_pfn_t pfn; - struct virtcca_cvm *virtcca_cvm = kvm->arch.virtcca_cvm; - phys_addr_t rd = virtcca_cvm->rd; - unsigned long ipa = ipa_base; - unsigned long phys = pa; - int ret = 0; - - if (WARN_ON(!IS_ALIGNED(ipa, map_size))) - return -EINVAL; - - for (size = 0; size < map_size; size += PAGE_SIZE) { - ret = tmi_mmio_map(rd, ipa, CVM_TTT_MAX_LEVEL, phys); - if (ret == TMI_ERROR_TTT_CREATED) { - ret = 0; - goto label; - } - if (TMI_RETURN_STATUS(ret) == TMI_ERROR_TTT_WALK) { - /* Create missing TTTs and retry */ - int level_fault = TMI_RETURN_INDEX(ret); - - ret = kvm_cvm_create_dev_ttt_levels(kvm, virtcca_cvm, ipa, level_fault, - CVM_TTT_MAX_LEVEL, NULL); - - if (ret) - goto err; - ret = tmi_mmio_map(rd, ipa, CVM_TTT_MAX_LEVEL, phys); - } - - if (ret) - goto err; -label: - if (size + PAGE_SIZE >= map_size) - break; - - ipa += PAGE_SIZE; - gfn = gpa_to_gfn(ipa); - pfn = gfn_to_pfn(kvm, gfn); - kvm_set_pfn_accessed(pfn); - kvm_release_pfn_clean(pfn); - phys = (uint64_t)__pfn_to_phys(pfn); - - } - - return 0; - -err: - if (!tmi_cvm_destroy(rd)) - kvm_info("MMIO map failed, kvm has destroyed cVM: %d\n", virtcca_cvm->cvm_vmid); - return -ENXIO; -} - -/* Page fault map ipa */ -int kvm_cvm_map_ipa(struct kvm *kvm, phys_addr_t ipa, kvm_pfn_t pfn, - unsigned long map_size, enum kvm_pgtable_prot prot, int ret) -{ - if (!is_virtcca_cvm_enable() || !kvm_is_virtcca_cvm(kvm)) - return ret; - - struct page *dst_page = pfn_to_page(pfn); - phys_addr_t dst_phys = page_to_phys(dst_page); - - if (WARN_ON(!(prot & KVM_PGTABLE_PROT_W))) - return -EFAULT; - - if (prot & KVM_PGTABLE_PROT_DEVICE) - return kvm_cvm_map_ipa_mmio(kvm, ipa, dst_phys, map_size); - - return 0; -} - -/* Set device secure flag */ -void virtcca_cvm_set_secure_flag(void *vdev, void *info) -{ - if (!is_virtcca_cvm_enable()) - return; - - if (!is_cc_dev(pci_dev_id(((struct vfio_pci_core_device *)vdev)->pdev))) - return; - - ((struct vfio_device_info *)info)->flags |= VFIO_DEVICE_FLAGS_SECURE; -} -EXPORT_SYMBOL_GPL(virtcca_cvm_set_secure_flag); - -/** - * cvm_arm_smmu_domain_set_kvm - Associate SMMU domain with CVM - * @dev: The Device under the iommu group - * - * Returns: - * %0 if smmu_domain has been associate cvm or associate cvm successfully - * %-ENXIO if the iommu group does not have smmu domain - */ -int cvm_arm_smmu_domain_set_kvm(struct device *dev, void *data) -{ - struct kvm *kvm; - struct iommu_domain *domain; - struct arm_smmu_domain *arm_smmu_domain = NULL; - - domain = iommu_get_domain_for_dev(dev); - if (!domain) - return -ENXIO; - - arm_smmu_domain = to_smmu_domain(domain); - if (arm_smmu_domain->kvm) - return 1; - - kvm = virtcca_arm_smmu_get_kvm(arm_smmu_domain); - if (kvm && kvm_is_virtcca_cvm(kvm)) - arm_smmu_domain->kvm = kvm; - - return 1; -} - -int virtcca_cvm_arm_smmu_domain_set_kvm(void *group) -{ - int ret; - - ret = iommu_group_for_each_dev((struct iommu_group *)group, - (void *)NULL, cvm_arm_smmu_domain_set_kvm); - return ret; -} -#endif diff --git a/arch/arm64/kvm/virtcca_cvm_exit.c b/arch/arm64/kvm/virtcca_cvm_exit.c deleted file mode 100644 index 9654375a9c8c..000000000000 --- a/arch/arm64/kvm/virtcca_cvm_exit.c +++ /dev/null @@ -1,221 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2024, The Linux Foundation. All rights reserved. - */ -#include -#include -#include - -#include -#include -#include - -typedef int (*exit_handler_fn)(struct kvm_vcpu *vcpu); - -static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu, bool unmask_ctl) -{ - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = run->tec_exit.cntv_ctl; - __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = run->tec_exit.cntv_cval; - __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = run->tec_exit.cntp_ctl; - __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = run->tec_exit.cntp_cval; - - /* Because the timer mask is tainted by TMM, we don't know the - * true intent of the guest. Here, we assume mask is always - * cleared during WFI. - */ - if (unmask_ctl) { - __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK; - __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) &= ~ARCH_TIMER_CTRL_IT_MASK; - } - - kvm_cvm_timers_update(vcpu); -} - -static int tec_exit_reason_notimpl(struct kvm_vcpu *vcpu) -{ - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - pr_err("[vcpu %d] Unhandled exit reason from cvm (ESR: %#llx)\n", - vcpu->vcpu_id, run->tec_exit.esr); - return -ENXIO; -} - -/* The process is the same as kvm_handle_wfx, - * except the tracing and updating operation for pc, - * we copy kvm_handle_wfx process here - * to avoid changing kvm_handle_wfx function. - */ -static int tec_exit_wfx(struct kvm_vcpu *vcpu) -{ - u64 esr = kvm_vcpu_get_esr(vcpu); - - if (esr & ESR_ELx_WFx_ISS_WFE) { - vcpu->stat.wfe_exit_stat++; - } else { - vcpu->stat.wfi_exit_stat++; - } - - if (esr & ESR_ELx_WFx_ISS_WFxT) { - if (esr & ESR_ELx_WFx_ISS_RV) { - u64 val, now; - - now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT); - val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu)); - - if (now >= val) - goto out; - } else { - /* Treat WFxT as WFx if RN is invalid */ - esr &= ~ESR_ELx_WFx_ISS_WFxT; - } - } - - if (esr & ESR_ELx_WFx_ISS_WFE) { - kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu)); - } else { - if (esr & ESR_ELx_WFx_ISS_WFxT) - vcpu_set_flag(vcpu, IN_WFIT); - - kvm_vcpu_wfi(vcpu); - } - -out: - return 1; -} - -static int tec_exit_sys_reg(struct kvm_vcpu *vcpu) -{ - int ret; - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - unsigned long esr = kvm_vcpu_get_esr(vcpu); - int rt = kvm_vcpu_sys_get_rt(vcpu); - bool is_write = !(esr & 1); - - if (is_write) - vcpu_set_reg(vcpu, rt, run->tec_exit.gprs[0]); - - ret = kvm_handle_sys_reg(vcpu); - - if (ret >= 0 && !is_write) - run->tec_entry.gprs[0] = vcpu_get_reg(vcpu, rt); - - return ret; -} - -static int tec_exit_sync_dabt(struct kvm_vcpu *vcpu) -{ - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - if (kvm_vcpu_dabt_iswrite(vcpu) && kvm_vcpu_dabt_isvalid(vcpu)) { - vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), - run->tec_exit.gprs[0]); - } - return kvm_handle_guest_abort(vcpu); -} - -static int tec_exit_sync_iabt(struct kvm_vcpu *vcpu) -{ - struct tmi_tec_run *run = vcpu->arch.tec.tec_run; - - pr_err("[vcpu %d] Unhandled instruction abort (ESR: %#llx).\n", - vcpu->vcpu_id, run->tec_exit.esr); - - return -ENXIO; -} - -static exit_handler_fn tec_exit_handlers[] = { - [0 ... 
ESR_ELx_EC_MAX] = tec_exit_reason_notimpl,
-	[ESR_ELx_EC_WFx] = tec_exit_wfx,
-	[ESR_ELx_EC_SYS64] = tec_exit_sys_reg,
-	[ESR_ELx_EC_DABT_LOW] = tec_exit_sync_dabt,
-	[ESR_ELx_EC_IABT_LOW] = tec_exit_sync_iabt
-};
-
-static int tec_exit_psci(struct kvm_vcpu *vcpu)
-{
-	int i;
-	struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
-
-	for (i = 0; i < TEC_EXIT_NR_GPRS; ++i)
-		vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
-
-	return kvm_psci_call(vcpu);
-}
-
-static int tec_exit_host_call(struct kvm_vcpu *vcpu)
-{
-	int ret, i;
-	struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
-
-	vcpu->stat.hvc_exit_stat++;
-
-	for (i = 0; i < TEC_EXIT_NR_GPRS; ++i)
-		vcpu_set_reg(vcpu, i, run->tec_exit.gprs[i]);
-
-	ret = kvm_smccc_call_handler(vcpu);
-
-	if (ret < 0) {
-		vcpu_set_reg(vcpu, 0, ~0UL);
-		ret = 1;
-	}
-	for (i = 0; i < TEC_EXIT_NR_GPRS; ++i)
-		run->tec_entry.gprs[i] = vcpu_get_reg(vcpu, i);
-
-	return ret;
-}
-
-/*
- * Return > 0 to return to guest, < 0 on error, 0(and set exit_reason) on
- * proper exit to userspace
- */
-
-int handle_cvm_exit(struct kvm_vcpu *vcpu, int tec_run_ret)
-{
-	unsigned long status;
-	struct tmi_tec_run *run = vcpu->arch.tec.tec_run;
-	u8 esr_ec = ESR_ELx_EC(run->tec_exit.esr);
-	bool is_wfx;
-
-	status = TMI_RETURN_STATUS(tec_run_ret);
-
-	if (status == TMI_ERROR_CVM_POWEROFF) {
-		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
-		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN;
-		return 0;
-	}
-
-	if (status == TMI_ERROR_CVM_STATE) {
-		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
-		return 0;
-	}
-
-	if (tec_run_ret)
-		return -ENXIO;
-
-	vcpu->arch.fault.esr_el2 = run->tec_exit.esr;
-	vcpu->arch.fault.far_el2 = run->tec_exit.far;
-	vcpu->arch.fault.hpfar_el2 = run->tec_exit.hpfar;
-
-	is_wfx = (run->tec_exit.exit_reason == TMI_EXIT_SYNC) && (esr_ec == ESR_ELx_EC_WFx);
-	update_arch_timer_irq_lines(vcpu, is_wfx);
-
-	run->tec_entry.flags = 0;
-
-	switch (run->tec_exit.exit_reason) {
-	case TMI_EXIT_FIQ:
-	case TMI_EXIT_IRQ:
-		return 1;
-	case TMI_EXIT_PSCI:
-		return tec_exit_psci(vcpu);
-	case TMI_EXIT_SYNC:
-		return tec_exit_handlers[esr_ec](vcpu);
-	case TMI_EXIT_HOST_CALL:
-		return tec_exit_host_call(vcpu);
-	}
-
-	kvm_pr_unimpl("Unsupported exit reason : 0x%llx\n",
-		run->tec_exit.exit_reason);
-	return 0;
-}
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 9d3f034bd885..fee20f66da52 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -172,8 +172,4 @@ static inline bool has_cntpoff(void)
 	return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
 }
 
-#ifdef CONFIG_HISI_VIRTCCA_HOST
-/* Needed for S-EL2 */
-void kvm_cvm_timers_update(struct kvm_vcpu *vcpu);
-#endif
 #endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 7db0aeba781e..fb4d88a15a8a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -544,28 +544,6 @@ static __always_inline void guest_state_exit_irqoff(void)
 	instrumentation_end();
 }
 
-#ifdef CONFIG_HISI_VIRTCCA_HOST
-
-#define KVM_TYPE_CVM_BIT 8
-#define CVM_MAX_HALT_POLL_NS 100000
-
-DECLARE_STATIC_KEY_FALSE(virtcca_cvm_is_available);
-
-static __always_inline bool vcpu_is_tec(struct kvm_vcpu *vcpu)
-{
-	if (static_branch_unlikely(&virtcca_cvm_is_available))
-		return vcpu->arch.tec.tec_run;
-
-	return false;
-}
-
-static inline bool kvm_arm_cvm_type(unsigned long type)
-{
-	return type & (1UL << KVM_TYPE_CVM_BIT);
-}
-
-#endif
-
 static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
 {
 	/*
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index c4984d20726e..635881428079 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1517,19 +1517,6 @@ struct kvm_numa_info {
 #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
 #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
 
-#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data)
-
-#define KVM_CAP_ARM_TMM 300 /* FIXME: Large number to prevent conflicts */
-
-struct kvm_user_data {
-	__u64 loader_start;
-	__u64 image_end;
-	__u64 initrd_start;
-	__u64 dtb_end;
-	__u64 ram_size;
-	struct kvm_numa_info numa_info;
-};
-
 /* enable ucontrol for s390 */
 struct kvm_s390_ucas_mapping {
 	__u64 user_addr;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 421f253903ca..8ec58ecfc8cc 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3550,10 +3550,6 @@ static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
 
-#ifdef CONFIG_HISI_VIRTCCA_HOST
-	if (vcpu_is_tec(vcpu))
-		return CVM_MAX_HALT_POLL_NS;
-#endif
 	if (kvm->override_halt_poll_ns) {
 		/*
 		 * Ensure kvm->max_halt_poll_ns is not read before
-- 
Gitee