From f91dda77d87a722211eff013ed5d7ea21a5ec4c2 Mon Sep 17 00:00:00 2001 From: liuhao365 Date: Tue, 2 Sep 2025 16:29:29 +0800 Subject: [PATCH] [live migration 1]: add basic field and struct for migration --- arch/arm64/include/asm/kvm_tmi.h | 113 ++++++++++++++- arch/arm64/include/asm/kvm_tmm.h | 89 ++++++++++++ arch/arm64/include/uapi/asm/kvm.h | 18 +++ arch/arm64/kvm/Kconfig | 8 ++ arch/arm64/kvm/arm.c | 6 + arch/arm64/kvm/mmu.c | 84 +++++++++++ arch/arm64/kvm/tmi.c | 189 +++++++++++++++++++++++- arch/arm64/kvm/virtcca_cvm.c | 213 ++++++++++++++++++++++++++- arch/arm64/kvm/virtcca_mig.c | 229 ++++++++++++++++++++++++++++++ include/linux/kvm_host.h | 2 + include/uapi/linux/kvm.h | 4 + kernel/dma/swiotlb.c | 1 + tools/include/uapi/linux/kvm.h | 5 + virt/kvm/kvm_main.c | 52 +++++++ 14 files changed, 1007 insertions(+), 6 deletions(-) create mode 100644 arch/arm64/kvm/virtcca_mig.c diff --git a/arch/arm64/include/asm/kvm_tmi.h b/arch/arm64/include/asm/kvm_tmi.h index 6e599df75c19..21271ed37497 100644 --- a/arch/arm64/include/asm/kvm_tmi.h +++ b/arch/arm64/include/asm/kvm_tmi.h @@ -33,6 +33,7 @@ #define TMI_ERROR_TTT_CREATED 13 #define TMI_ERROR_TTT_DESTROY_AGAIN 14 #define TMI_ERROR_STE_CREATED 15 +#define TMI_ERROR_MIG_CAP 16 #define TMI_RETURN_STATUS(ret) ((ret) & 0xFF) #define TMI_RETURN_INDEX(ret) (((ret) >> 8) & 0xFF) @@ -48,9 +49,11 @@ #define TMI_FEATURE_REGISTER_0_HASH_SHA_256 BIT(28) #define TMI_FEATURE_REGISTER_0_HASH_SHA_512 BIT(29) -#define TMI_CVM_PARAM_FLAG_LPA2 BIT(0) +#define TMI_CVM_PARAM_FLAG_LPA2 BIT(0) #define TMI_CVM_PARAM_FLAG_SVE BIT(1) #define TMI_CVM_PARAM_FLAG_PMU BIT(2) +#define TMI_CVM_PARAM_FLAG_MIG BIT(3) +#define TMI_CVM_PARAM_FLAG_MIGVM BIT(4) #define TMI_NOT_RUNNABLE 0 #define TMI_RUNNABLE 1 @@ -246,7 +249,7 @@ struct tmi_tec_run { #define TMI_FNUM_TTT_MAP_RANGE U(0x26D) #define TMI_FNUM_TTT_UNMAP_RANGE U(0x26E) #define TMI_FNUM_TTT_DESTROY U(0x26F) -#define TMI_FNUM_INF_TEST U(0x270) +#define TMI_FNUM_INF_TEST U(0x271) #define TMI_FNUM_KAE_INIT U(0x273) #define TMI_FNUM_KAE_ENABLE U(0x274) #define TMI_FNUM_INFO_SHOW U(0x275) @@ -269,6 +272,13 @@ struct tmi_tec_run { #define TMI_FNUM_DEVICE_CREATE U(0x286) #define TMI_FNUM_DEVICE_DESTROY U(0x287) +/* additional TMI call for migration */ +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +#define TMI_FNUM_MIG_CONTROL U(0x270) +#define TMI_FNUM_MIG_DATA U(0x272) +#define TMI_FNUM_MIG_ATTESTATION U(0x276) +#endif + /* TMI SMC64 PIDs handled by the SPMD */ #define TMI_TMM_VERSION_REQ TMI_FID(SMC_64, TMI_FNUM_VERSION_REQ) #define TMI_TMM_DATA_CREATE TMI_FID(SMC_64, TMI_FNUM_DATA_CREATE) @@ -309,6 +319,55 @@ struct tmi_tec_run { #define TMI_TMM_DEV_CREATE TMI_FID(SMC_64, TMI_FNUM_DEVICE_CREATE) #define TMI_TMM_DEV_DESTROY TMI_FID(SMC_64, TMI_FNUM_DEVICE_DESTROY) +/* additional TMI call for migration */ +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + +#define TMI_TMM_MIG_CONTROL TMI_FID(SMC_64, TMI_FNUM_MIG_CONTROL) +#define TMI_TMM_MIG_DATA TMI_FID(SMC_64, TMI_FNUM_MIG_DATA) +#define TMI_TMM_MIG_ATTESTATION TMI_FID(SMC_64, TMI_FNUM_MIG_ATTESTATION) + +typedef enum tmi_tmm_mig_control_fid_e { + TMI_TMM_GET_MIG_CONFIG, + TMI_TMM_MIG_STREAM_CREATE, + TMI_TMM_SET_TMM_MEMSLOT, + TMI_TMM_GET_SWIOTLB, + TMI_TMM_MIG_MEM_REGION_PROTECT, + TMI_TMM_MIG_IMPORT_COMMIT, + TMI_TMM_CHECKSUM_SRC, + TMI_TMM_CHECKSUM_DST +} tmi_tmm_mig_control_fid_t; + +typedef enum tmi_tmm_mig_data_fid_e{ + TMI_TMM_MIG_EXPORT_IMMUTABLE, + TMI_TMM_MIG_IMPORT_IMMUTABLE, + TMI_TMM_MIG_EXPORT_TRACK, + TMI_TMM_MIG_IMPORT_TRACK, + TMI_TMM_MIG_EXPORT_MEM, + TMI_TMM_MIG_IMPORT_MEM, 
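+	/*
+	 * Each sub-function ID in these enums is multiplexed onto a single
+	 * group FID: the tmi.c wrappers added later in this patch pass the
+	 * sub-FID as the first SMC argument. Illustrative sketch only (it
+	 * mirrors the tmi_export_mem() wrapper below, not an additional call):
+	 *
+	 *	struct arm_smccc_res res;
+	 *
+	 *	arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_EXPORT_MEM,
+	 *			  rd, mig_mem_param, &res);
+	 *	if (res.a1)
+	 *		return res.a1;	// non-zero a1 is a TMI error code
+	 */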
+ TMI_TMM_MIG_EXPORT_TEC, + TMI_TMM_MIG_IMPORT_TEC, + TMI_TMM_MIG_EXPORT_MUTABLE, + TMI_TMM_MIG_IMPORT_MUTABLE +} tmi_tmm_mig_data_fid_t; + +typedef enum tmi_tmm_mig_attestation_fid_e{ + TMI_TMM_MIGVM_INIT, + TMI_TMM_MIGVM_CLEAN, + TMI_TMM_MIG_BIND, + TMI_TMM_MIG_BIND_CLEAN, + TMI_TMM_MIG_BIND_PEEK +} tmi_tmm_mig_attestation_fid_t; + +#define KVM_CAP_ARM_TMM_MIGRATION_CAP_DISABLE 0 +#define KVM_CAP_ARM_TMM_MIGRATION_CAP_LIVE_SRC 1 +#define KVM_CAP_ARM_TMM_MIGRATION_CAP_LIVE_DST 2 + +#define KVM_CAP_ARM_TMM_MIGVM_DEFAULT 0 +#define KVM_CAP_ARM_TMM_MIGVM_ENABLE 1 + +#define TMI_ABI_VERSION_MIG U(0x3) +#endif + #define TMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16) #define TMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF) @@ -332,6 +391,8 @@ struct tmi_tec_run { #define KVM_CAP_ARM_TMM_CFG_DBG 3 #define KVM_CAP_ARM_TMM_CFG_PMU 4 #define KVM_CAP_ARM_TMM_CFG_KAE 5 +#define KVM_CAP_ARM_TMM_CFG_MIG 6 +#define KVM_CAP_ARM_TMM_CFG_MIG_CVM 7 #define KVM_CAP_ARM_TMM_MAX_KAE_VF_NUM 11 @@ -372,6 +433,19 @@ struct kvm_cap_arm_tmm_config_item { __u64 sec_addr[KVM_CAP_ARM_TMM_MAX_KAE_VF_NUM]; __u64 hpre_addr[KVM_CAP_ARM_TMM_MAX_KAE_VF_NUM]; }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_MIG */ + struct { + __u32 migration_cap; + __u32 migvm_pid; + __u32 migvm_cid; + }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_MIG_CVM */ + struct { + __u32 migration_migvm_cap; + }; + /* Fix the size of the union */ __u8 reserved[256]; }; @@ -452,5 +526,40 @@ int kvm_enable_virtcca_cvm(struct kvm *kvm); int kvm_cvm_map_ipa(struct kvm *kvm, phys_addr_t ipa, kvm_pfn_t pfn, unsigned long map_size, enum kvm_pgtable_prot prot, int ret); void virtcca_cvm_set_secure_flag(void *vdev, void *info); +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +/* return the migsc_pa */ +u64 tmi_mig_stream_create(u64 rd, u64 numa_set); +u64 tmi_get_mig_config(void); +struct arm_smccc_res tmi_export_immutable(uint64_t rd, uint64_t hpa_and_size_pa, + uint64_t page_or_list, uint64_t mig_cmd); +u64 tmi_import_immutable(uint64_t rd, uint64_t hpa_and_size_pa, + uint64_t page_or_list, uint64_t mig_cmd); +struct arm_smccc_res tmi_import_mem(uint64_t rd, uint64_t mig_mem_param); +struct arm_smccc_res tmi_export_mem(uint64_t rd, uint64_t mig_mem_param); +u64 tmi_import_track(uint64_t rd, uint64_t hpa_and_size_pa, uint64_t mig_cmd); +u64 tmi_export_track(uint64_t rd, uint64_t hpa_and_size_pa, uint64_t mig_cmd); +u64 tmi_import_commit(uint64_t rd); +u64 tmi_set_tmm_memslot(uint64_t rd, uint64_t mig_memslot_param); +u64 tmi_import_tec(uint64_t tec_pa, uint64_t mbmd_addr_and_size, + uint64_t page_list_pa, uint64_t stream_info_pa); +struct arm_smccc_res tmi_export_tec(uint64_t tec_pa, uint64_t mbmd_addr_and_size, + uint64_t page_list_pa, uint64_t stream_info_pa); +struct arm_smccc_res tmi_export_mutable(uint64_t rd, uint64_t hpa_and_size_pa, + uint64_t page_or_list, uint64_t mig_cmd); +u64 tmi_import_mutable(uint64_t rd, uint64_t hpa_and_size_pa, + uint64_t page_or_list, uint64_t mig_cmd); +u64 tmi_get_swiotlb(uint64_t rd, uint64_t swiotlb_start_addr, uint64_t swiotlb_end_addr); +u64 tmi_checksum_src(uint64_t rd); +u64 tmi_checksum_dst(uint64_t rd); +void virtcca_set_tmm_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot); +/* enable the migcvm ctl */ +int kvm_migcvm_ioctl(struct kvm *kvm, unsigned long arg); +struct arm_smccc_res tmi_mem_region_protect(u64 rd, u64 start, u64 end, bool protect); +u64 tmi_migvm_init(uint64_t rd, uint64_t numa_set); +u64 tmi_migvm_clean(uint64_t migvm_rd); +u64 tmi_bind_add(uint64_t rd, uint64_t migvm_rd); +u64 tmi_bind_clean(uint64_t 
rd, uint64_t migvm_rd); +struct arm_smccc_res tmi_bind_peek(uint64_t rd, uint64_t migvm_rd); +#endif #endif #endif diff --git a/arch/arm64/include/asm/kvm_tmm.h b/arch/arm64/include/asm/kvm_tmm.h index f6e773c4aa13..18820d65f005 100644 --- a/arch/arm64/include/asm/kvm_tmm.h +++ b/arch/arm64/include/asm/kvm_tmm.h @@ -52,11 +52,43 @@ struct tmi_cvm_params { s64 ttt_level_start; u64 ttt_num_start; u8 rpv[64]; /* Bits 512 */ +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + u32 migration_cap; /* check the base capability of CVM migration */ + u32 migvm_pid; /* the pid of migvm */ + u32 migvm_cid; /* vsock cid of migvm */ + u32 migration_migvm_cap; /* the type of CVM (support migration) */ +#endif u64 kae_vf_num; u64 sec_addr[MAX_KAE_VF_NUM]; u64 hpre_addr[MAX_KAE_VF_NUM]; }; +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +#define MIGCVM_SLOTS_MAX 1 /* migcvm to guestcvm now just 1 to 1 */ +#define SLOT_INCLUDES_MIGCVM 0 + +/* add the mig state correspond every cvm*/ + +struct cvm_binding_slot_migcvm { + /* Is migration source VM */ + uint8_t is_src; + /* vsock port for migCVM to connect to host */ + uint32_t vsock_port; +}; + +/* the guest cvm and migcvm both use this structure */ +#define KVM_CVM_MIGVM_VERSION 0 +struct mig_cvm { + /* used by migcvm */ + bool is_migvm; + /* used by guest cvm */ + uint8_t version; /* kvm version of migcvm*/ + uint64_t migvm_cid; /* vsock cid of migvm */ + uint32_t migvm_pid; /* pid of migcvm, from and used by guest cvm*/ + uint64_t migvm_rd; /* the PA of migcvm rd, used by guest migcvm */ +}; +#endif + struct cvm { enum virtcca_cvm_state state; u32 cvm_vmid; @@ -93,6 +125,13 @@ struct virtcca_cvm { struct kvm_numa_info numa_info; struct tmi_cvm_params *params; bool is_mapped; /* Whether the cvm RAM memory is mapped */ +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + struct virtcca_mig_state *mig_state; + struct mig_cvm *mig_cvm_info; +#endif + u64 swiotlb_start; + u64 swiotlb_end; + u64 ipa_start; }; /* @@ -119,9 +158,11 @@ int handle_cvm_exit(struct kvm_vcpu *vcpu, int rec_run_status); int kvm_arm_create_cvm(struct kvm *kvm); void kvm_free_rd(struct kvm *kvm); int cvm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target); +u64 kvm_get_host_numa_set_by_vcpu(u64 vcpu, struct kvm *kvm); void kvm_cvm_unmap_destroy_range(struct kvm *kvm); int kvm_cvm_map_range(struct kvm *kvm); +int kvm_cvm_mig_map_range(struct kvm *kvm); int virtcca_cvm_arm_smmu_domain_set_kvm(void *group); int cvm_map_unmap_ipa_range(struct kvm *kvm, phys_addr_t ipa_base, phys_addr_t pa, unsigned long map_size, uint32_t is_map); @@ -157,4 +198,52 @@ static inline unsigned long cvm_ttt_level_mapsize(int level) return (1UL << CVM_TTT_LEVEL_SHIFT(level)); } +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +/* virtcca MIG sub-ioctl() commands. */ +enum kvm_cvm_cmd_id { + /* virtcca MIG migcvm commands. */ + KVM_CVM_MIGCVM_PREBIND = 0, + KVM_CVM_MIGCVM_BIND, + KVM_CVM_GET_BIND_INFO, + /* virtcca MIG stream commands. */ + KVM_CVM_MIG_STREAM_START, + KVM_CVM_MIG_EXPORT_STATE_IMMUTABLE, + KVM_CVM_MIG_IMPORT_STATE_IMMUTABLE, + KVM_CVM_MIG_EXPORT_MEM, + KVM_CVM_MIG_IMPORT_MEM, + KVM_CVM_MIG_EXPORT_TRACK, + KVM_CVM_MIG_IMPORT_TRACK, + KVM_CVM_MIG_EXPORT_PAUSE, + KVM_CVM_MIG_EXPORT_STATE_MUTABLE, + KVM_CVM_MIG_IMPORT_STATE_MUTABLE, + KVM_CVM_MIG_EXPORT_STATE_TEC, + KVM_CVM_MIG_IMPORT_STATE_TEC, + KVM_CVM_MIG_EXPORT_ABORT, + KVM_CVM_MIG_IMPORT_END, + KVM_CVM_MIG_CRC, + KVM_CVM_MIG_GET_MIG_INFO, + + KVM_CVM_MIG_CMD_NR_MAX, +}; + +struct kvm_virtcca_mig_cmd { + /* enum kvm_tdx_cmd_id */ + __u32 id; + /* flags for sub-commend. 
If sub-command doesn't use this, set zero. */ + __u32 flags; + /* + * data for each sub-command. An immediate or a pointer to the actual + * data in process virtual address. If sub-command doesn't use it, + * set zero. + */ + __u64 data; + /* + * Auxiliary error code. The sub-command may return TDX SEAMCALL + * status code in addition to -Exxx. + * Defined for consistency with struct kvm_sev_cmd. + */ + __u64 error; +}; +void print_stream_info(struct kvm *kvm); +#endif #endif diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 97941e582d83..bdb89a011cce 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -509,6 +509,24 @@ struct kvm_smccc_filter { #define KVM_HYPERCALL_EXIT_SMC (1U << 0) #define KVM_HYPERCALL_EXIT_16BIT (1U << 1) +/* mig virtcca head*/ +#define KVM_DEV_VIRTCCA_MIG_ATTR 0x1 + +struct kvm_dev_virtcca_mig_attr { +#define KVM_DEV_VIRTCCA_MIG_ATTR_VERSION 0 + __u32 version; +/* 4KB buffer can hold 512 entries at most */ +#define VIRTCCA_MIG_BUF_LIST_PAGES_MAX 512 + __u32 buf_list_pages; + __u32 max_migs; +}; + +#define VIRTCCA_MIG_STREAM_MBMD_MAP_OFFSET 0 +#define VIRTCCA_MIG_STREAM_GPA_LIST_MAP_OFFSET 1 +#define VIRTCCA_MIG_STREAM_MAC_LIST_MAP_OFFSET 2 +#define VIRTCCA_MIG_STREAM_BUF_LIST_MAP_OFFSET 4 + + #endif #endif /* __ARM_KVM_H__ */ diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig index 52edbd7f6340..f45b89464f26 100644 --- a/arch/arm64/kvm/Kconfig +++ b/arch/arm64/kvm/Kconfig @@ -57,6 +57,14 @@ config HISI_VIRTCCA_HOST If unsure, say N. +config HISI_VIRTCCA_MIG_HOST + bool "Enable cvm host live migration" + depends on HISI_VIRTCCA_HOST + help + Support VIRTCCA CVM host live migration + + If unsure, say N. + config NVHE_EL2_DEBUG bool "Debug mode for non-VHE EL2 object" depends on KVM diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 5bd31fc0e446..71bbca44b050 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1913,6 +1913,12 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) case KVM_LOAD_USER_DATA: { return kvm_load_user_data(kvm, arg); } +#endif +/* add the migcvm ioctl*/ +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + case KVM_CVM_MIG_IOCTL: { + return kvm_migcvm_ioctl(kvm, arg); + } #endif case KVM_CREATE_IRQCHIP: { int ret; diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 3830aa0b07a0..375f1a38ee4b 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1174,6 +1174,83 @@ static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot) write_unlock(&kvm->mmu_lock); } +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +void virtcca_enable_log_dirty(struct kvm *kvm, uint64_t start, uint64_t end) +{ + struct arm_smccc_res res; + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + uint64_t ns_mem_start, ns_mem_end; + uint64_t s_start = cvm->ipa_start; + uint64_t s_end = cvm->ipa_start + cvm->ram_size; + /* no overlap */ + + if (start < s_start || end > s_end) { + pr_err("ns world mig: virtcca_enable_log_dirty start %lx end %lx\n", start, end); + goto handle_ns_mem; + } + + if (start >= cvm->swiotlb_end || end <= cvm->swiotlb_start) { + res = tmi_mem_region_protect(cvm->rd, start, end, true); + if (res.a1 != 0) { + pr_err("tmi_mem_region_protect failed!\n"); + } + return; + } + + if (start <= cvm->swiotlb_start && end <= cvm->swiotlb_end) { + res = tmi_mem_region_protect(cvm->rd, start, cvm->swiotlb_start, true); + if (res.a1 != 0) { + pr_err("tmi_mem_region_protect failed!\n"); + } + ns_mem_start = cvm->swiotlb_start; + 
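+		/*
+		 * Summary of the case split handled here and below, with
+		 * S = [swiotlb_start, swiotlb_end) and R = [start, end):
+		 * - R disjoint from S: TMM-protect all of R, no NS range;
+		 * - R overlaps S on one side (this branch and the mirrored
+		 *   one below): TMM-protect the part outside S and
+		 *   write-protect the overlap as normal NS memory;
+		 * - R covers S: TMM-protect both sides, write-protect S;
+		 * - R inside S: write-protect all of R at stage 2.
+		 */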
+		ns_mem_end = end;
+		goto handle_ns_mem;
+	}
+
+	/*
+	 * The selected dirty page range fully encompasses the swiotlb
+	 * region. Since one round of dirty page logging covers at most
+	 * 64 pages (256KB), this branch should not happen.
+	 */
+	if (start <= cvm->swiotlb_start && end >= cvm->swiotlb_end) {
+		res = tmi_mem_region_protect(cvm->rd, start, cvm->swiotlb_start, true);
+		if (res.a1 != 0) {
+			pr_err("tmi_mem_region_protect failed!\n");
+		}
+
+		res = tmi_mem_region_protect(cvm->rd, cvm->swiotlb_end, end, true);
+		if (res.a1 != 0) {
+			pr_err("tmi_mem_region_protect failed!\n");
+		}
+		ns_mem_start = cvm->swiotlb_start;
+		ns_mem_end = cvm->swiotlb_end;
+		goto handle_ns_mem;
+	}
+
+	if (start >= cvm->swiotlb_start && end <= cvm->swiotlb_end) {
+		ns_mem_start = start;
+		ns_mem_end = end;
+		goto handle_ns_mem;
+	}
+
+	if (start >= cvm->swiotlb_start && end >= cvm->swiotlb_end) {
+		res = tmi_mem_region_protect(cvm->rd, cvm->swiotlb_end, end, true);
+		if (res.a1 != 0) {
+			pr_err("tmi_mem_region_protect failed!\n");
+		}
+		ns_mem_start = start;
+		ns_mem_end = cvm->swiotlb_end;
+		goto handle_ns_mem;
+	}
+	return;
+
+handle_ns_mem:
+	stage2_wp_range(&kvm->arch.mmu, ns_mem_start, ns_mem_end);
+	if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+		kvm_mmu_split_huge_pages(kvm, ns_mem_start, ns_mem_end);
+}
+#endif
+
 /*
  * kvm_arch_mmu_enable_log_dirty_pt_masked() - enable dirty logging for selected pages.
  * @kvm:	The KVM pointer
@@ -1195,6 +1272,13 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
 
+#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST
+	if (kvm_is_virtcca_cvm(kvm)) {
+		virtcca_enable_log_dirty(kvm, (uint64_t)start, (uint64_t)end);
+		return;
+	}
+#endif
+
 	stage2_wp_range(&kvm->arch.mmu, start, end);
 
 	/*
diff --git a/arch/arm64/kvm/tmi.c b/arch/arm64/kvm/tmi.c
index 3c1425c62d31..0a681f7c6c52 100644
--- a/arch/arm64/kvm/tmi.c
+++ b/arch/arm64/kvm/tmi.c
@@ -395,10 +395,195 @@ u64 tmi_dev_create(u64 params)
 	return res.a1;
 }
 
-u64 tmi_dev_destroy(u64 dev_num, u64 is_clear)
+u64 tmi_dev_destroy(u64 dev_num, u64 clean)
 {
 	struct arm_smccc_res res;
 
-	arm_smccc_1_1_smc(TMI_TMM_DEV_DESTROY, dev_num, is_clear, &res);
+	arm_smccc_1_1_smc(TMI_TMM_DEV_DESTROY, dev_num, clean, &res);
 	return res.a1;
 }
+
+#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST
+/* additional TMI calls for migration */
+u64 tmi_get_mig_config(void)
+{
+	struct arm_smccc_res res;
+
+	/* calculate the max number of these pages (rd, vcpu) */
+	arm_smccc_1_1_smc(TMI_TMM_MIG_CONTROL, TMI_TMM_GET_MIG_CONFIG, &res);
+	return res.a1;
+}
+
+u64 tmi_mig_stream_create(u64 rd, u64 numa_set)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_smc(TMI_TMM_MIG_CONTROL, TMI_TMM_MIG_STREAM_CREATE, rd, numa_set, &res);
+	return res.a1;
+}
+
+u64 tmi_set_tmm_memslot(uint64_t rd, uint64_t mig_memslot_param)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_smc(TMI_TMM_MIG_CONTROL, TMI_TMM_SET_TMM_MEMSLOT, rd, mig_memslot_param, &res);
+	return res.a1;
+}
+
+u64 tmi_get_swiotlb(uint64_t rd, uint64_t swiotlb_start_addr, uint64_t swiotlb_end_addr)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_smc(TMI_TMM_MIG_CONTROL, TMI_TMM_GET_SWIOTLB, rd,
+			  __pa(swiotlb_start_addr), __pa(swiotlb_end_addr), &res);
+	return res.a1;
+}
+
+struct arm_smccc_res tmi_mem_region_protect(u64 rd, u64 start, u64 end, bool protect)
+{
+	struct arm_smccc_res res;
+
+	arm_smccc_1_1_smc(TMI_TMM_MIG_CONTROL, TMI_TMM_MIG_MEM_REGION_PROTECT,
+			  rd, start, end, protect, &res);
+	return res;
+}
+
+u64 tmi_import_commit(uint64_t rd)
+{
+	struct arm_smccc_res res;
+
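+	/*
+	 * Illustrative caller-side sketch (not an additional API): the
+	 * combined return value of these wrappers can be decoded with the
+	 * existing helpers from kvm_tmi.h:
+	 *
+	 *	u64 ret = tmi_import_commit(rd);
+	 *
+	 *	if (TMI_RETURN_STATUS(ret))
+	 *		kvm_err("import commit: status %llu index %llu\n",
+	 *			TMI_RETURN_STATUS(ret), TMI_RETURN_INDEX(ret));
+	 */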
arm_smccc_1_1_smc(TMI_TMM_MIG_CONTROL, TMI_TMM_MIG_IMPORT_COMMIT, rd, &res); + return res.a1; +} + +u64 tmi_checksum_src(uint64_t rd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_CONTROL, TMI_TMM_CHECKSUM_SRC, rd, &res); + return res.a1; +} + +u64 tmi_checksum_dst(uint64_t rd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_CONTROL, TMI_TMM_CHECKSUM_DST, rd, &res); + return res.a1; +} + +struct arm_smccc_res tmi_export_immutable(uint64_t rd, uint64_t hpa_and_size_pa, + uint64_t page_or_list, uint64_t mig_cmd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_EXPORT_IMMUTABLE, rd, hpa_and_size_pa, + page_or_list, mig_cmd, &res); + return res; +} + +u64 tmi_import_immutable(uint64_t rd, uint64_t hpa_and_size_pa, + uint64_t page_or_list, uint64_t mig_cmd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_IMPORT_IMMUTABLE, rd, hpa_and_size_pa, page_or_list, mig_cmd, &res); + return res.a1; +} + +u64 tmi_export_track(uint64_t rd, uint64_t hpa_and_size_pa, uint64_t mig_cmd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_EXPORT_TRACK, rd, hpa_and_size_pa, mig_cmd, &res); + return res.a1; +} + +u64 tmi_import_track(uint64_t rd, uint64_t hpa_and_size_pa, uint64_t mig_cmd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_IMPORT_TRACK, rd, hpa_and_size_pa, mig_cmd, &res); + return res.a1; +} + +struct arm_smccc_res tmi_import_mem(uint64_t rd, uint64_t mig_mem_param) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_IMPORT_MEM, rd, mig_mem_param, &res); + return res; +} + +struct arm_smccc_res tmi_export_mem(uint64_t rd, uint64_t mig_mem_param) +{ + struct arm_smccc_res res; + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_EXPORT_MEM, rd, mig_mem_param, &res); + return res; +} + +struct arm_smccc_res tmi_export_tec(u64 tec_pa, u64 mbmd_addr_and_size, u64 page_list_pa, u64 stream_info_pa) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_EXPORT_TEC, tec_pa, mbmd_addr_and_size, page_list_pa, stream_info_pa, &res); + return res; +} + +u64 tmi_import_tec(u64 tec_pa, u64 mbmd_addr_and_size, u64 page_list_pa, u64 stream_info_pa) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_IMPORT_TEC, tec_pa, mbmd_addr_and_size, page_list_pa, stream_info_pa, &res); + return res.a1; +} + +struct arm_smccc_res tmi_export_mutable(uint64_t rd, uint64_t hpa_and_size_pa, + uint64_t page_or_list, uint64_t mig_cmd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_EXPORT_MUTABLE, rd, hpa_and_size_pa, page_or_list, mig_cmd, &res); + return res; +} + +u64 tmi_import_mutable(uint64_t rd, uint64_t hpa_and_size_pa, + uint64_t page_or_list, uint64_t mig_cmd) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_DATA, TMI_TMM_MIG_IMPORT_MUTABLE, rd, hpa_and_size_pa, + page_or_list, mig_cmd, &res); + return res.a1; +} + +u64 tmi_migvm_init(uint64_t rd, uint64_t numa_set) +{ + struct arm_smccc_res res; + + arm_smccc_1_1_smc(TMI_TMM_MIG_ATTESTATION, TMI_TMM_MIGVM_INIT, rd, numa_set, &res); + return res.a1; +} +u64 tmi_migvm_clean(uint64_t migvm_rd) +{ + struct arm_smccc_res res; + arm_smccc_1_1_smc(TMI_TMM_MIG_ATTESTATION, TMI_TMM_MIGVM_CLEAN, migvm_rd, &res); + return res.a1; +} +u64 tmi_bind_add(uint64_t rd, uint64_t migvm_rd) +{ + struct arm_smccc_res res; + arm_smccc_1_1_smc(TMI_TMM_MIG_ATTESTATION, TMI_TMM_MIG_BIND, rd, 
migvm_rd, &res); + return res.a1; +} +u64 tmi_bind_clean(uint64_t rd, uint64_t migvm_rd) +{ + struct arm_smccc_res res; + arm_smccc_1_1_smc(TMI_TMM_MIG_ATTESTATION, TMI_TMM_MIG_BIND_CLEAN, rd, migvm_rd, &res); + return res.a1; +} +struct arm_smccc_res tmi_bind_peek(uint64_t rd, uint64_t migvm_rd) +{ + struct arm_smccc_res res; + arm_smccc_1_1_smc(TMI_TMM_MIG_ATTESTATION, TMI_TMM_MIG_BIND_PEEK, rd, migvm_rd, &res); + return res; +} +#endif \ No newline at end of file diff --git a/arch/arm64/kvm/virtcca_cvm.c b/arch/arm64/kvm/virtcca_cvm.c index 3a5d1b50f2fd..5b78115459e9 100644 --- a/arch/arm64/kvm/virtcca_cvm.c +++ b/arch/arm64/kvm/virtcca_cvm.c @@ -18,6 +18,9 @@ #include #include +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +#include "virtcca_mig.c" +#endif /* Protects access to cvm_vmid_bitmap */ static DEFINE_SPINLOCK(cvm_vmid_lock); @@ -27,6 +30,7 @@ DEFINE_STATIC_KEY_FALSE(virtcca_cvm_is_available); #define UEFI_MAX_SIZE 0x8000000 #define UEFI_DTB_START 0x40000000 #define DTB_MAX_SIZE 0x200000 +#define MIG_GPA_ADDRESS 0x40000000 bool is_virtcca_available(void) { @@ -112,7 +116,7 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level) * the configurable physical numa range in QEMU is 0-127, * but in real scenarios, 0-63 is sufficient. */ -static u64 kvm_get_host_numa_set_by_vcpu(u64 vcpu, struct kvm *kvm) +u64 kvm_get_host_numa_set_by_vcpu(u64 vcpu, struct kvm *kvm) { int64_t i; struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; @@ -174,6 +178,44 @@ int kvm_arm_create_cvm(struct kvm *kvm) goto out; } +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + if(cvm->params->migration_cap) { + ret = kvm_virtcca_mig_stream_ops_init(); /* init the migration main struct */ + if (ret) { + kvm_err("KVM support migration kvm_virtcca_mig_stream_ops_init failed: %d\n", cvm->cvm_vmid); + ret = -ENOMEM; + goto out; + } + + ret = virtcca_mig_capabilities_setup(cvm); + if (ret) { + kvm_err("KVM support migration virtcca_mig_capabilities_setup failed: %d\n", cvm->cvm_vmid); + ret = -ENOMEM; + goto out; + } + + ret = virtcca_mig_state_create(cvm); /* this state might along with the protected memory */ + if (ret) { + kvm_err("KVM support migration virtcca_mig_state_create failed: %d\n", cvm->cvm_vmid); + ret = -ENOMEM; + goto out; + } + } else { + pr_warn("warning : Migration Capability is not set\n"); + } + + if(cvm->params->migration_migvm_cap) { + ret = virtcca_migvm_init(cvm, numa_set); + if (ret) { + kvm_err("KVM support migration virtcca_migvm_init failed: %d\n", cvm->cvm_vmid); + ret = -ENOMEM; + goto out; + } + } else { + pr_warn("Info : This CVM is normal CVM\n"); + } +#endif + WRITE_ONCE(cvm->state, CVM_STATE_NEW); ret = 0; out: @@ -191,6 +233,30 @@ void kvm_destroy_cvm(struct kvm *kvm) struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; int ret; uint32_t cvm_vmid; + +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + if (cvm->mig_state) { + virtcca_mig_state_release(cvm); + kvm_virtcca_mig_stream_ops_exit(); /* disable mig config */ + } + /* disable migvm structure*/ + if (cvm->mig_cvm_info) { + if (cvm->mig_cvm_info->is_migvm) { + ret = virtcca_migvm_destroy(cvm); + if (ret) { + pr_warn("KVM destroy cVM mig binding_slot failed\n"); + } + cvm->mig_cvm_info->is_migvm = false; + } + if(cvm->mig_cvm_info->migvm_rd && !cvm->mig_cvm_info->is_migvm) { + ret = tmi_bind_clean(cvm->rd, cvm->mig_cvm_info->migvm_rd); + if (ret) { + pr_warn("KVM destroy cVM mig tmi_bind_clean failed\n"); + } + } + } +#endif + #ifdef CONFIG_HISI_VIRTCCA_CODA struct arm_smmu_domain *arm_smmu_domain; struct list_head smmu_domain_group_list; @@ -404,6 +470,10 @@ int 
kvm_finalize_vcpu_tec(struct kvm_vcpu *vcpu) struct virtcca_cvm *cvm = vcpu->kvm->arch.virtcca_cvm; struct virtcca_cvm_tec *tec = &vcpu->arch.tec; + if (tec->tec_created) { + return 0; + } + mutex_lock(&vcpu->kvm->lock); tec->tec_run = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); if (!tec->tec_run) { @@ -521,6 +591,35 @@ static int config_cvm_kae(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *c return 0; } +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +/* Get the qemu's transport migration config */ +static int config_cvm_migration(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct tmi_cvm_params *params; + + params = cvm->params; + + params->migration_cap = cfg->migration_cap; + params->migvm_pid = cfg->migvm_pid; + params->migvm_cid = cfg->migvm_cid; /* vsock cid of migvm */ + params->flags |= TMI_CVM_PARAM_FLAG_MIG; + return 0; +} + +static int config_cvm_migvm(struct kvm *kvm, struct kvm_cap_arm_tmm_config_item *cfg) +{ + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct tmi_cvm_params *params; + + params = cvm->params; + + params->migration_migvm_cap = cfg->migration_migvm_cap; + params->flags |= TMI_CVM_PARAM_FLAG_MIGVM; + return 0; +} +#endif + static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) { struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; @@ -546,6 +645,14 @@ static int kvm_tmm_config_cvm(struct kvm *kvm, struct kvm_enable_cap *cap) case KVM_CAP_ARM_TMM_CFG_KAE: r = config_cvm_kae(kvm, &cfg); break; +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + case KVM_CAP_ARM_TMM_CFG_MIG: /* enable the mig config of cvm */ + r = config_cvm_migration(kvm, &cfg); + break; + case KVM_CAP_ARM_TMM_CFG_MIG_CVM: + r = config_cvm_migvm(kvm, &cfg); + break; +#endif default: r = -EINVAL; @@ -590,6 +697,46 @@ int kvm_cvm_map_range(struct kvm *kvm) return ret; } + +int kvm_cvm_mig_map_range(struct kvm *kvm) +{ + int ret = 0; + u64 curr_numa_set; + int idx; + u64 l2_granule = cvm_granule_size(TMM_TTT_LEVEL_2); + struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; + struct kvm_numa_info *numa_info = &cvm->numa_info; + gpa_t gpa; + + curr_numa_set = kvm_get_first_binded_numa_set(kvm); + + gpa = MIG_GPA_ADDRESS; + + for (idx = 0; idx < numa_info->numa_cnt; idx++) { + struct kvm_numa_node *numa_node = &numa_info->numa_nodes[idx]; + + if (idx) + gpa = numa_node->ipa_start; + if (gpa >= numa_node->ipa_start && + gpa < numa_node->ipa_start + numa_node->ipa_size) { + ret = tmi_ttt_map_range(cvm->rd, gpa, + numa_node->ipa_size - gpa + numa_node->ipa_start, + curr_numa_set, numa_node->host_numa_nodes[0]); + if (ret) { + kvm_err("tmi_ttt_map_range failed: %d.\n", ret); + return ret; + } + } + } + /* Vfio driver will pin memory in advance, + * if the ram already mapped, activate cvm + * does not need to map twice + */ + cvm->is_mapped = true; + return ret; +} + + static int kvm_activate_cvm(struct kvm *kvm) { #ifdef CONFIG_HISI_VIRTCCA_CODA @@ -599,6 +746,20 @@ static int kvm_activate_cvm(struct kvm *kvm) #endif struct virtcca_cvm *cvm = kvm->arch.virtcca_cvm; +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + if(cvm->mig_state) { + if (cvm->mig_state->is_src == VIRTCCA_MIG_CAP_DST) { + tmi_checksum_dst(cvm->rd); + return 0; + } + } +#endif + + if (virtcca_cvm_state(kvm) == CVM_STATE_ACTIVE) { + kvm_info("cVM%d is already activated!\n", cvm->cvm_vmid); + return 0; + } + if (virtcca_cvm_state(kvm) != CVM_STATE_NEW) return -EINVAL; @@ -656,6 +817,15 @@ static int kvm_populate_ipa_cvm_range(struct kvm *kvm, u64 l2_granule = 
cvm_granule_size(TMM_TTT_LEVEL_2); phys_addr_t ipa_base1, ipa_end2; +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + if (cvm->mig_state) { + if (cvm->mig_state->is_src == 2) { + kvm_err("the ipa range is populated before migraion \n"); + return 0; + } + } +#endif + if (virtcca_cvm_state(kvm) != CVM_STATE_NEW) return -EINVAL; if (!IS_ALIGNED(args->populate_ipa_base1, PAGE_SIZE) || @@ -673,6 +843,8 @@ static int kvm_populate_ipa_cvm_range(struct kvm *kvm, ipa_base1 = round_down(args->populate_ipa_base1, l2_granule); ipa_end2 = round_up(args->populate_ipa_base2 + args->populate_ipa_size2, l2_granule); + cvm->ipa_start = ipa_base1; + /* uefi boot, uefi image and uefi ram from 0 to 128M */ if (ipa_base1 == UEFI_LOADER_START) { phys_addr_t ipa_base2 = round_down(args->populate_ipa_base2, l2_granule); @@ -751,12 +923,19 @@ static int tmi_check_version(void) version_major = TMI_ABI_VERSION_GET_MAJOR(res); version_minor = TMI_ABI_VERSION_GET_MINOR(res); - +#ifndef CONFIG_HISI_VIRTCCA_MIG_HOST + if (version_major != TMI_ABI_VERSION_MAJOR) { + kvm_err("Unsupported TMI_ABI (version %d %d)\n", version_major, + version_minor); + return -ENXIO; + } +#else /* add tmm version check: */ if (version_major != TMI_ABI_VERSION_MAJOR) { kvm_err("Unsupported TMI_ABI (version %d %d)\n", version_major, version_minor); return -ENXIO; } +#endif kvm_info("TMI ABI version %d,%d\n", version_major, version_minor); return 0; @@ -857,6 +1036,36 @@ static inline bool is_dtb_info_has_extend_data(u64 dtb_info) return dtb_info & 0x1; } +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +int kvm_migcvm_ioctl(struct kvm *kvm, unsigned long arg) +{ + struct kvm_virtcca_mig_cmd cvm_cmd; + int ret = 0; + void __user *argp = (void __user *)arg; + + if (!kvm_is_virtcca_cvm(kvm)) + return -EFAULT; + + if (copy_from_user(&cvm_cmd, argp, sizeof(struct kvm_virtcca_mig_cmd))) + return -EINVAL; + + if (cvm_cmd.id < KVM_CVM_MIGCVM_PREBIND || cvm_cmd.id >= KVM_CVM_MIG_STREAM_START) + return -EINVAL; + + switch (cvm_cmd.id) { + case KVM_CVM_MIGCVM_BIND: + ret = virtcca_binding_with_migvm_pid(kvm, &cvm_cmd); + break; + case KVM_CVM_GET_BIND_INFO: + ret = virtcca_get_bind_info(kvm, &cvm_cmd); + break; + default: + return -EINVAL; + } + return ret; +} +#endif + int kvm_load_user_data(struct kvm *kvm, unsigned long arg) { struct kvm_user_data user_data; diff --git a/arch/arm64/kvm/virtcca_mig.c b/arch/arm64/kvm/virtcca_mig.c new file mode 100644 index 000000000000..e4144fd64b04 --- /dev/null +++ b/arch/arm64/kvm/virtcca_mig.c @@ -0,0 +1,229 @@ +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define VIRTCCA_MIG_CAP_SRC 1 +#define VIRTCCA_MIG_CAP_DST 2 + +struct virtcca_bind_info { + int16_t version; + bool premig_done; +}; + +struct virtcca_mig_mbmd_data { /* both kvm and tmm can access */ + __u16 size; + __u16 mig_version; + __u16 migs_index; /* corresponding stream idx */ + __u8 mb_type; + __u8 rsvd0; /* reserve bit */ + __u32 mb_counter; + __u32 mig_epoch; + __u64 iv_counter; + __u8 type_specific_info[]; +} __packed; + +struct virtcca_mig_mbmd { + struct virtcca_mig_mbmd_data *data; + uint64_t hpa_and_size; /* Host physical address and size of the mbmd */ +}; + +#define VIRTCCA_MIG_EPOCH_START_TOKEN 0xffffffff + +/* + * The buffer list specifies a list of 4KB pages to be used by TDH_EXPORT_MEM + * and TDH_IMPORT_MEM to export and import guest memory pages. 
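+ * A hedged sketch of how one entry would be filled (the local names are
+ * illustrative, not part of this patch):
+ *
+ *	union virtcca_mig_buf_list_entry *e = &buf_list->entries[i];
+ *
+ *	e->val = 0;			// clears rsvd and invalid bits
+ *	e->pfn = page_to_pfn(buf_page);	// 4KB backing buffer page
+ *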
Each entry + * is 64-bit and points to a physical address of a 4KB page used as buffer. The + * list itself is a 4KB page, so it can hold up to 512 entries. + */ +union virtcca_mig_buf_list_entry { + uint64_t val; + struct { + uint64_t rsvd0 : 12; + uint64_t pfn : 40; + uint64_t rsvd1 : 11; + uint64_t invalid : 1; + }; +}; + +struct virtcca_mig_buf_list { + union virtcca_mig_buf_list_entry *entries; + // uint64_t *entries; + hpa_t hpa; +}; + +/* + * The page list specifies a list of 4KB pages to be used by the non-memory + * states export and import, i.e. TDH_EXPORT_STATE_* and TDH_IMPORT_STATE_*. + * Each entry is 64-bit and specifies the physical address of a 4KB buffer. + * The list itself is a 4KB page, so it can hold up to 512 entries. + */ +union virtcca_mig_page_list_info { + uint64_t val; + struct { + uint64_t rsvd0 : 12; + uint64_t pfn : 40; + uint64_t rsvd1 : 3; + uint64_t last_entry : 9; + }; +}; + +struct virtcca_mig_page_list { + hpa_t *entries; + union virtcca_mig_page_list_info info; +}; + + +/* check physical_mask */ +#define TDX_SPTE_PFN_MASK 0xffffffffff000 + +union virtcca_mig_gpa_list_entry { + uint64_t val; + struct{ + uint64_t level : 2; /* Bits 1:0 : Mapping level */ + uint64_t pending : 1; /* Bit 2 : Page is pending */ + uint64_t reserved_0 : 4; /* Bits 6:3 */ + uint64_t l2_map : 3; /* Bits 9:7 : L2 mapping flags */ + uint64_t mig_type : 2; /* Bits 11:10: Migration type */ + uint64_t gfn : 40; /* Bits 51:12 */ +#define GPA_LIST_OP_NOP 0 +#define GPA_LIST_OP_EXPORT 1 +#define GPA_LIST_OP_CANCEL 2 + uint64_t operation : 2; /* Bits 53:52 */ + uint64_t reserved_1 : 2; /* Bits 55:54 */ +#define GPA_LIST_S_SUCCESS 0 + uint64_t status : 5; /* Bits 56:52 */ + uint64_t reserved_2 : 3; /* Bits 63:61 */ + }; +}; + +#define VIRTCCA_MIG_GPA_LIST_MAX_ENTRIES \ + (PAGE_SIZE / sizeof(union virtcca_mig_gpa_list_entry)) + +/* + * The GPA list specifies a list of GPAs to be used by TDH_EXPORT_MEM and + * TDH_IMPORT_MEM, TDH_EXPORT_BLOCKW, and TDH_EXPORT_RESTORE. The list itself + * is 4KB, so it can hold up to 512 such 64-bit entries. + */ +union virtcca_mig_ipa_list_info { + uint64_t val; + struct { + uint64_t rsvd0 : 3; + uint64_t first_entry : 9; + uint64_t pfn : 40; + uint64_t rsvd1 : 3; + uint64_t last_entry : 9; + + }; +}; + +struct virtcca_mig_gpa_list { + union virtcca_mig_gpa_list_entry *entries; + union virtcca_mig_ipa_list_info info; +}; + +/* + * A MAC list specifies a list of MACs over 4KB migrated pages and their GPA + * entries. It is used by TDH_EXPORT_MEM and TDH_IMPORT_MEM. Each entry is + * 128-bit containing a single AES-GMAC-256 of a migrated page. The list itself + * is a 4KB page, so it can hold up to 256 entries. To support the export and + * import of 512 pages, two such MAC lists are needed to be passed to the TDX + * module. 
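+ *
+ * A sketch of the sizing math for one export call of n pages (illustrative
+ * only; DIV_ROUND_UP is the standard kernel helper):
+ *
+ *	n_mac_lists = DIV_ROUND_UP(n, 256);	// 128-bit MACs, 256 per 4KB
+ *	n_buf_pages = n;			// one 4KB buffer per page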
+ */ +struct virtcca_mig_mac_list { + void *entries; + hpa_t hpa; +}; + +union virtcca_mig_stream_info { + uint64_t val; + struct { + uint64_t index : 16; + uint64_t rsvd : 47; + uint64_t resume : 1; + }; + struct { + uint64_t rsvd1 : 63; + uint64_t in_order : 1; + }; +}; + +struct virtcca_mig_stream { + uint16_t idx; /* stream id */ + uint32_t buf_list_pages; /* ns memory page number of buf_list 5> PAGE_SHIFT); memset(vaddr, 0, bytes); + swiotlb_unmap_notify(io_tlb_default_mem.defpool.start, bytes); io_tlb_default_mem.for_alloc = true; } #endif diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index bd1a496b5448..34f3243767c1 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -1448,6 +1448,8 @@ enum kvm_device_type { #define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_RISCV_AIA, #define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA + KVM_DEV_TYPE_VIRTCCA_MIG_STREAM = 0x00C, +#define KVM_DEV_TYPE_VIRTCCA_MIG_STREAM KVM_DEV_TYPE_VIRTCCA_MIG_STREAM KVM_DEV_TYPE_LA_IOAPIC = 0x100, #define KVM_DEV_TYPE_LA_IOAPIC KVM_DEV_TYPE_LA_IOAPIC KVM_DEV_TYPE_LA_IPI, @@ -1668,6 +1670,9 @@ struct kvm_enc_region { __u64 size; }; +/*virtcca migration*/ +#define KVM_CVM_MIG_IOCTL _IOWR(KVMIO, 0xf2, struct kvm_virtcca_mig_cmd) + #define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region) #define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 4b7378445812..6929cd63816c 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -70,6 +70,11 @@ #include #include +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +#include +#include +#endif + /* Worst case buffer size needed for holding an integer. */ #define ITOA_MAX_LEN 12 @@ -1428,6 +1433,19 @@ static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) return 0; } +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +static int virtcca_kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) +{ + unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot); + + memslot->dirty_bitmap = kmalloc(2 * dirty_bytes, GFP_KERNEL_ACCOUNT); + if (!memslot->dirty_bitmap) + return -ENOMEM; + + return 0; +} +#endif + static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id) { struct kvm_memslots *active = __kvm_memslots(kvm, as_id); @@ -1652,6 +1670,16 @@ static int kvm_prepare_memory_region(struct kvm *kvm, new->dirty_bitmap = NULL; else if (old && old->dirty_bitmap) new->dirty_bitmap = old->dirty_bitmap; +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST + else if (kvm_is_virtcca_cvm(kvm) && kvm_use_dirty_bitmap(kvm)) { + r = virtcca_kvm_alloc_dirty_bitmap(new); + if (r) + return r; + virtcca_set_tmm_memslot(kvm, new); + if (kvm_dirty_log_manual_protect_and_init_set(kvm)) + bitmap_set(new->dirty_bitmap, 0, new->npages); + } +#endif else if (kvm_use_dirty_bitmap(kvm)) { r = kvm_alloc_dirty_bitmap(new); if (r) @@ -6285,6 +6313,30 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void) return &kvm_running_vcpu; } +/* + * kvm_get_target_kvm - get the target kvm from vm_list using pid + * + * Returns: the target kvm struct on success, NULL if not found. 
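+ *
+ * A hedged usage sketch (caller context is hypothetical): migration code
+ * can resolve the migration VM from the pid configured via
+ * KVM_CAP_ARM_TMM_CFG_MIG:
+ *
+ *	struct kvm *migvm = kvm_get_target_kvm(params->migvm_pid);
+ *
+ *	if (!migvm)
+ *		return -ESRCH;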
+ */ +#ifdef CONFIG_HISI_VIRTCCA_MIG_HOST +struct kvm *kvm_get_target_kvm(pid_t pid) +{ + struct kvm *kvm, *target_kvm = NULL; + + mutex_lock(&kvm_lock); + list_for_each_entry(kvm, &vm_list, vm_list) { + if (kvm->userspace_pid == pid) { + target_kvm = kvm; + break; + } + } + mutex_unlock(&kvm_lock); + + return target_kvm; +} +EXPORT_SYMBOL_GPL(kvm_get_target_kvm); +#endif + #ifdef CONFIG_GUEST_PERF_EVENTS static unsigned int kvm_guest_state(void) { -- Gitee