diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 8fb3f2eee75fd9f08553314bfb4c962c32d64aa4..557191ff913428deca7219a3f8eabfb86980c177 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -26,6 +26,10 @@
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
+#ifdef CONFIG_UB
+#include "hw/ub/ub.h"
+#include "hw/ub/ub_usi.h"
+#endif // CONFIG_UB
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
@@ -53,6 +57,7 @@
#include "sysemu/stats.h"
#include "sysemu/kvm.h"
+#include "qemu/log.h"
/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
@@ -2038,6 +2043,64 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
}
+#ifdef CONFIG_UB
+int kvm_irqchip_add_usi_route(KVMRouteChange *c, USIMessage msg,
+ uint32_t devid, UBDevice *udev)
+{
+ struct kvm_irq_routing_entry kroute = {};
+ int virq;
+ KVMState *s = c->s;
+
+ virq = kvm_irqchip_get_virq(s);
+ if (virq < 0) {
+ qemu_log("kvm irqchip get virq failed\n");
+ return virq;
+ }
+
+ kroute.gsi = virq;
+ kroute.type = KVM_IRQ_ROUTING_MSI;
+ kroute.flags = 0;
+ kroute.u.msi.address_lo = (uint32_t)msg.address;
+ kroute.u.msi.address_hi = msg.address >> 32;
+ kroute.u.msi.data = le32_to_cpu(msg.data);
+ kroute.flags = KVM_MSI_VALID_DEVID;
+ kroute.u.msi.devid = devid;
+
+ if (udev && kvm_arch_fixup_usi_route(&kroute, msg.address, msg.data, udev)) {
+ kvm_irqchip_release_virq(s, virq);
+ return -EINVAL;
+ }
+
+ kvm_add_routing_entry(s, &kroute);
+ c->changes++;
+
+ return virq;
+}
+
+int kvm_irqchip_update_usi_route(KVMRouteChange *c, int virq, USIMessage msg, UBDevice *udev)
+{
+ struct kvm_irq_routing_entry kroute = {};
+
+ qemu_log("ub device(%s %s) virq(%d) start update usi route.\n",
+ udev->name, udev->qdev.id, virq);
+ kroute.gsi = virq;
+ kroute.type = KVM_IRQ_ROUTING_MSI;
+ kroute.flags = 0;
+ kroute.u.msi.address_lo = (uint32_t)msg.address;
+ kroute.u.msi.address_hi = msg.address >> 32;
+ kroute.u.msi.data = le32_to_cpu(msg.data);
+ kroute.flags = KVM_MSI_VALID_DEVID;
+ kroute.u.msi.devid = ub_interrupt_id(udev);
+
+ if (udev && kvm_arch_fixup_usi_route(&kroute, msg.address, msg.data, udev)) {
+ qemu_log("failed to fixup usi route: addr(0x%lx) data(%u).\n", msg.address, msg.data);
+ return -EINVAL;
+ }
+
+ return kvm_update_routing_entry(c, &kroute);
+}
+#endif // CONFIG_UB
+
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
{
struct kvm_irq_routing_entry kroute = {};
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index de914a91362903ff7e9dfebf9f80c5142edbe28c..2c8c8df07c7745de9f04dac6c00edd441e758086 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -1755,8 +1755,10 @@ static void create_virtio_iommu_dt_bindings(VirtMachineState *vms)
static void create_ub(VirtMachineState *vms)
{
DeviceState *ubc;
+ DeviceState *ummu;
MemoryRegion *mmio_reg;
MemoryRegion *mmio_alias;
+ BusControllerState *ubc_state;
if (ub_cfg_addr_map_table_init() < 0) {
qemu_log("failed to init ub cfg addr map table\n");
@@ -1795,6 +1797,19 @@ static void create_ub(VirtMachineState *vms)
vms->memmap[VIRT_UB_IDEV_ERS].size);
memory_region_add_subregion(get_system_memory(),
vms->memmap[VIRT_UB_IDEV_ERS].base, mmio_alias);
+ if (vms->ummu) {
+ ummu = qdev_new(TYPE_UB_UMMU);
+ ubc_state = BUS_CONTROLLER(ubc);
+ object_property_set_link(OBJECT(ummu), "primary-bus", OBJECT(ubc_state->bus), &error_abort);
+        /* default set ummu nested */
+ object_property_set_bool(OBJECT(ummu), "nested", true, &error_abort);
+ qdev_prop_set_uint64(ummu, "ub-ummu-reg-size", UMMU_REG_SIZE);
+ sysbus_realize_and_unref(SYS_BUS_DEVICE(ummu), &error_fatal);
+ sysbus_mmio_map(SYS_BUS_DEVICE(ummu), 0,
+ vms->memmap[VIRT_UBC_BASE_REG].base + UMMU_REG_OFFSET);
+ } else {
+ qemu_log("ummu disabled.\n");
+ }
}
#endif // CONFIG_UB
static void create_pcie(VirtMachineState *vms)
diff --git a/hw/ub/meson.build b/hw/ub/meson.build
index d629174ef8bf508712ce317920f7a8f5ff6173ba..2991d45416b46c173e200f08cdd0fbfc7c739bb2 100644
--- a/hw/ub/meson.build
+++ b/hw/ub/meson.build
@@ -2,10 +2,12 @@ ub_ss = ss.source_set()
ub_ss.add(files(
'ub.c',
'ub_ubc.c',
+ 'ub_ummu.c',
'ub_config.c',
'ub_acpi.c',
'ub_enum.c',
'ub_common.c',
+ 'ub_usi.c',
'ub_cna_mgmt.c',
'ub_sec.c',
))
diff --git a/hw/ub/trace-events b/hw/ub/trace-events
index d24c754de1a46ce36db149941349a43e76e476f0..986dab9e48450f443a197760455dcf0df2db3612 100644
--- a/hw/ub/trace-events
+++ b/hw/ub/trace-events
@@ -1,3 +1,29 @@
+# See docs/devel/tracing.rst for syntax documentation.
+
+# ub_ummu.c
+ummu_mcmdq_reg_writel(uint32_t idx, uint32_t prod, uint32_t cons) "mcmdq process: idx(%u), prod(%u), cons(%u)"
+mcmdq_process_task(uint32_t mcmdq_idx, const char *cmd) "mcmdq_idx: %u, cmd: %s"
+mcmdq_cmd_sync_handler(uint32_t mcmdq_idx, uint64_t usi_addr, uint32_t usi_data) "CMD_SYNC: mcmdq_idx(%u) usi_addr(0x%lx) usi_data(0x%x)"
+mcmdq_cmd_cfgi_tect_handler(uint32_t mcmdq_idx, uint32_t tecte_tag) "CMD_CFGI_TECT: mcmdq_idx(%u) tecte_tag(%u)"
+mcmdq_cmd_cfgi_tect_range_handler(uint32_t mcmdq_idx, uint32_t tecte_tag, uint32_t range) "CMD_CFGI_TECT_RANGE: mcmdq_idx(%u) tecte_tag(%u) range(%u)"
+mcmdq_cmd_cfgi_tct_handler(uint32_t mcmdq_idx, uint32_t tecte_tag) "CMD_CFGI_TCT: mcmdq_idx(%u) tecte_tag(%u)"
+mcmdq_cmd_cfgi_tct_all_handler(uint32_t mcmdq_idx) "CMD_CFGI_TCT_ALL: mcmdq_idx(%u)"
+mcmdq_cmd_plbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_PLBIx: mcmdq_idx(%u) cmd(%s)"
+mcmdq_cmd_tlbi_x_process(uint32_t mcmdq_idx, const char *cmd) "CMD_TLBIx: mcmdq_idx(%u) cmd(%s)"
+mcmdq_cmd_create_kvtbl(uint32_t mcmdq_idx, uint32_t dest_eid, uint32_t tecte_tag) "CMD_CREATE_KVTBL: mcmdq_idx(%u) dest_eid(%u) tecte_tag(%u)"
+mcmdq_cmd_delete_kvtbl(uint32_t mcmdq_idx, uint32_t dest_eid) "CMD_DELETE_KVTBL: mcmdq_idx(%u) dest_eid(%u)"
+mcmdq_cmd_null(uint32_t mcmdq_idx, uint64_t addr, void *hva, uint64_t size, uint64_t rb_size) "CMD_NULL: mcmdq_idx(%u) addr(0x%lx) hva(%p) size(0x%lx) rb_size(0x%lx)"
+ummu_mcmdq_base_reg_writell(uint8_t idx, uint64_t base, uint8_t log2size) "idx(%u) base(0x%lx) log2size(0x%x)"
+ummu_eventq_req_writell(uint64_t base, uint8_t log2size) "base(0x%lx) log2size(0x%x)"
+ummu_eventq_usi_reg_writell(uint64_t addr) "set eventq usi addr 0x%lx"
+ummu_glberr_usi_reg_writell(uint64_t addr) "set glb err usi addr 0x%lx"
+ummu_mapt_ctx_base_reg_writell(uint64_t addr) "config mapt ctx base 0x%lx"
+ummu_glb_int_enable(int type, int virq) "int type(%d) virq(%d)"
+ummu_config_tecte(int valid, int mode) "tecte: valid(%d), st_mode(0x%x)"
+ummu_invalid_single_tecte(uint32_t tecte_tag) "tecte_tag: %u"
+ummu_dev_install_nested_tecte(uint64_t tecte0, uint64_t tecte1) "installed tecte[0]: 0x%lx, tecte[1]: 0x%lx"
+ummu_install_nested_tecte(long long unsigned int tecte0, long long unsigned int tecte1) "installed tecte[0]: 0x%llx, tecte[1]: 0x%llx"
+
# ub.c
ub_update_mappings(int i, uint64_t region_size, uint64_t old_addr, uint64_t new_addr) "region[%d], size: 0x%lx, old_addr: 0x%lx, new_addr: 0x%lx"
ub_update_mappings_add(uint64_t new_addr) "commit region addr to 0x%lx"
diff --git a/hw/ub/ub.c b/hw/ub/ub.c
index 21481b950cffe09c90477e0f09a25c3a4fa6a729..6d42abfe275fc7220cff69c10507f8b19bf757e3 100644
--- a/hw/ub/ub.c
+++ b/hw/ub/ub.c
@@ -625,6 +625,16 @@ BusControllerState *container_of_ubbus(UBBus *bus)
return NULL;
}
+AddressSpace *ub_device_iommu_address_space(UBDevice *dev)
+{
+ UBBus *bus = ub_get_bus(dev);
+
+ if (bus->iommu_ops && bus->iommu_ops->get_address_space) {
+ return bus->iommu_ops->get_address_space(bus, bus->iommu_opaque, dev->eid);
+ }
+ return &address_space_memory;
+}
+
UBDevice *ub_find_device_by_id(const char *id)
{
BusControllerState *ubc = NULL;
@@ -898,6 +908,14 @@ static int ub_dev_init_port_info_by_cmd(Error **errp)
return 0;
}
+uint32_t ub_interrupt_id(UBDevice *udev)
+{
+ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_CAP4_INT_TYPE2, true);
+ UbCfg1IntType2Cap *cfg1_int_cap = (UbCfg1IntType2Cap *)(udev->config + offset);
+
+ return cfg1_int_cap->interrupt_id;
+}
+
/*
* now all ub device add, finally setup for all ub device.
* 1. check ub device bus instance type
@@ -916,4 +934,26 @@ int ub_dev_finally_setup(VirtMachineState *vms, Error **errp)
ub_set_ubinfo_in_ubc_table(vms);
return 0;
-}
\ No newline at end of file
+}
+
+void ub_setup_iommu(UBBus *bus, const UBIOMMUOps *ops, void *opaque)
+{
+ /*
+ * If called, ub_setup_iommu() should provide a minimum set of
+ * useful callbacks for the bus.
+ */
+ bus->iommu_ops = ops;
+ bus->iommu_opaque = opaque;
+}
+
+uint32_t ub_dev_get_token_id(UBDevice *udev)
+{
+ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG1_DEV_TOKEN_ID_OFFSET, true);
+ return *(uint32_t *)(udev->config + offset);
+}
+
+uint32_t ub_dev_get_ueid(UBDevice *udev)
+{
+ uint64_t offset = ub_cfg_offset_to_emulated_offset(UB_CFG0_DEV_UEID_OFFSET, true);
+ return *(uint32_t *)(udev->config + offset);
+}
diff --git a/hw/ub/ub_common.c b/hw/ub/ub_common.c
index 3f8dff2a45ba41cc73722fcc44dde3357f7438e2..368d420463c4adbcb5b1a2162e70c5aa68335b50 100644
--- a/hw/ub/ub_common.c
+++ b/hw/ub/ub_common.c
@@ -86,4 +86,15 @@ uint32_t fill_cq(BusControllerState *s, HiMsgCqe *cqe)
ub_set_long(s->msgq_reg + CQ_PI, ++pi % depth);
return pi;
-}
\ No newline at end of file
+}
+
+bool ub_guid_is_none(UbGuid *guid)
+{
+ if (guid->seq_num == 0 &&
+ guid->device_id == 0 && guid->version == 0 &&
+ guid->type == 0 && guid->vendor == 0) {
+ return true;
+ }
+
+ return false;
+}
diff --git a/hw/ub/ub_ubc.c b/hw/ub/ub_ubc.c
index 6d2441f3800ba513860a3b93310a8d39923f737d..0fc5255c051fe671cbb7d108ea7ea476d260c8dd 100644
--- a/hw/ub/ub_ubc.c
+++ b/hw/ub/ub_ubc.c
@@ -26,11 +26,13 @@
#include "hw/ub/ub.h"
#include "hw/ub/ub_bus.h"
#include "hw/ub/ub_ubc.h"
+#include "hw/ub/ub_ummu.h"
#include "hw/ub/ub_config.h"
#include "hw/ub/hisi/ubc.h"
#include "hw/ub/hisi/ub_mem.h"
#include "hw/ub/hisi/ub_fm.h"
#include "migration/vmstate.h"
+#include "hw/ub/ubus_instance.h"
static uint64_t ub_msgq_reg_read(void *opaque, hwaddr addr, unsigned len)
{
@@ -156,6 +158,7 @@ static void ub_bus_controller_realize(DeviceState *dev, Error **errp)
g_free(name);
}
+static void ub_bus_instance_guid_unlock(BusControllerDev *ubc_dev);
static void ub_bus_controller_unrealize(DeviceState *dev)
{
BusControllerState *s = BUS_CONTROLLER(dev);
@@ -164,6 +167,7 @@ static void ub_bus_controller_unrealize(DeviceState *dev)
QLIST_REMOVE(s, node);
ub_unregister_root_bus(s->bus);
ub_reg_free(dev);
+ ub_bus_instance_guid_unlock(s->ubc_dev);
}
static bool ub_bus_controller_needed(void *opaque)
@@ -408,6 +412,68 @@ static bool ub_ubc_is_empty(UBBus *bus)
return true;
}
+#define UB_BUSINSTANCE_GUID_LOCK_DIR "/run/libvirt/qemu"
+
+static int ub_bus_instance_guid_lock(UbGuid *guid)
+{
+ char path[256] = {0};
+ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0};
+ int lock_fd;
+
+ ub_device_get_str_from_guid(guid, guid_str, UB_DEV_GUID_STRING_LENGTH + 1);
+ snprintf(path, sizeof(path), "%s/ub-bus-instance-%s.lock", UB_BUSINSTANCE_GUID_LOCK_DIR, guid_str);
+ lock_fd = open(path, O_RDONLY | O_CREAT, 0600);
+ if (lock_fd < 0) {
+ qemu_log("failed to open lock file %s: %s\n", path, strerror(errno));
+ return -1;
+ }
+
+ if (flock(lock_fd, LOCK_EX | LOCK_NB)) {
+ qemu_log("lock %s failed: %s\n", path, strerror(errno));
+ close(lock_fd);
+ return -1;
+ }
+
+ return lock_fd;
+}
+
+static void ub_bus_instance_guid_unlock(BusControllerDev *ubc_dev)
+{
+ char guid_str[UB_DEV_GUID_STRING_LENGTH + 1] = {0};
+
+ if (ubc_dev->bus_instance_lock_fd <= 0) {
+ return;
+ }
+
+ ub_device_get_str_from_guid(&ubc_dev->bus_instance_guid, guid_str,
+ UB_DEV_GUID_STRING_LENGTH + 1);
+ qemu_log("unlock ub bus instance lock for guid: %s\n", guid_str);
+ if (flock(ubc_dev->bus_instance_lock_fd, LOCK_UN)) {
+ qemu_log("failed to unlock for bus instance guid %s: %s\n",
+ guid_str, strerror(errno));
+ }
+ close(ubc_dev->bus_instance_lock_fd);
+}
+
+static int ub_bus_instance_process(BusControllerDev *ubc_dev, Error **errp)
+{
+ int lock_fd;
+
+ if (ub_guid_is_none(&ubc_dev->bus_instance_guid)) {
+ error_setg(errp, "ubc bus instance guid is required");
+ return -1;
+ }
+
+ lock_fd = ub_bus_instance_guid_lock(&ubc_dev->bus_instance_guid);
+ if (lock_fd < 0) {
+ error_setg(errp, "ubc bus instance guid lock failed, it may used by other vm");
+ return -1;
+ }
+
+ ubc_dev->bus_instance_lock_fd = lock_fd;
+ return 0;
+}
+
static void ub_bus_controller_dev_realize(UBDevice *dev, Error **errp)
{
UBBus *bus = UB_BUS(qdev_get_parent_bus(DEVICE(dev)));
@@ -433,6 +499,16 @@ static void ub_bus_controller_dev_realize(UBDevice *dev, Error **errp)
dev->dev_type = UB_TYPE_IBUS_CONTROLLER;
ub_bus_controller_dev_config_space_init(dev);
+ if (0 > ummu_associating_with_ubc(ubc)) {
+ qemu_log("failed to associating ubc with ummu. %s\n", dev->name);
+ }
+
+ qemu_log("set type UB_TYPE_CONTROLLER, ubc %p, "
+ "ubc->ubc_dev %p, bus %p\n", ubc, ubc->ubc_dev, bus);
+ if (ub_bus_instance_process(ubc->ubc_dev, errp)) {
+ qemu_log("ub bus instance process failed\n");
+ return;
+ }
}
static Property ub_bus_controller_dev_properties[] = {
diff --git a/hw/ub/ub_ummu.c b/hw/ub/ub_ummu.c
new file mode 100644
index 0000000000000000000000000000000000000000..a2ea586bc24cf7114c79fca218acde19bdfd56cf
--- /dev/null
+++ b/hw/ub/ub_ummu.c
@@ -0,0 +1,2348 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/arm/virt.h"
+#include "hw/qdev-properties.h"
+#include "hw/ub/ub.h"
+#include "hw/ub/hisi/ummu.h"
+#include "hw/ub/ub_bus.h"
+#include "hw/ub/ub_ubc.h"
+#include "hw/ub/ub_ummu.h"
+#include "hw/ub/ub_config.h"
+#include "hw/ub/hisi/ubc.h"
+#include "migration/vmstate.h"
+#include "ub_ummu_internal.h"
+#include "sysemu/dma.h"
+#include "hw/arm/mmu-translate-common.h"
+#include "hw/ub/ub_ubc.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+
+static const char *const mcmdq_cmd_strings[MCMDQ_CMD_MAX] = {
+ [CMD_SYNC] = "CMD_SYNC",
+ [CMD_STALL_RESUME] = "CMD_STALL_RESUME",
+ [CMD_PREFET_CFG] = "CMD_PREFET_CFG",
+ [CMD_CFGI_TECT] = "CMD_CFGI_TECT",
+ [CMD_CFGI_TECT_RANGE] = "CMD_CFGI_TECT_RANGE",
+ [CMD_CFGI_TCT] = "CMD_CFGI_TCT",
+ [CMD_CFGI_TCT_ALL] = "CMD_CFGI_TCT_ALL",
+ [CMD_CFGI_VMS_PIDM] = "CMD_CFGI_VMS_PIDM",
+ [CMD_PLBI_OS_EID] = "CMD_PLBI_OS_EID",
+ [CMD_PLBI_OS_EIDTID] = "CMD_PLBI_OS_EIDTID",
+ [CMD_PLBI_OS_VA] = "CMD_PLBI_OS_VA",
+ [CMD_TLBI_OS_ALL] = "CMD_TLBI_OS_ALL",
+ [CMD_TLBI_OS_TID] = "CMD_TLBI_OS_TID",
+ [CMD_TLBI_OS_VA] = "CMD_TLBI_OS_VA",
+ [CMD_TLBI_OS_VAA] = "CMD_TLBI_OS_VAA",
+ [CMD_TLBI_HYP_ALL] = "CMD_TLBI_HYP_ALL",
+ [CMD_TLBI_HYP_TID] = "CMD_TLBI_HYP_TID",
+ [CMD_TLBI_HYP_VA] = "CMD_TLBI_HYP_VA",
+ [CMD_TLBI_HYP_VAA] = "CMD_TLBI_HYP_VAA",
+ [CMD_TLBI_S1S2_VMALL] = "CMD_TLBI_S1S2_VMALL",
+ [CMD_TLBI_S2_IPA] = "CMD_TLBI_S2_IPA",
+ [CMD_TLBI_NS_OS_ALL] = "CMD_TLBI_NS_OS_ALL",
+ [CMD_RESUME] = "CMD_RESUME",
+ [CMD_CREATE_KVTBL] = "CMD_CREATE_KVTBL",
+ [CMD_DELETE_KVTBL] = "CMD_DELETE_KVTBL",
+ [CMD_TLBI_OS_ALL_U] = "CMD_TLBI_OS_ALL_U",
+ [CMD_TLBI_OS_ASID_U] = "CMD_TLBI_OS_ASID_U",
+ [CMD_TLBI_OS_VA_U] = "CMD_TLBI_OS_VA_U",
+ [CMD_TLBI_OS_VAA_U] = "CMD_TLBI_OS_VAA_U",
+ [CMD_TLBI_HYP_ASID_U] = "CMD_TLBI_HYP_ASID_U",
+ [CMD_TLBI_HYP_VA_U] = "CMD_TLBI_HYP_VA_U",
+ [CMD_TLBI_S1S2_VMALL_U] = "CMD_TLBI_S1S2_VMALL_U",
+ [CMD_TLBI_S2_IPA_U] = "CMD_TLBI_S2_IPA_U",
+};
+
+static const char *const ummu_event_type_strings[EVT_MAX] = {
+ [EVT_NONE] = "EVT_NONE",
+ [EVT_UT] = "EVT_UT",
+ [EVT_BAD_DSTEID] = "EVT_BAD_DSTEID",
+ [EVT_TECT_FETCH] = "EVT_TECT_FETCH",
+ [EVT_BAD_TECT] = "EVT_BAD_TECT",
+ [EVT_RESERVE_0] = "EVT_RESERVE_0",
+ [EVT_BAD_TOKENID] = "EVT_BAD_TOKENID",
+ [EVT_TCT_FETCH] = "EVT_TCT_FETCH",
+ [EVT_BAD_TCT] = "EVT_BAD_TCT",
+ [EVT_A_PTW_EABT] = "EVT_A_PTW_EABT",
+ [EVT_A_TRANSLATION] = "EVT_A_TRANSLATION",
+ [EVT_A_ADDR_SIZE] = "EVT_A_ADDR_SIZE",
+ [EVT_ACCESS] = "EVT_ACCESS",
+ [EVT_A_PERMISSION] = "EVT_A_PERMISSION",
+ [EVT_TBU_CONFLICT] = "EVT_TBU_CONFLICT",
+ [EVT_CFG_CONFLICT] = "EVT_CFG_CONFLICT",
+ [EVT_VMS_FETCH] = "EVT_VMS_FETCH",
+ [EVT_P_PTW_EABT] = "EVT_P_PTW_EABT",
+ [EVT_P_CFG_ERROR] = "EVT_P_CFG_ERROR",
+ [EVT_P_PERMISSION] = "EVT_P_PERMISSION",
+ [EVT_RESERVE_1] = "EVT_RESERVE_1",
+ [EVT_EBIT_DENY] = "EVT_EBIT_DENY",
+ [EVT_CREATE_DSTEID_TECT_RELATION_RESULT] = "EVT_CREATE_DSTEID_TECT_RELATION_RESULT",
+ [EVT_DELETE_DSTEID_TECT_RELATION_RESULT] = "EVT_DELETE_DSTEID_TECT_RELATION_RESULT"
+};
+
+QLIST_HEAD(, UMMUState) ub_umms;
+UMMUState *ummu_find_by_bus_num(uint8_t bus_num)
+{
+ UMMUState *ummu;
+ QLIST_FOREACH(ummu, &ub_umms, node) {
+ if (ummu->bus_num == bus_num) {
+ return ummu;
+ }
+ }
+ return NULL;
+}
+
+static void ummu_cr0_process_task(UMMUState *u)
+{
+ u->ctrl0_ack = u->ctrl[0];
+}
+
+static uint64_t ummu_mcmdq_reg_readl(UMMUState *u, hwaddr offset)
+{
+ uint8_t mcmdq_idx;
+ uint64_t val = UINT64_MAX;
+
+ mcmdq_idx = (uint8_t)(offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK);
+ if (mcmdq_idx >= UMMU_MAX_MCMDQS) {
+ qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset);
+ return val;
+ }
+
+ switch (offset & MCMDQ_BASE_ADDR_MASK) {
+ case MCMDQ_PROD_BASE_ADDR:
+ val = u->mcmdqs[mcmdq_idx].queue.prod;
+ break;
+ case MCMDQ_CONS_BASE_ADDR:
+ val = u->mcmdqs[mcmdq_idx].queue.cons;
+ break;
+ default:
+ qemu_log("ummu cannot handle 32-bit mcmdq reg read access at 0x%lx\n", offset);
+ break;
+ }
+
+ return val;
+}
+
+static int ummu_mapt_get_cmdq_base(UMMUState *u, dma_addr_t base_addr, uint32_t qid, MAPTCmdqBase *base)
+{
+ int ret, i;
+ dma_addr_t addr = base_addr + qid * MAPT_CMDQ_CTXT_BASE_BYTES;
+
+ ret = dma_memory_read(&address_space_memory, addr, base, sizeof(*base),
+ MEMTXATTRS_UNSPECIFIED);
+ if (ret != MEMTX_OK) {
+ qemu_log("Cannot fetch mapt cmdq ctx at address=0x%lx\n", addr);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(base->word); i++) {
+ le32_to_cpus(&base->word[i]);
+ }
+
+ return 0;
+}
+
+static int ummu_mapt_update_cmdq_base(UMMUState *u, dma_addr_t base_addr, uint32_t qid, MAPTCmdqBase *base)
+{
+ int i;
+ dma_addr_t addr = base_addr + qid * MAPT_CMDQ_CTXT_BASE_BYTES;
+
+ for (i = 0; i < ARRAY_SIZE(base->word); i++, addr += sizeof(uint32_t)) {
+ uint32_t tmp = cpu_to_le32(base->word[i]);
+ if (dma_memory_write(&address_space_memory, addr, &tmp,
+ sizeof(uint32_t), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log("dma failed to wirte to addr 0x%lx\n", addr);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static uint64_t ummu_mapt_ctrlr_page_read_process(UMMUState *u, hwaddr offset)
+{
+ MAPTCmdqBase base;
+ uint32_t qid = ummu_mapt_cmdq_get_qid(u, offset);
+ dma_addr_t addr = MAPT_CMDQ_CTXT_BASE_ADDR(u->mapt_cmdq_ctxt_base);
+ int ret;
+ uint64_t val = UINT64_MAX;
+
+ if (!addr) {
+ /* mapt ctrlr page not init, return default val 0 */
+ return 0;
+ }
+
+ ret = ummu_mapt_get_cmdq_base(u, addr, qid, &base);
+ if (ret) {
+ qemu_log("failed to get mapt cmdq base.\n");
+ return val;
+ }
+
+ switch (offset & UCMDQ_UCPLQ_CI_PI_MASK) {
+ case UCMDQ_PI:
+ val = ummu_mapt_cmdq_base_get_ucmdq_pi(&base);
+ break;
+ case UCMDQ_CI:
+ val = ummu_mapt_cmdq_base_get_ucmdq_ci(&base);
+ break;
+ case UCPLQ_PI:
+ val = ummu_mapt_cmdq_base_get_ucplq_pi(&base);
+ break;
+ case UCPLQ_CI:
+ val = ummu_mapt_cmdq_base_get_ucplq_ci(&base);
+ break;
+ default:
+ qemu_log("cannot process addr(0x%lx) mpat ctrlr page read.\n", offset);
+ return val;
+ }
+
+ return val;
+}
+
+static uint64_t ummu_reg_readw(UMMUState *u, hwaddr offset)
+{
+ uint64_t val = UINT64_MAX;
+
+ switch (offset) {
+ case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG:
+ val = ummu_mapt_ctrlr_page_read_process(u, offset);
+ break;
+ default:
+ qemu_log("ummu cannot handle 16-bit read access at: 0x%lx\n", offset);
+ break;
+ }
+
+ return val;
+}
+
+static uint64_t ummu_reg_readl(UMMUState *u, hwaddr offset)
+{
+ uint64_t val = UINT64_MAX;
+
+ switch (offset) {
+ case A_CAP0...A_CAP6:
+ val = u->cap[(offset - A_CAP0) / 4];
+ break;
+ case A_CTRL0:
+ val = u->ctrl[0];
+ break;
+ case A_CTRL0_ACK:
+ val = u->ctrl0_ack;
+ break;
+ case A_CTRL1:
+ val = u->ctrl[1];
+ break;
+ case A_CTRL2:
+ val = u->ctrl[2];
+ break;
+ case A_CTRL3:
+ val = u->ctrl[3];
+ break;
+ case A_TECT_BASE_CFG:
+ val = u->tect_base_cfg;
+ break;
+ case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI:
+ val = ummu_mcmdq_reg_readl(u, offset);
+ break;
+ case A_EVENT_QUE_PI:
+ val = u->eventq.queue.prod;
+ break;
+ case A_EVENT_QUE_CI:
+ val = u->eventq.queue.cons;
+ break;
+ case A_EVENT_QUE_USI_DATA:
+ val = u->eventq.usi_data;
+ break;
+ case A_EVENT_QUE_USI_ATTR:
+ val = u->eventq.usi_attr;
+ break;
+ case A_GLB_INT_EN:
+ val = 0;
+        /* glb err interrupt bit enabled in bit 0 */
+ if (ummu_glb_err_int_en(u)) {
+ val |= 0x1;
+ }
+
+ /* event que interrupt bit enabled in bit 1 */
+ if (ummu_event_que_int_en(u)) {
+ val |= (1 << 1);
+ }
+ break;
+ case A_GLB_ERR:
+ val = u->glb_err.glb_err;
+ break;
+ case A_GLB_ERR_RESP:
+ val = u->glb_err.glb_err_resp;
+ break;
+ case A_GLB_ERR_INT_USI_DATA:
+ val = u->glb_err.usi_data;
+ break;
+ case A_GLB_ERR_INT_USI_ATTR:
+ val = u->glb_err.usi_attr;
+ break;
+ case A_RELEASE_UM_QUEUE_ID:
+ val = u->release_um_queue_id;
+ break;
+ case A_RELEASE_UM_QUEUE:
+ val = u->release_um_queue;
+ break;
+ case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG:
+ val = ummu_mapt_ctrlr_page_read_process(u, offset);
+ break;
+ case A_UMCMD_PAGE_SEL:
+ val = u->ucmdq_page_sel;
+ break;
+ case A_UMMU_USER_CONFIG0...A_UMMU_USER_CONFIG11:
+ case A_UMMU_MEM_USI_DATA:
+ case A_UMMU_MEM_USI_ATTR:
+ case A_UMMU_INT_MASK:
+ case A_UMMU_DSTEID_CAM_TABLE_BASE_CFG:
+ /* do nothing, reg return val 0 */
+ val = 0;
+ break;
+ default:
+ qemu_log("ummu cannot handle 32-bit read access at 0x%lx\n", offset);
+ break;
+ }
+
+ return val;
+}
+
+static uint64_t ummu_mcmdq_reg_readll(UMMUState *u, hwaddr offset)
+{
+ uint8_t mcmdq_idx;
+ uint64_t val = UINT64_MAX;
+
+ mcmdq_idx = (uint8_t)(offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK);
+ if (mcmdq_idx >= UMMU_MAX_MCMDQS) {
+ qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset);
+ return val;
+ }
+
+ switch (offset & MCMDQ_BASE_ADDR_MASK) {
+ case A_MCMD_QUE_BASE:
+ val = u->mcmdqs[mcmdq_idx].queue.base;
+ break;
+ default:
+ qemu_log("ummu cannot handle 64-bit mcmdq reg read access at 0x%lx\n", offset);
+ break;
+ }
+
+ return val;
+}
+
+static uint64_t ummu_reg_readll(UMMUState *u, hwaddr offset)
+{
+ uint64_t val = UINT64_MAX;
+
+ switch (offset) {
+ case A_TECT_BASE0:
+ val = u->tect_base;
+ break;
+ case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI:
+ val = ummu_mcmdq_reg_readll(u, offset);
+ break;
+ case A_EVENT_QUE_BASE0:
+ val = u->eventq.queue.base;
+ break;
+ case A_EVENT_QUE_USI_ADDR0:
+ val = u->eventq.usi_addr;
+ break;
+ case A_GLB_ERR_INT_USI_ADDR0:
+ val = u->glb_err.usi_addr;
+ break;
+ case A_MAPT_CMDQ_CTXT_BADDR0:
+ val = u->mapt_cmdq_ctxt_base;
+ break;
+ case A_UMMU_MEM_USI_ADDR0:
+ /* do nothing, reg return val 0 */
+ val = 0;
+ break;
+ default:
+ qemu_log("ummu cannot handle 64-bit read access at 0x%lx\n", offset);
+ break;
+ }
+
+ return val;
+}
+
+static uint64_t ummu_reg_read(void *opaque, hwaddr offset, unsigned size)
+{
+ UMMUState *u = opaque;
+ uint64_t val = UINT64_MAX;
+
+ switch (size) {
+ case 2:
+ val = ummu_reg_readw(u, offset);
+ break;
+ case 4:
+ val = ummu_reg_readl(u, offset);
+ break;
+ case 8:
+ val = ummu_reg_readll(u, offset);
+ break;
+ default:
+ break;
+ }
+
+ return val;
+}
+
+static void mcmdq_cmd_sync_usi_irq(uint64_t addr, uint32_t data)
+{
+ cpu_physical_memory_rw(addr, &data, sizeof(uint32_t), true);
+}
+
+static void mcmdq_cmd_sync_sev_irq(void)
+{
+ qemu_log("cannot support CMD_SYNC SEV event.\n");
+}
+
+static void ummu_glb_usi_notify(UMMUState *u, UMMUUSIVectorType type)
+{
+ USIMessage msg;
+
+ if (type == UMMU_USI_VECTOR_GERROR) {
+ msg = ummu_get_gerror_usi_message(u);
+ } else {
+ msg = ummu_get_eventq_usi_message(u);
+ }
+
+ usi_send_message(&msg, UMMU_INTERRUPT_ID, NULL);
+}
+
+static void mcmdq_cmd_sync_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+ uint32_t cm = CMD_SYNC_CM(cmd);
+
+ trace_mcmdq_cmd_sync_handler(mcmdq_idx, CMD_SYNC_USI_ADDR(cmd), CMD_SYNC_USI_DATA(cmd));
+ if (cm & CMD_SYNC_CM_USI) {
+ mcmdq_cmd_sync_usi_irq(CMD_SYNC_USI_ADDR(cmd), CMD_SYNC_USI_DATA(cmd));
+ } else if (cm & CMD_SYNC_CM_SEV) {
+ mcmdq_cmd_sync_sev_irq();
+ }
+}
+
+static void mcmdq_cmd_create_kvtbl(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+ UMMUKVTblEntry *entry = NULL;
+ uint32_t dst_eid = CMD_CREATE_KVTBL_DEST_EID(cmd);
+ uint32_t tecte_tag = CMD_CREATE_KVTBL_TECTE_TAG(cmd);
+
+ trace_mcmdq_cmd_create_kvtbl(mcmdq_idx, dst_eid, tecte_tag);
+
+ QLIST_FOREACH(entry, &u->kvtbl, list) {
+ if (entry->dst_eid == dst_eid) {
+ qemu_log("update kvtlb dst_eid(0x%x) tecte_tag from 0x%x to 0x%x\n",
+ dst_eid, entry->tecte_tag, tecte_tag);
+ entry->tecte_tag = tecte_tag;
+ return;
+ }
+ }
+
+ entry = g_malloc(sizeof(UMMUKVTblEntry));
+ if (!entry) {
+ qemu_log("failed to malloc for kvtbl entry for dst_eid(0x%x)\n", dst_eid);
+ return;
+ }
+
+ entry->dst_eid = dst_eid;
+ entry->tecte_tag = tecte_tag;
+ QLIST_INSERT_HEAD(&u->kvtbl, entry, list);
+}
+
+static void mcmdq_cmd_delete_kvtbl(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+ UMMUKVTblEntry *entry = NULL;
+ uint32_t dst_eid = CMD_DELETE_KVTBL_DEST_EID(cmd);
+
+ trace_mcmdq_cmd_delete_kvtbl(mcmdq_idx, dst_eid);
+
+ QLIST_FOREACH(entry, &u->kvtbl, list) {
+ if (entry->dst_eid == dst_eid) {
+ break;
+ }
+ }
+
+ if (entry) {
+ QLIST_REMOVE(entry, list);
+ g_free(entry);
+ } else {
+ qemu_log("cannot find dst_eid(0x%x) entry in kvtbl.\n", dst_eid);
+ }
+}
+
+static gboolean ummu_invalid_tecte(gpointer key, gpointer value, gpointer user_data)
+{
+ UMMUDevice *ummu_dev = (UMMUDevice *)key;
+ UMMUTransCfg *cfg = (UMMUTransCfg *)value;
+ UMMUTecteRange *range = (UMMUTecteRange *)user_data;
+
+ if (range->invalid_all ||
+ (cfg->tecte_tag >= range->start && cfg->tecte_tag <= range->end)) {
+ qemu_log("ummu start invalidate udev(%s) cached config.\n", ummu_dev->udev->qdev.id);
+ return true;
+ }
+
+ return false;
+}
+
+static void ummu_invalid_single_tecte(UMMUState *u, uint32_t tecte_tag)
+{
+ UMMUTecteRange tecte_range = { .invalid_all = false, };
+
+ trace_ummu_invalid_single_tecte(tecte_tag);
+ tecte_range.start = tecte_tag;
+ tecte_range.end = tecte_tag;
+ g_hash_table_foreach_remove(u->configs, ummu_invalid_tecte, &tecte_range);
+}
+
+static void ummu_uninstall_nested_tecte(gpointer key, gpointer value, gpointer opaque)
+{
+ UMMUDevice *ummu_dev = (UMMUDevice *)value;
+
+ ummu_dev_uninstall_nested_tecte(ummu_dev);
+}
+
+/* V | ST_MODE(.CONFIG) | TCRC_SEL(.STRW) */
+#define INSTALL_TECTE0_WORD0_MASK (GENMASK(0, 0) | GENMASK(1, 3) | GENMASK(22, 21))
+#define INSTALL_TECTE0_WORD1_MASK 0
+/* TCT_MAXNUM(.S1CDMax) | TCT_PTR[31:6](.S1ContextPtr) */
+#define INSTALL_TECTE1_WORD0_MASK (GENMASK(4, 0) | GENMASK(31, 6))
+/* TCT_PTR[51:32](.S1ContextPtr) | TCT_FMT(.S1Fmt) | TCT_STALL_EN(.S1STALLD) |
+ * TCT_Ptr_MD0(.S1CIR) | TCT_Ptr_MD1(.S1COR) | TCT_Ptr_MSD(.S1CSH) */
+#define INSTALL_TECTE1_WORD1_MASK (GENMASK(19, 0) | \
+ GENMASK(21, 20) | \
+ GENMASK(24, 24) | \
+ GENMASK(27, 26) | \
+ GENMASK(29, 28) | \
+ GENMASK(31, 30))
+
+static void ummu_install_nested_tecte(gpointer key, gpointer value, gpointer opaque)
+{
+ UMMUDevice *ummu_dev = (UMMUDevice *)value;
+ TECTE *tecte = (TECTE *)opaque;
+ struct iommu_hwpt_ummu iommu_config = {};
+ int ret;
+
+ if (ummu_dev->udev->dev_type != UB_TYPE_DEVICE &&
+ ummu_dev->udev->dev_type != UB_TYPE_IDEVICE) {
+ return;
+ }
+
+ if (!ummu_dev->vdev && ummu_dev->idev && ummu_dev->viommu) {
+ UMMUVdev *vdev = g_new0(UMMUVdev, 1);
+ /* default set virt_id to 0 */
+ vdev->core = iommufd_backend_alloc_vdev(ummu_dev->idev, ummu_dev->viommu->core, ummu_dev->udev->eid);
+ if (!vdev->core) {
+ error_report("failed to allocate a vDEVICE");
+ g_free(vdev);
+ return;
+ }
+ ummu_dev->vdev = vdev;
+ }
+
+ iommu_config.tecte[0] = (uint64_t)tecte->word[0] & INSTALL_TECTE0_WORD0_MASK;
+ iommu_config.tecte[0] |= ((uint64_t)tecte->word[1] & INSTALL_TECTE0_WORD1_MASK) << 32;
+ iommu_config.tecte[1] = (uint64_t)tecte->word[2] & INSTALL_TECTE1_WORD0_MASK;
+ iommu_config.tecte[1] |= ((uint64_t)tecte->word[3] & INSTALL_TECTE1_WORD1_MASK) << 32;
+ trace_ummu_install_nested_tecte(iommu_config.tecte[0], iommu_config.tecte[1]);
+ ret = ummu_dev_install_nested_tecte(ummu_dev, IOMMU_HWPT_DATA_UMMU,
+ sizeof(iommu_config), &iommu_config);
+ if (ret && ret != -ENOENT) {
+ error_report("Unable to alloc Stage-1 HW Page Table: %d", ret);
+ } else if (ret == 0) {
+ qemu_log("install nested tecte success.\n");
+ }
+}
+
+static int ummu_find_tecte(UMMUState *ummu, uint32_t tecte_tag, TECTE *tecte);
+static void ummu_config_tecte(UMMUState *u, uint32_t tecte_tag)
+{
+ TECTE tecte;
+ int ret;
+
+ ret = ummu_find_tecte(u, tecte_tag, &tecte);
+ if (ret) {
+ qemu_log("failed to find tecte\n");
+ return;
+ }
+
+ trace_ummu_config_tecte(TECTE_VALID(&tecte), TECTE_ST_MODE(&tecte));
+ if (!TECTE_VALID(&tecte) || TECTE_ST_MODE(&tecte) != TECTE_ST_MODE_S1) {
+ g_hash_table_foreach(u->ummu_devs, ummu_uninstall_nested_tecte, NULL);
+ return;
+ }
+
+ g_hash_table_foreach(u->ummu_devs, ummu_install_nested_tecte, &tecte);
+ if (u->tecte_tag_num >= UMMU_TECTE_TAG_MAX_NUM) {
+ qemu_log("unexpect tecte tag num over %u\n", UMMU_TECTE_TAG_MAX_NUM);
+ return;
+ } else {
+ u->tecte_tag_cache[u->tecte_tag_num++] = tecte_tag;
+ }
+}
+
+static void ummu_invalidate_cache(UMMUState *u, UMMUMcmdqCmd *cmd);
+static void mcmdq_cmd_cfgi_tect_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+ uint32_t tecte_tag = CMD_TECTE_TAG(cmd);
+
+ trace_mcmdq_cmd_cfgi_tect_handler(mcmdq_idx, tecte_tag);
+
+ ummu_invalid_single_tecte(u, tecte_tag);
+ ummu_config_tecte(u, tecte_tag);
+ ummu_invalidate_cache(u, cmd);
+}
+
+static void mcmdq_cmd_cfgi_tect_range_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+ uint32_t tecte_tag = CMD_TECTE_TAG(cmd);
+ uint8_t range = CMD_TECTE_RANGE(cmd);
+ uint32_t mask;
+ int i;
+ UMMUTecteRange tecte_range = { .invalid_all = false, };
+
+ trace_mcmdq_cmd_cfgi_tect_range_handler(mcmdq_idx, tecte_tag, range);
+
+ if (CMD_TECTE_RANGE_INVILID_ALL(range)) {
+ tecte_range.invalid_all = true;
+ } else {
+ mask = (1ULL << (range + 1)) - 1;
+ tecte_range.start = tecte_tag & ~mask;
+ tecte_range.end = tecte_range.start + mask;
+ }
+
+ g_hash_table_foreach_remove(u->configs, ummu_invalid_tecte, &tecte_range);
+ ummu_invalidate_cache(u, cmd);
+
+ if (tecte_range.invalid_all && u->tecte_tag_num > 0) {
+ for (i = u->tecte_tag_num - 1; i >= 0; i--) {
+ ummu_config_tecte(u, u->tecte_tag_cache[i]);
+ }
+ u->tecte_tag_num = 0;
+ return;
+ }
+
+ for (i = tecte_range.start; i <= tecte_range.end; i++) {
+ ummu_config_tecte(u, i);
+ }
+}
+
+static void mcmdq_cmd_cfgi_tct_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+ uint32_t tecte_tag = CMD_TECTE_TAG(cmd);
+
+ trace_mcmdq_cmd_cfgi_tct_handler(mcmdq_idx, tecte_tag);
+
+ ummu_invalid_single_tecte(u, tecte_tag);
+ ummu_invalidate_cache(u, cmd);
+}
+
+static void mcmdq_cmd_cfgi_tct_all_handler(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+ trace_mcmdq_cmd_cfgi_tct_all_handler(mcmdq_idx);
+
+ /* cfgi_tct & cfgi_tct_all process is the same */
+ mcmdq_cmd_cfgi_tct_handler(u, cmd, mcmdq_idx);
+}
+
+static void ummu_viommu_invalidate_cache(IOMMUFDViommu *viommu, uint32_t type, UMMUMcmdqCmd *cmd)
+{
+ int ret;
+ uint32_t tecte_tag = CMD_TECTE_TAG(cmd);
+ uint32_t ncmds = 1;
+
+ if (!viommu) {
+ return;
+ }
+
+ ret = iommufd_viommu_invalidate_cache(viommu->iommufd, viommu->viommu_id,
+ type, sizeof(*cmd), &ncmds, cmd);
+ if (ret) {
+ qemu_log("failed to invalidte cache for ummu, tecte_tag = %u, ret = %d\n", tecte_tag, ret);
+ }
+}
+
/*
 * Propagate an mcmdq command to the host IOMMU cache, if nested mode is
 * active. The viommu is shared by all devices behind this UMMU, so it is
 * enough to check that at least one attached device has a vdev.
 */
static void ummu_invalidate_cache(UMMUState *u, UMMUMcmdqCmd *cmd)
{
    IOMMUFDViommu *viommu = NULL;
    UMMUDevice *ummu_dev = NULL;

    if (!u->viommu) {
        return;
    }

    /* nothing to invalidate until a device with a virtual device id exists */
    ummu_dev = QLIST_FIRST(&u->viommu->device_list);
    if (!ummu_dev || !ummu_dev->vdev) {
        return;
    }

    viommu = u->viommu->core;
    ummu_viommu_invalidate_cache(viommu, IOMMU_VIOMMU_INVALIDATE_DATA_UMMU, cmd);
}
+
/* PLBI_* commands: trace and forward to the host cache-invalidation path. */
static void mcmdq_cmd_plbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
{
    trace_mcmdq_cmd_plbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]);
    ummu_invalidate_cache(u, cmd);
}
+
/* TLBI_* commands: trace and forward to the host cache-invalidation path. */
static void mcmdq_cmd_tlbi_x_process(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
{
    trace_mcmdq_cmd_tlbi_x_process(mcmdq_idx, mcmdq_cmd_strings[CMD_TYPE(cmd)]);
    ummu_invalidate_cache(u, cmd);
}
+
+static void mcmdq_check_pa_continuity_fill_result(UMMUMcmdQueue *mcmdq, bool continuity)
+{
+ uint8_t result = 0;
+ dma_addr_t addr;
+
+ result |= UMMU_RUN_IN_VM_FLAG;
+ if (continuity) {
+ result |= PA_CONTINUITY;
+ } else {
+ result |= PA_NOT_CONTINUITY;
+ }
+
+#define CHECK_PA_CONTINUITY_RESULT_OFFSET 0x2
+ addr = MCMD_QUE_BASE_ADDR(&mcmdq->queue) +
+ MCMD_QUE_RD_IDX(&mcmdq->queue) * mcmdq->queue.entry_size;
+ if (dma_memory_write(&address_space_memory, addr + CHECK_PA_CONTINUITY_RESULT_OFFSET,
+ &result, sizeof(result), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log("dma failed to wirte result(0x%x) to addr 0x%lx\n", result, addr);
+ return;
+ }
+
+ qemu_log("mcmdq check pa continuity update result(0x%x) success.\n", result);
+}
+
+static void mcmdq_cmd_null(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
+{
+ uint64_t size;
+ uint64_t addr;
+ void *hva = NULL;
+ ram_addr_t rb_offset;
+ RAMBlock *rb = NULL;
+ size_t rb_page_size = 0;
+
+ if (CMD_NULL_SUBOP(cmd) != CMD_NULL_SUBOP_CHECK_PA_CONTINUITY) {
+ qemu_log("current cannot process CMD_NULL subop %u.\n", CMD_NULL_SUBOP(cmd));
+ return;
+ }
+
+ size = CMD_NULL_CHECK_PA_CONTI_SIZE(cmd);
+ addr = CMD_NULL_CHECK_PA_CONTI_ADDR(cmd);
+ hva = cpu_physical_memory_map(addr, &size, false);
+ rb = qemu_ram_block_from_host(hva, false, &rb_offset);
+ if (rb) {
+ rb_page_size = qemu_ram_pagesize(rb);
+ } else {
+ qemu_log("failed to get ram block from host(%p)\n", hva);
+ }
+
+ trace_mcmdq_cmd_null(mcmdq_idx, addr, hva, size, rb_page_size);
+
+#define PAGESZ_2M 0x200000
+ if (rb_page_size < PAGESZ_2M) {
+ mcmdq_check_pa_continuity_fill_result(&u->mcmdqs[mcmdq_idx], false);
+ } else {
+ mcmdq_check_pa_continuity_fill_result(&u->mcmdqs[mcmdq_idx], true);
+ }
+}
+
/* CMD_PREFET_CFG: prefetch hint only — intentionally a no-op in emulation. */
static void mcmdq_cmd_prefet_cfg(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx)
{
    /* do nothing */
}
+
/*
 * Dispatch table for mcmdq commands, indexed by CMD_TYPE. NULL entries are
 * commands the model does not implement; the dispatcher logs and skips them.
 */
static void (*mcmdq_cmd_handlers[])(UMMUState *u, UMMUMcmdqCmd *cmd, uint8_t mcmdq_idx) = {
    [CMD_SYNC] = mcmdq_cmd_sync_handler,
    [CMD_STALL_RESUME] = NULL,
    [CMD_PREFET_CFG] = mcmdq_cmd_prefet_cfg,
    [CMD_CFGI_TECT] = mcmdq_cmd_cfgi_tect_handler,
    [CMD_CFGI_TECT_RANGE] = mcmdq_cmd_cfgi_tect_range_handler,
    [CMD_CFGI_TCT] = mcmdq_cmd_cfgi_tct_handler,
    [CMD_CFGI_TCT_ALL] = mcmdq_cmd_cfgi_tct_all_handler,
    [CMD_CFGI_VMS_PIDM] = NULL,
    [CMD_PLBI_OS_EID] = mcmdq_cmd_plbi_x_process,
    [CMD_PLBI_OS_EIDTID] = mcmdq_cmd_plbi_x_process,
    [CMD_PLBI_OS_VA] = mcmdq_cmd_plbi_x_process,
    [CMD_TLBI_OS_ALL] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_OS_TID] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_OS_VA] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_OS_VAA] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_HYP_ALL] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_HYP_TID] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_HYP_VA] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_HYP_VAA] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_S1S2_VMALL] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_S2_IPA] = mcmdq_cmd_tlbi_x_process,
    [CMD_TLBI_NS_OS_ALL] = mcmdq_cmd_tlbi_x_process,
    [CMD_RESUME] = NULL,
    [CMD_CREATE_KVTBL] = mcmdq_cmd_create_kvtbl,
    [CMD_DELETE_KVTBL] = mcmdq_cmd_delete_kvtbl,
    [CMD_NULL] = mcmdq_cmd_null,
    [CMD_TLBI_OS_ALL_U] = NULL,
    [CMD_TLBI_OS_ASID_U] = NULL,
    [CMD_TLBI_OS_VA_U] = NULL,
    [CMD_TLBI_OS_VAA_U] = NULL,
    [CMD_TLBI_HYP_ASID_U] = NULL,
    [CMD_TLBI_HYP_VA_U] = NULL,
    [CMD_TLBI_S1S2_VMALL_U] = NULL,
    [CMD_TLBI_S2_IPA_U] = NULL,
};
+
+static MemTxResult ummu_cmdq_fetch_cmd(UMMUMcmdQueue *mcmdq, UMMUMcmdqCmd *cmd)
+{
+ uint64_t addr, mcmdq_base_addr;
+ MemTxResult ret;
+ int i;
+
+ mcmdq_base_addr = MCMD_QUE_BASE_ADDR(&mcmdq->queue);
+ addr = mcmdq_base_addr + MCMD_QUE_RD_IDX(&mcmdq->queue) * mcmdq->queue.entry_size;
+ ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(UMMUMcmdqCmd),
+ MEMTXATTRS_UNSPECIFIED);
+ if (ret != MEMTX_OK) {
+ qemu_log("addr 0x%lx failed to fectch mcmdq cmd\n", addr);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
+ le32_to_cpus(&cmd->word[i]);
+ }
+
+ return ret;
+}
+
/*
 * Drain one mcmdq: fetch, dispatch and consume entries until the queue is
 * empty or an error stops processing. Responds to the guest through the
 * queue's enable/disable acknowledge bits.
 */
static void mcmdq_process_task(UMMUState *u, uint8_t mcmdq_idx)
{
    UMMUMcmdQueue *mcmdq = &u->mcmdqs[mcmdq_idx];
    UMMUMcmdqCmd cmd;
    UmmuMcmdqCmdType cmd_type;

    if (!ummu_mcmdq_enabled(mcmdq)) {
        /* queue disabled by the guest: acknowledge and bail out */
        ummu_mcmdq_disable_resp(mcmdq);
        return;
    }

    while (!ummu_mcmdq_empty(mcmdq)) {
        if (ummu_cmdq_fetch_cmd(mcmdq, &cmd) != MEMTX_OK) {
            /* eventq generate later */
            break;
        }

        cmd_type = CMD_TYPE(&cmd);
        /* NOTE(review): bound check uses '>' — assumes MCMDQ_CMD_MAX is the
         * last valid type, not the count; confirm against the enum, else
         * this is off-by-one on the handler table. */
        if (cmd_type > MCMDQ_CMD_MAX) {
            /* eventq generate later */
            break;
        }

        if (mcmdq_cmd_handlers[cmd_type]) {
            trace_mcmdq_process_task(mcmdq_idx, mcmdq_cmd_strings[cmd_type]);
            mcmdq_cmd_handlers[cmd_type](u, &cmd, mcmdq_idx);
        } else {
            qemu_log("current cannot process mcmdq cmd: %s.\n", mcmdq_cmd_strings[cmd_type]);
        }

        /* the entry is consumed even when its handler is missing */
        ummu_mcmdq_cons_incr(mcmdq);
    }

    ummu_mcmdq_enable_resp(mcmdq);
}
+
/*
 * 32-bit write to a per-mcmdq register. The queue index is decoded from
 * the offset; producer writes kick command processing, consumer writes
 * only update the register.
 */
static void ummu_mcmdq_reg_writel(UMMUState *u, hwaddr offset, uint64_t data)
{
    uint8_t mcmdq_idx;
    UMMUMcmdQueue *q = NULL;

    /* NOTE(review): the cast to uint8_t is applied BEFORE the shift, so any
     * index bits above bit 7 of the masked offset are lost — verify
     * MCMDQ_IDX_MASK fits in the low byte. */
    mcmdq_idx = (uint8_t)(offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK);
    if (mcmdq_idx >= UMMU_MAX_MCMDQS) {
        qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset);
        return;
    }

    switch (offset & MCMDQ_BASE_ADDR_MASK) {
    case MCMDQ_PROD_BASE_ADDR:
        /* producer doorbell: update PI then drain the queue */
        update_reg32_by_wmask(&u->mcmdqs[mcmdq_idx].queue.prod, data, UMMU_MCMDQ_PI_WMASK);
        mcmdq_process_task(u, mcmdq_idx);
        break;
    case MCMDQ_CONS_BASE_ADDR:
        update_reg32_by_wmask(&u->mcmdqs[mcmdq_idx].queue.cons, data, UMMU_MCMDQ_CI_WMASK);
        break;
    default:
        qemu_log("ummu cannot handle 32-bit mcmdq reg write access at 0x%lx\n", offset);
        break;
    }

    q = &u->mcmdqs[mcmdq_idx];
    trace_ummu_mcmdq_reg_writel(mcmdq_idx, MCMD_QUE_WD_IDX(&q->queue), MCMD_QUE_RD_IDX(&q->queue));
}
+
+static void ummu_glb_int_disable(UMMUState *u, UMMUUSIVectorType type)
+{
+ qemu_log("start disable glb int\n");
+
+ if (u->usi_virq[type] < 0) {
+ return;
+ }
+
+ kvm_irqchip_release_virq(kvm_state, u->usi_virq[type]);
+ u->usi_virq[type] = -1;
+}
+
/*
 * Set up a KVM USI route for a global interrupt vector (event queue or
 * global error). The USI message is read from the corresponding guest
 * registers; on routing failure the begun route change is abandoned.
 */
static void ummu_glb_int_enable(UMMUState *u, UMMUUSIVectorType type)
{
    KVMRouteChange route_change;
    USIMessage msg;
    uint32_t interrupt_id = UMMU_INTERRUPT_ID;

    if (type == UMMU_USI_VECTOR_EVETQ) {
        msg = ummu_get_eventq_usi_message(u);
    } else {
        msg = ummu_get_gerror_usi_message(u);
    }

    route_change = kvm_irqchip_begin_route_changes(kvm_state);
    u->usi_virq[type] = kvm_irqchip_add_usi_route(&route_change, msg, interrupt_id, NULL);
    trace_ummu_glb_int_enable(type, u->usi_virq[type]);
    if (u->usi_virq[type] < 0) {
        /* nothing was added to the route change, so no commit is needed */
        qemu_log("kvm irqchip failed to add usi route.\n");
        return;
    }
    kvm_irqchip_commit_route_changes(&route_change);
}
+
+static void ummu_handle_glb_int_enable_update(UMMUState *u, UMMUUSIVectorType type,
+ bool was_enabled, bool is_enabled)
+{
+ if (was_enabled && !is_enabled) {
+ ummu_glb_int_disable(u, type);
+ } else if (!was_enabled && is_enabled) {
+ ummu_glb_int_enable(u, type);
+ }
+}
+
/*
 * Handle a write to GLB_INT_EN: latch the new enable bits for the event
 * queue and global-error interrupts and (de)program the KVM routes on any
 * 0->1 or 1->0 transition.
 */
static void ummu_glb_int_en_process(UMMUState *u, uint64_t data)
{
    bool gerror_was_enabled, eventq_was_enabled;
    bool gerror_is_enabled, eventq_is_enabled;

    /* process eventq interrupt update */
    eventq_was_enabled = ummu_event_que_int_en(u);
    ummu_set_event_que_int_en(u, data);
    eventq_is_enabled = ummu_event_que_int_en(u);
    ummu_handle_glb_int_enable_update(u, UMMU_USI_VECTOR_EVETQ,
                                      eventq_was_enabled, eventq_is_enabled);

    /* process glb_err interrupt update */
    gerror_was_enabled = ummu_glb_err_int_en(u);
    ummu_set_glb_err_int_en(u, data);
    gerror_is_enabled = ummu_glb_err_int_en(u);
    ummu_handle_glb_int_enable_update(u, UMMU_USI_VECTOR_GERROR,
                                      gerror_was_enabled, gerror_is_enabled);
}
+
+static MemTxResult ummu_mapt_cmdq_fetch_cmd(MAPTCmdqBase *base, MAPTCmd *cmd)
+{
+ dma_addr_t base_addr = MAPT_UCMDQ_BASE_ADDR(base);
+ dma_addr_t addr = base_addr + MAPT_UCMDQ_CI(base) * sizeof(*cmd);
+ int ret, i;
+
+ ret = dma_memory_read(&address_space_memory, addr, cmd, sizeof(*cmd),
+ MEMTXATTRS_UNSPECIFIED);
+ if (ret != MEMTX_OK) {
+ qemu_log("addr 0x%lx failed to fectch mapt ucmdq cmd.\n", addr);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cmd->word); i++) {
+ le32_to_cpus(&cmd->word[i]);
+ }
+
+ return ret;
+}
+
+static void ummu_mapt_cplq_add_entry(MAPTCmdqBase *base, MAPTCmdCpl *cpl)
+{
+ dma_addr_t base_addr = MAPT_UCPLQ_BASE_ADDR(base);
+ dma_addr_t addr = base_addr + MAPT_UCPLQ_PI(base) * sizeof(*cpl);
+ uint32_t tmp = cpu_to_le32(*(uint32_t *)cpl);
+
+ if (dma_memory_write(&address_space_memory, addr, &tmp,
+ sizeof(tmp), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log("dma failed to wirte cpl entry to addr 0x%lx\n", addr);
+ }
+}
+
+static void ummu_process_mapt_cmd(UMMUState *u, MAPTCmdqBase *base, MAPTCmd *cmd, uint32_t ci)
+{
+ uint32_t type = MAPT_UCMD_TYPE(cmd);
+ MAPTCmdCpl cpl;
+ UMMUMcmdqCmd mcmd_cmd = { 0 };
+ uint16_t tecte_tag;
+ uint32_t tid;
+
+ mcmd_cmd.word[0] = CMD_PLBI_OS_EID;
+ /* default set cpl staus invalid */
+ ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_INVALID, 0);
+ tecte_tag = ummu_mapt_cmdq_base_get_tecte_tag(base);
+ tid = ummu_mapt_cmdq_base_get_token_id(base);
+ qemu_log("tid: %u, tecte_tag: %u\n", tid, tecte_tag);
+ switch (type) {
+ case MAPT_UCMD_TYPE_PSYNC:
+ qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PSYNC.\n");
+ ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_PSYNC_SUCCESS, ci);
+ break;
+ case MAPT_UCMD_TYPE_PLBI_USR_ALL:
+ qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PLBI_USR_ALL.\n");
+ ummu_mcmdq_construct_plbi_os_eidtid(&mcmd_cmd, tid, tecte_tag);
+ ummu_invalidate_cache(u, &mcmd_cmd);
+ break;
+ case MAPT_UCMD_TYPE_PLBI_USR_VA:
+ qemu_log("start process mapt cmd: MAPT_UCMD_TYPE_PLBI_USR_VA.\n");
+ ummu_plib_usr_va_to_pibi_os_va(cmd, &mcmd_cmd, tid, tecte_tag);
+ ummu_invalidate_cache(u, &mcmd_cmd);
+ break;
+ default:
+ qemu_log("unknow mapt cmd type: 0x%x\n", type);
+ ummu_mapt_ucplq_set_cpl(&cpl, MAPT_UCPL_STATUS_TYPE_ERROR, ci);
+ break;
+ }
+
+ if (cpl.cpl_status == MAPT_UCPL_STATUS_INVALID) {
+ return;
+ }
+
+ if (ummu_mapt_ucplq_full(base)) {
+ qemu_log("mapt ucplq full, failed to add cpl entry.\n");
+ return;
+ }
+ ummu_mapt_cplq_add_entry(base, &cpl);
+ ummu_mapt_ucqlq_prod_incr(base);
+ qemu_log("mapt cplq add entry success, cplpi: %u, cplci: %u.\n",
+ MAPT_UCPLQ_PI(base), MAPT_UCPLQ_CI(base));
+}
+
+static void ummu_process_mapt_cmdq(UMMUState *u, MAPTCmdqBase *base)
+{
+ MAPTCmd cmd;
+ int ret;
+
+ while (!ummu_mapt_ucmdq_empty(base)) {
+ ret = ummu_mapt_cmdq_fetch_cmd(base, &cmd);
+ if (ret) {
+ qemu_log("failed to fetch matp cmdq cmd.\n");
+ return;
+ }
+ ummu_process_mapt_cmd(u, base, &cmd, MAPT_UCMDQ_CI(base));
+ ummu_mapt_ucmdq_cons_incr(base);
+ }
+ qemu_log("after cmdq process, log2size: %u, cmdpi: %u, cmdci: %u, cplpi: %u, cplci: %u\n",
+ MAPT_UCMDQ_LOG2SIZE(base), MAPT_UCMDQ_PI(base), MAPT_UCMDQ_CI(base),
+ MAPT_UCPLQ_PI(base), MAPT_UCPLQ_CI(base));
+}
+
+static void ummu_mapt_ctrlr_page_write_process(UMMUState *u, hwaddr offset, uint64_t data)
+{
+ MAPTCmdqBase base;
+ uint32_t qid = ummu_mapt_cmdq_get_qid(u, offset);
+ dma_addr_t addr = MAPT_CMDQ_CTXT_BASE_ADDR(u->mapt_cmdq_ctxt_base);
+ int ret;
+
+ qemu_log("qid: %u, mapt_ctxt_base: 0x%lx\n", qid, addr);
+ ret = ummu_mapt_get_cmdq_base(u, addr, qid, &base);
+ if (ret) {
+ qemu_log("failed to get mapt cmdq base.\n");
+ return;
+ }
+
+ switch (offset & UCMDQ_UCPLQ_CI_PI_MASK) {
+ case UCMDQ_PI:
+ ummu_mapt_cmdq_base_update_ucmdq_pi(&base, (uint16_t)data);
+ ummu_process_mapt_cmdq(u, &base);
+ break;
+ case UCMDQ_CI:
+ ummu_mapt_cmdq_base_update_ucmdq_ci(&base, (uint16_t)data);
+ break;
+ case UCPLQ_PI:
+ ummu_mapt_cmdq_base_update_ucplq_pi(&base, (uint16_t)data);
+ break;
+ case UCPLQ_CI:
+ ummu_mapt_cmdq_base_update_ucplq_ci(&base, (uint16_t)data);
+ break;
+ default:
+ qemu_log("cannot process addr(0x%lx) mpat ctrlr page write.\n", offset);
+ return;
+ }
+
+ ret = ummu_mapt_update_cmdq_base(u, addr, qid, &base);
+ if (ret) {
+ qemu_log("failed to update mapt cmdq ctx.\n");
+ return;
+ }
+}
+
+static void ummu_reg_writew(UMMUState *u, hwaddr offset, uint64_t data)
+{
+ switch (offset) {
+ case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG:
+ ummu_mapt_ctrlr_page_write_process(u, offset, data);
+ break;
+ default:
+ qemu_log("ummu cannot handle 16-bit write access at: 0x%lx\n", offset);
+ break;
+ }
+}
+
+static int ummu_mapt_process_release_um_queue(UMMUState *u)
+{
+ MAPTCmdqBase base;
+ uint32_t qid = u->release_um_queue_id;
+ dma_addr_t addr = MAPT_CMDQ_CTXT_BASE_ADDR(u->mapt_cmdq_ctxt_base);
+
+ memset(&base, 0, sizeof(base));
+ if (ummu_mapt_update_cmdq_base(u, addr, qid, &base)) {
+ qemu_log("failed to release um queue(qid: %u)\n", qid);
+ return -1;
+ }
+
+ qemu_log("release um queue(qid: %u) success.\n", qid);
+ return 0;
+}
+
/*
 * 32-bit register write dispatch for the UMMU MMIO window. Most cases are
 * plain masked register updates; CTRL0, the mcmdq window, GLB_INT_EN and
 * the RELEASE_UM_QUEUE pair additionally trigger side effects.
 */
static void ummu_reg_writel(UMMUState *u, hwaddr offset, uint64_t data)
{
    switch (offset) {
    case A_CTRL0:
        update_reg32_by_wmask(&u->ctrl[0], data, UMMU_CTRL0_WMASK);
        ummu_cr0_process_task(u);
        break;
    case A_CTRL1:
        update_reg32_by_wmask(&u->ctrl[1], data, UMMU_CTRL1_WMASK);
        break;
    case A_CTRL2:
        update_reg32_by_wmask(&u->ctrl[2], data, UMMU_CTRL2_WMASK);
        break;
    case A_CTRL3:
        update_reg32_by_wmask(&u->ctrl[3], data, UMMU_CTRL3_WMASK);
        break;
    case A_TECT_BASE_CFG:
        update_reg32_by_wmask(&u->tect_base_cfg, data, UMMU_TECT_BASE_CFG_WMASK);
        break;
    case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI:
        /* per-queue mcmdq registers decode their own queue index */
        ummu_mcmdq_reg_writel(u, offset, data);
        break;
    case A_EVENT_QUE_PI:
        update_reg32_by_wmask(&u->eventq.queue.prod, data, UMMU_EVENTQ_PI_WMASK);
        break;
    case A_EVENT_QUE_CI:
        update_reg32_by_wmask(&u->eventq.queue.cons, data, UMMU_EVENTQ_CI_WMASK);
        break;
    case A_EVENT_QUE_USI_DATA:
        update_reg32_by_wmask(&u->eventq.usi_data, data, UMMU_EVENT_QUE_USI_DATA_WMASK);
        break;
    case A_EVENT_QUE_USI_ATTR:
        update_reg32_by_wmask(&u->eventq.usi_attr, data, UMMU_EVENTQ_USI_ATTR_WMASK);
        break;
    case A_GLB_ERR_INT_USI_DATA:
        update_reg32_by_wmask(&u->glb_err.usi_data, data, UMMU_GLB_ERR_INT_USI_DATA_WMASK);
        break;
    case A_GLB_ERR_INT_USI_ATTR:
        update_reg32_by_wmask(&u->glb_err.usi_attr, data, UMMU_GLB_ERR_INT_USI_ATTR_WMASK);
        break;
    case A_GLB_INT_EN:
        /* may program/release KVM USI routes on enable-bit edges */
        ummu_glb_int_en_process(u, data);
        break;
    case A_GLB_ERR_RESP:
        update_reg32_by_wmask(&u->glb_err.glb_err_resp, data, UMMU_GLB_ERR_RESP_WMASK);
        break;
    case A_RELEASE_UM_QUEUE:
        /* release_um_queue reg set 1 to release um_queue */
        if ((data & RELEASE_UM_QUEUE_WMASK) != 1) {
            break;
        }
        if (ummu_mapt_process_release_um_queue(u)) {
            u->release_um_queue = 1;
            break;
        }
        /* release success, set release_um_queue reg to 0, means release success */
        u->release_um_queue = 0;
        break;
    case A_RELEASE_UM_QUEUE_ID:
        update_reg32_by_wmask(&u->release_um_queue_id, data, RELEASE_UM_QUEUE_ID_WMASK);
        break;
    case A_UCMDQ_PI_START_REG...A_UCPLQ_CI_END_REG:
        ummu_mapt_ctrlr_page_write_process(u, offset, data);
        break;
    case A_UMCMD_PAGE_SEL:
        qemu_log("ucmdq set page sel to %s\n",
                 data == MAPT_CMDQ_CTRLR_PAGE_SIZE_4K ? "4K" : "64K");
        update_reg32_by_wmask(&u->ucmdq_page_sel, data, UMCMD_PAGE_SEL_WMASK);
        break;
    /* registers below are accepted but have no model-side effect */
    case A_DSTEID_KV_TABLE_BASE_CFG:
    case A_UMMU_DSTEID_KV_TABLE_HASH_CFG0:
    case A_UMMU_DSTEID_KV_TABLE_HASH_CFG1:
    case A_UMMU_USER_CONFIG0...A_UMMU_USER_CONFIG11:
    case A_UMMU_MEM_USI_DATA:
    case A_UMMU_MEM_USI_ATTR:
    case A_UMMU_INT_MASK:
    case A_UMMU_DSTEID_CAM_TABLE_BASE_CFG:
        /* do nothing */
        break;
    default:
        qemu_log("ummu cannot handle 32-bit write access at 0x%lx\n", offset);
        break;
    }
}
+
/*
 * 64-bit write to a per-mcmdq register; only the queue base register is
 * writable at this width. The queue size is latched from the base value.
 */
static void ummu_mcmdq_reg_writell(UMMUState *u, hwaddr offset, uint64_t data)
{
    uint8_t mcmdq_idx;

    /* NOTE(review): same cast-before-shift pattern as ummu_mcmdq_reg_writel —
     * verify MCMDQ_IDX_MASK fits within the low byte of the offset. */
    mcmdq_idx = (uint8_t)(offset & MCMDQ_IDX_MASK) >> __bf_shf(MCMDQ_IDX_MASK);
    if (mcmdq_idx >= UMMU_MAX_MCMDQS) {
        qemu_log("invalid idx %u, offset is 0x%lx\n", mcmdq_idx, offset);
        return;
    }

    switch (offset & MCMDQ_BASE_ADDR_MASK) {
    case A_MCMD_QUE_BASE:
        update_reg64_by_wmask(&u->mcmdqs[mcmdq_idx].queue.base, data, UMMU_MCMDQ_BASE_WMASK);
        u->mcmdqs[mcmdq_idx].queue.log2size = MCMD_QUE_LOG2SIZE(data);
        trace_ummu_mcmdq_base_reg_writell(mcmdq_idx, u->mcmdqs[mcmdq_idx].queue.base,
                                          u->mcmdqs[mcmdq_idx].queue.log2size);
        break;
    default:
        qemu_log("ummu cannot handle 64-bit mcmdq reg write access at 0x%lx\n", offset);
        break;
    }
}
+
/*
 * 64-bit register write dispatch: base-address registers for the TECT,
 * mcmdq, event queue, USI addresses and the MAPT context table.
 */
static void ummu_reg_writell(UMMUState *u, hwaddr offset, uint64_t data)
{
    switch (offset) {
    case A_TECT_BASE0:
        update_reg64_by_wmask(&u->tect_base, data, UMMU_TECT_BASE_WMASK);
        break;
    case A_MCMD_QUE_BASE...A_MCMD_QUE_LASTEST_CI:
        ummu_mcmdq_reg_writell(u, offset, data);
        break;
    case A_EVENT_QUE_BASE0:
        /* queue size is encoded in the base register */
        update_reg64_by_wmask(&u->eventq.queue.base, data, UMMU_EVENTQ_BASE_WMASK);
        u->eventq.queue.log2size = EVENT_QUE_LOG2SIZE(data);
        trace_ummu_eventq_req_writell(u->eventq.queue.base, u->eventq.queue.log2size);
        break;
    case A_EVENT_QUE_USI_ADDR0:
        update_reg64_by_wmask(&u->eventq.usi_addr, data, UMMU_EVENTQ_USI_ADDR_WMASK);
        trace_ummu_eventq_usi_reg_writell(data);
        break;
    case A_GLB_ERR_INT_USI_ADDR0:
        update_reg64_by_wmask(&u->glb_err.usi_addr, data, UMMU_GLB_ERR_INT_USI_ADDR_WMASK);
        trace_ummu_glberr_usi_reg_writell(data);
        break;
    case A_MAPT_CMDQ_CTXT_BADDR0:
        update_reg64_by_wmask(&u->mapt_cmdq_ctxt_base, data, MAPT_CMDQ_CTXT_BADDR_WMASK);
        trace_ummu_mapt_ctx_base_reg_writell(u->mapt_cmdq_ctxt_base);
        break;
    /* accepted but not modelled */
    case A_DSTEID_KV_TABLE_BASE0:
    case A_UMMU_DSTEID_CAM_TABLE_BASE0:
    case A_UMMU_MEM_USI_ADDR0:
        /* do nothing */
        break;
    default:
        qemu_log("ummu cannot handle 64-bit write access at 0x%lx\n", offset);
        break;
    }
}
+
+static void ummu_reg_write(void *opaque, hwaddr offset, uint64_t data, unsigned size)
+{
+ UMMUState *u = opaque;
+
+ switch (size) {
+ case 2:
+ ummu_reg_writew(u, offset, data);
+ break;
+ case 4:
+ ummu_reg_writel(u, offset, data);
+ break;
+ case 8:
+ ummu_reg_writell(u, offset, data);
+ break;
+ default:
+ qemu_log("cann't not process ummu reg write for size: %u\n", size);
+ break;
+ }
+}
+
/* MMIO ops for the UMMU register window: little-endian, 2/4/8-byte access. */
static const MemoryRegionOps ummu_reg_ops = {
    .read = ummu_reg_read,
    .write = ummu_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 2,
        .max_access_size = 8,
    },
};
+
/*
 * Reset all UMMU registers to their power-on values: capability words
 * advertising the modelled feature set, control registers, queue state
 * and the MAPT context base. Called from realize.
 */
static void ummu_registers_init(UMMUState *u)
{
    int i;

    memset(u->cap, 0, sizeof(u->cap));
    /* cap 0 init */
    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, DSTEID_SIZE, 0x10);
    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TOKENID_SIZE, 0x14);
    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, ATTR_PERMS_OVR, 0x1);
    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, ATTR_TYPES_OVR, 0x1);
    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, S2_ATTR_TYPE, 0x1);
    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TCT_LEVEL, 0x1);
    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TECT_MODE, 0x1);
    u->cap[0] = FIELD_DP32(u->cap[0], CAP0, TECT_LEVEL, 0x1);
    /* cap 1 init */
    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENTQ_SIZE, 0x13);
    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENTQ_NUMB, 0x0);
    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENTQ_SUPPORT, 0x1);
    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, MCMDQ_SIZE, 0xF);
    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, MCMDQ_NUMB, 0x3);
    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, MCMDQ_SUPPORT, 0x1);
    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, EVENT_GEN, 0x1);
    u->cap[1] = FIELD_DP32(u->cap[1], CAP1, STALL_MAX, 0x80);
    /* cap 2 init */
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, VMID_TLBI, 0x0);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, TLB_BOARDCAST, 0x1);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, RANGE_TLBI, 0x1);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, OA_SIZE, 0x5);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, GRAN4K_T, 0x1);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, GRAN16K_T, 0x1);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, GRAN64K_T, 0x1);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, VA_EXTEND, 0x0);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, S2_TRANS, 0x1);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, S1_TRANS, 0x1);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, SMALL_TRANS, 0x1);
    u->cap[2] = FIELD_DP32(u->cap[2], CAP2, TRANS_FORM, 0x2);
    /* cap 3 init */
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, HIER_ATTR_DISABLE, 0x1);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, S2_EXEC_NEVER_CTRL, 0x1);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, BBM_LEVEL, 0x2);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, COHERENT_ACCESS, 0x1);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, TTENDIAN_MODE, 0x0);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, MTM_SUPPORT, 0x1);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, HTTU_SUPPORT, 0x2);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, HYP_S1CONTEXT, 0x1);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, USI_SUPPORT, 0x1);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, STALL_MODEL, 0x0);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, TERM_MODEL, 0x0);
    u->cap[3] = FIELD_DP32(u->cap[3], CAP3, SATI_MAX, 0x1);
    /* cap 4 init */
    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UCMDQ_UCPLQ_NUMB, 0x10);
    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UCMDQ_SIZE, 0xF);
    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UCPLQ_SIZE, 0xF);
    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UIEQ_SIZE, 0xF);
    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UIEQ_NUMB, 0x5);
    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, UIEQ_SUPPORT, 0x1);
    u->cap[4] = FIELD_DP32(u->cap[4], CAP4, PPLB_SUPPORT, 0x0);

    /* cap 5 init */
    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, MAPT_SUPPORT, 0x1);
    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, MAPT_MODE, 0x3);
    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, GRAN2M_P, 0x0);
    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, GRAN4K_P, 0x1);
    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, TOKENVAL_CHK, 0x1);
    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, TOKENVAL_CHK_MODE, 0x1);
    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, RANGE_PLBI, 0x1);
    u->cap[5] = FIELD_DP32(u->cap[5], CAP5, PLB_BORDCAST, 0x0);
    /* cap 6 init */
    u->cap[6] = FIELD_DP32(u->cap[6], CAP6, MTM_ID_MAX, 0x00FF);
    u->cap[6] = FIELD_DP32(u->cap[6], CAP6, MTM_GP_MAX, 0x03);

    /* ctrlr init */
    memset(u->ctrl, 0, sizeof(u->ctrl));
    u->ctrl[1] = FIELD_DP32(u->ctrl[1], CTRL1, TECT_MODE_SEL, 0x1);

    /* tect init */
    u->tect_base = 0;
    u->tect_base_cfg = 0;

    /* mcmdq init */
    for (i = 0; i < UMMU_MAX_MCMDQS; i++) {
        u->mcmdqs[i].queue.base = 0;
        u->mcmdqs[i].queue.prod = 0;
        u->mcmdqs[i].queue.cons = 0;
        u->mcmdqs[i].queue.entry_size = sizeof(UMMUMcmdqCmd);
    }

    /* eventq init */
    memset(&u->eventq, 0, sizeof(u->eventq));

    /* glb err init */
    memset(&u->glb_err, 0, sizeof(u->glb_err));

    /* evt queue init */
    u->eventq.queue.base = 0;
    u->eventq.queue.prod = 0;
    u->eventq.queue.cons = 0;
    u->eventq.queue.entry_size = sizeof(UMMUEvent);

    /* mapt cmdq ctxt base addr init */
    u->mapt_cmdq_ctxt_base = 0;

    /* umcmdq default page set to 4K */
    u->ucmdq_page_sel = MAPT_CMDQ_CTRLR_PAGE_SIZE_4K;
}
+
+int ummu_associating_with_ubc(BusControllerState *ubc)
+{
+ UMMUState *ummu;
+ unsigned int bus_num;
+
+ if (1 != sscanf(ubc->bus->qbus.name, "ubus.%u", &bus_num)) {
+ qemu_log("failed to get bus num %s\n",
+ ubc->bus->qbus.name);
+ return -1;
+ }
+ ummu = ummu_find_by_bus_num(bus_num);
+ if (!ummu) {
+ qemu_log("failed to get ummu %d\n", bus_num);
+ return -1;
+ }
+ ummu->bus = ubc->bus;
+ return 0;
+}
+
/*
 * Look up (or lazily create) the per-device UMMU context for the device
 * with the given EID: its IOMMU memory region and the two address spaces
 * (system-memory alias and translated) used by ummu_find_add_as().
 */
static UMMUDevice *ummu_get_udev(UBBus *bus, UMMUState *u, uint32_t eid)
{
    UMMUDevice *ummu_dev = NULL;
    UBDevice *udev = NULL;
    char *name = NULL;

    /* NOTE(review): ub_find_device_by_eid() may return NULL for an unknown
     * eid; that NULL would be used as the hash key and stored in the new
     * UMMUDevice — confirm callers guarantee the eid exists. */
    udev = ub_find_device_by_eid(bus, eid);
    ummu_dev = g_hash_table_lookup(u->ummu_devs, udev);
    if (ummu_dev) {
        return ummu_dev;
    }

    /* will be freed when remove from hash table */
    ummu_dev = g_new0(UMMUDevice, 1);
    ummu_dev->ummu = u;
    ummu_dev->udev = udev;

    name = g_strdup_printf("%s-0x%x", u->mrtypename, eid);
    memory_region_init_iommu(&ummu_dev->iommu, sizeof(ummu_dev->iommu), u->mrtypename,
                             OBJECT(u), name, UINT64_MAX);
    address_space_init(&ummu_dev->as_sysmem, &u->root, name);
    address_space_init(&ummu_dev->as, MEMORY_REGION(&ummu_dev->iommu), name);
    g_free(name);
    g_hash_table_insert(u->ummu_devs, udev, ummu_dev);

    return ummu_dev;
}
+
+static AddressSpace *ummu_find_add_as(UBBus *bus, void *opaque, uint32_t eid)
+{
+ UMMUState *u = opaque;
+ UMMUDevice *ummu_dev = ummu_get_udev(bus, u, eid);
+
+ if (u->nested && !ummu_dev->s1_hwpt) {
+ return &ummu_dev->as_sysmem;
+ }
+
+ return &ummu_dev->as;
+}
+
+static bool ummu_is_nested(void *opaque)
+{
+ UMMUState *u = opaque;
+
+ return u->nested;
+}
+
+static bool ummu_dev_attach_viommu(UMMUDevice *udev,
+ HostIOMMUDeviceIOMMUFD *idev, Error **errp)
+{
+ UMMUState *u = udev->ummu;
+ UMMUS2Hwpt *s2_hwpt = NULL;
+ UMMUViommu *viommu = NULL;
+ uint32_t s2_hwpt_id;
+
+ if (u->viommu) {
+ return host_iommu_device_iommufd_attach_hwpt(
+ idev, u->viommu->s2_hwpt->hwpt_id, errp);
+ }
+
+ if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid, idev->ioas_id,
+ IOMMU_HWPT_ALLOC_NEST_PARENT,
+ IOMMU_HWPT_DATA_NONE, 0, NULL,
+ &s2_hwpt_id, NULL, errp)) {
+ error_setg(errp, "failed to allocate an S2 hwpt");
+ return false;
+ }
+
+ if (!host_iommu_device_iommufd_attach_hwpt(idev, s2_hwpt_id, errp)) {
+ error_setg(errp, "failed to attach stage-2 HW pagetable");
+ goto free_s2_hwpt;
+ }
+
+ viommu = g_new0(UMMUViommu, 1);
+ viommu->core = iommufd_backend_alloc_viommu(idev->iommufd, idev->devid,
+ IOMMU_VIOMMU_TYPE_UMMU,
+ s2_hwpt_id);
+ if (!viommu->core) {
+ error_setg(errp, "failed to allocate a viommu");
+ goto free_viommu;
+ }
+
+ s2_hwpt = g_new0(UMMUS2Hwpt, 1);
+ s2_hwpt->iommufd = idev->iommufd;
+ s2_hwpt->hwpt_id = s2_hwpt_id;
+ s2_hwpt->ioas_id = idev->ioas_id;
+ qemu_log("alloc hwpt for s2 success, hwpt id is %u\n", s2_hwpt_id);
+
+ viommu->iommufd = idev->iommufd;
+ viommu->s2_hwpt = s2_hwpt;
+
+ u->viommu = viommu;
+ return true;
+
+free_viommu:
+ g_free(viommu);
+ host_iommu_device_iommufd_attach_hwpt(idev, udev->idev->ioas_id, errp);
+free_s2_hwpt:
+ iommufd_backend_free_id(idev->iommufd, s2_hwpt_id);
+
+ return false;
+}
+
+static bool ummu_dev_set_iommu_dev(UBBus *bus, void *opaque, uint32_t eid,
+ HostIOMMUDevice *hiod, Error **errp)
+{
+ HostIOMMUDeviceIOMMUFD *idev = HOST_IOMMU_DEVICE_IOMMUFD(hiod);
+ UMMUState *u = opaque;
+ UMMUDevice *ummu_dev = NULL;
+
+ if (!u->nested) {
+ error_setg(errp, "set iommu dev expcet ummu is nested mode\n");
+ return false;
+ }
+
+ if (!idev) {
+ error_setg(errp, "unexpect idev is NULL\n");
+ return false;
+ }
+
+ ummu_dev = ummu_get_udev(bus, u, eid);
+ if (!ummu_dev) {
+ error_setg(errp, "failed to get ummu dev by eid 0x%x\n", eid);
+ return false;
+ }
+
+ if (ummu_dev->idev) {
+ if (ummu_dev->idev != idev) {
+ error_setg(errp, "udev(%s) exist idev conflict new config idev\n", ummu_dev->udev->name);
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ if (!ummu_dev_attach_viommu(ummu_dev, idev, errp)) {
+ error_report("Unable to attach viommu");
+ return false;
+ }
+
+ ummu_dev->idev = idev;
+ ummu_dev->viommu = u->viommu;
+ QLIST_INSERT_HEAD(&u->viommu->device_list, ummu_dev, next);
+
+ return 0;
+}
+
/*
 * UBIOMMUOps.unset_iommu_device: detach the device with the given EID from
 * the viommu, re-attach it to its default IOAS and free its vdev. When the
 * last device leaves, the shared viommu and S2 hwpt are torn down too.
 */
static void ummu_dev_unset_iommu_dev(UBBus *bus, void *opaque, uint32_t eid)
{
    UMMUDevice *ummu_dev;
    UMMUViommu *viommu = NULL;
    UMMUVdev *vdev = NULL;
    UMMUState *u = opaque;
    UBDevice *udev = NULL;

    if (!u->nested) {
        return;
    }

    udev = ub_find_device_by_eid(bus, eid);
    ummu_dev = g_hash_table_lookup(u->ummu_devs, udev);
    if (!ummu_dev) {
        return;
    }

    /* NOTE(review): ummu_dev->idev is dereferenced without a NULL check —
     * assumes unset is only ever called after a successful set. */
    if (!host_iommu_device_iommufd_attach_hwpt(ummu_dev->idev,
                                               ummu_dev->idev->ioas_id, NULL)) {
        error_report("Unable to attach dev to the default HW pagetable");
    }

    vdev = ummu_dev->vdev;
    viommu = ummu_dev->viommu;

    ummu_dev->idev = NULL;
    ummu_dev->viommu = NULL;
    QLIST_REMOVE(ummu_dev, next);

    if (vdev) {
        iommufd_backend_free_id(viommu->iommufd, vdev->core->vdev_id);
        g_free(vdev->core);
        g_free(vdev);
    }

    /* last device gone: release the shared viommu and stage-2 hwpt */
    if (QLIST_EMPTY(&viommu->device_list)) {
        iommufd_backend_free_id(viommu->iommufd, viommu->core->viommu_id);
        g_free(viommu->core);
        iommufd_backend_free_id(viommu->iommufd, viommu->s2_hwpt->hwpt_id);
        g_free(viommu->s2_hwpt);
        g_free(viommu);
        u->viommu = NULL;
    }
}
+
/* IOMMU callbacks registered with the UB bus via ub_setup_iommu(). */
static const UBIOMMUOps ummu_ops = {
    .get_address_space = ummu_find_add_as,
    .ummu_is_nested = ummu_is_nested,
    .set_iommu_device = ummu_dev_set_iommu_dev,
    .unset_iommu_device = ummu_dev_unset_iommu_dev,
};
+
/* Register this UMMU on the global list used by ummu_find_by_bus_num(). */
static void ub_save_ummu_list(UMMUState *u)
{
    QLIST_INSERT_HEAD(&ub_umms, u, node);
}
+
/* Remove this UMMU from the global instance list (unrealize path). */
static void ub_remove_ummu_list(UMMUState *u)
{
    QLIST_REMOVE(u, node);
}
+
/*
 * Realize the UMMU sysbus device: set up the MMIO register window, reset
 * registers, register on the global list, create the per-device and config
 * hash tables and, in nested mode, build the stage-2 memory hierarchy.
 */
static void ummu_base_realize(DeviceState *dev, Error **errp)
{
    /* NOTE(review): monotonically increasing instance counter; never
     * decremented on unrealize, so bus numbers are not reused. */
    static uint8_t NO = 0;
    UMMUState *u = UB_UMMU(dev);
    SysBusDevice *sysdev = SYS_BUS_DEVICE(dev);

    u->bus_num = NO;
    sysdev->parent_obj.id = g_strdup_printf("ummu.%u", NO++);

    memory_region_init_io(&u->ummu_reg_mem, OBJECT(u), &ummu_reg_ops,
                          u, TYPE_UB_UMMU, u->ummu_reg_size);
    sysbus_init_mmio(sysdev, &u->ummu_reg_mem);

    /* all USI virqs start unallocated (-1) */
    memset(u->usi_virq, -1, sizeof(u->usi_virq));
    ummu_registers_init(u);
    ub_save_ummu_list(u);

    u->ummu_devs = g_hash_table_new_full(NULL, NULL, NULL, g_free);
    u->configs = g_hash_table_new_full(NULL, NULL, NULL, g_free);
    QLIST_INIT(&u->kvtbl);
    if (u->primary_bus) {
        ub_setup_iommu(u->primary_bus, &ummu_ops, u);
    } else {
        error_setg(errp, "UMMU is not attached to any UB bus!");
    }

    u->tecte_tag_num = 0;
    u->mrtypename = TYPE_UMMU_IOMMU_MEMORY_REGION;
    if (u->nested) {
        /* stage2 container wraps a full alias of system memory */
        memory_region_init(&u->stage2, OBJECT(u), "stage2", UINT64_MAX);
        memory_region_init_alias(&u->sysmem, OBJECT(u),
                                 "ummu-sysmem", get_system_memory(), 0,
                                 memory_region_size(get_system_memory()));
        memory_region_add_subregion(&u->stage2, 0, &u->sysmem);

        memory_region_init(&u->root, OBJECT(u), "ummu-root", UINT64_MAX);
        memory_region_add_subregion(&u->root, 0, &u->stage2);
    }
}
+
+static void ummu_base_unrealize(DeviceState *dev)
+{
+ UMMUState *u = UB_UMMU(dev);
+ SysBusDevice *sysdev = SYS_BUS_DEVICE(dev);
+ UMMUKVTblEntry *entry = NULL;
+ UMMUKVTblEntry *next_entry = NULL;
+
+ ub_remove_ummu_list(u);
+ if (sysdev->parent_obj.id) {
+ g_free(sysdev->parent_obj.id);
+ }
+
+ if (u->ummu_devs) {
+ g_hash_table_remove_all(u->ummu_devs);
+ g_hash_table_destroy(u->ummu_devs);
+ u->ummu_devs = NULL;
+ }
+
+ if (u->configs) {
+ g_hash_table_remove_all(u->configs);
+ g_hash_table_destroy(u->configs);
+ u->configs = NULL;
+ }
+
+ QLIST_FOREACH_SAFE(entry, &u->kvtbl, list, next_entry) {
+ QLIST_REMOVE(entry, list);
+ g_free(entry);
+ }
+}
+
/* Device reset hook — register state reset is not yet implemented. */
static void ummu_base_reset(DeviceState *dev)
{
    /* reset ummu relative struct later */
}
+
/* QOM properties: register window size, owning UB bus, nested-mode flag. */
static Property ummu_dev_properties[] = {
    DEFINE_PROP_UINT64("ub-ummu-reg-size", UMMUState,
                       ummu_reg_size, 0),
    DEFINE_PROP_LINK("primary-bus", UMMUState, primary_bus,
                     TYPE_UB_BUS, UBBus *),
    DEFINE_PROP_BOOL("nested", UMMUState, nested, false),
    DEFINE_PROP_END_OF_LIST(),
};
+
/* Class init: wire up properties and the realize/unrealize/reset hooks. */
static void ummu_base_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, ummu_dev_properties);
    dc->realize = ummu_base_realize;
    dc->unrealize = ummu_base_unrealize;
    dc->reset = ummu_base_reset;
}
+
+/* QOM type descriptor for the UMMU sysbus device. */
+static const TypeInfo ummu_base_info = {
+    .name = TYPE_UB_UMMU,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(UMMUState),
+    .class_data = NULL,
+    .class_size = sizeof(UMMUBaseClass),
+    .class_init = ummu_base_class_init,
+};
+
+/*
+ * Fetch one translation-entity context table entry (TECTE) from guest
+ * memory at @addr and convert it to host byte order.
+ *
+ * Returns 0 on success, -EINVAL when the DMA read fails.
+ */
+static int ummu_get_tecte(UMMUState *ummu, dma_addr_t addr, TECTE *tecte)
+{
+    int ret, i;
+
+    ret = dma_memory_read(&address_space_memory, addr, tecte, sizeof(*tecte),
+                          MEMTXATTRS_UNSPECIFIED);
+    if (ret != MEMTX_OK) {
+        /* PRIx64: dma_addr_t is 64 bit even on hosts where long is 32 bit,
+         * so the previous %lx would mismatch the argument width there. */
+        qemu_log("Cannot fetch tecte at address=0x%" PRIx64 "\n", (uint64_t)addr);
+        return -EINVAL;
+    }
+
+    /* Table entries are little-endian in guest memory. */
+    for (i = 0; i < ARRAY_SIZE(tecte->word); i++) {
+        le32_to_cpus(&tecte->word[i]);
+    }
+
+    return 0;
+}
+
+/*
+ * Look up the tecte_tag registered for @dst_eid in the key/value list.
+ * Returns UINT32_MAX when no entry matches.
+ */
+static uint32_t ummu_get_tecte_tag_by_dest_eid(UMMUState *u, uint32_t dst_eid)
+{
+    UMMUKVTblEntry *it = NULL;
+
+    QLIST_FOREACH(it, &u->kvtbl, list) {
+        if (it->dst_eid != dst_eid) {
+            continue;
+        }
+        qemu_log("success get tecte_tag(0x%x) by dst_eid(0x%x)\n", it->tecte_tag, dst_eid);
+        return it->tecte_tag;
+    }
+
+    qemu_log("cannot find tecte_tag by dst_eid 0x%x\n", dst_eid);
+    return UINT32_MAX;
+}
+
+/*
+ * Locate and fetch the TECTE identified by @tecte_tag from the 2-level
+ * translation-entity context table in guest memory.
+ * Returns 0 on success with *@tecte filled, -EINVAL otherwise.
+ */
+static int ummu_find_tecte(UMMUState *ummu, uint32_t tecte_tag, TECTE *tecte)
+{
+    dma_addr_t tect_base_addr = TECT_BASE_ADDR(ummu->tect_base);
+    dma_addr_t tecte_addr;
+    int ret;
+    int i;
+
+    if (ummu_tect_fmt_2level(ummu)) {
+        int l1_tecte_offset, l2_tecte_offset;
+        uint32_t split;
+        dma_addr_t l1ptr, l2ptr;
+        TECTEDesc l1_tecte_desc;
+
+        /* The tag splits into a level-1 index (high bits) and a level-2
+         * index (low 'split' bits). */
+        split = ummu_tect_split(ummu);
+        l1_tecte_offset = tecte_tag >> split;
+        l2_tecte_offset = tecte_tag & ((1 << split) - 1);
+        l1ptr = (dma_addr_t)(tect_base_addr + l1_tecte_offset * sizeof(l1_tecte_desc));
+
+        ret = dma_memory_read(&address_space_memory, l1ptr, &l1_tecte_desc,
+                              sizeof(l1_tecte_desc), MEMTXATTRS_UNSPECIFIED);
+        if (ret != MEMTX_OK) {
+            qemu_log("dma read failed for tecte level1 desc.\n");
+            return -EINVAL;
+        }
+
+        /* Level-1 descriptors are little-endian in guest memory. */
+        for (i = 0; i < ARRAY_SIZE(l1_tecte_desc.word); i++) {
+            le32_to_cpus(&l1_tecte_desc.word[i]);
+        }
+
+        if (TECT_DESC_V(&l1_tecte_desc) == 0) {
+            qemu_log("tecte desc is invalid\n");
+            return -EINVAL;
+        }
+
+        /* Descend into the level-2 table the descriptor points at. */
+        l2ptr = TECT_L2TECTE_PTR(&l1_tecte_desc);
+        tecte_addr = l2ptr + l2_tecte_offset * sizeof(*tecte);
+    } else {
+        /* Linear-format tables are not implemented yet. */
+        qemu_log("liner table process not support\n");
+        return -EINVAL;
+    }
+
+    if (ummu_get_tecte(ummu, tecte_addr, tecte)) {
+        qemu_log("failed to get tecte.\n");
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+/*
+ * Validate @tecte and copy its token-context-table location/layout
+ * fields into @cfg.  Returns 0 on success, -EINVAL on an invalid entry.
+ */
+static int ummu_decode_tecte(UMMUState *ummu, UMMUTransCfg *cfg,
+                             TECTE *tecte, UMMUEventInfo *event)
+{
+    if (!TECTE_VALID(tecte)) {
+        qemu_log("fetched tecte is invalid\n");
+        return -EINVAL;
+    }
+
+    /* Record where the TCT lives and how it is laid out. */
+    cfg->tct_ptr = TECTE_TCT_PTR(tecte);
+    cfg->tct_num = TECTE_TCT_NUM(tecte);
+    cfg->tct_fmt = TECTE_TCT_FMT(tecte);
+
+    qemu_log("tct_ptr: 0x%lx, tct_num: %lu, fmt: %lu\n",
+             cfg->tct_ptr, cfg->tct_num, cfg->tct_fmt);
+    return 0;
+}
+
+/*
+ * Fetch the token context table entry (TCTE) at @addr from guest memory,
+ * convert it to host byte order and log its raw contents.
+ * Returns 0 on success, -EINVAL when the DMA read fails.
+ */
+static int ummu_get_tcte(UMMUState *ummu, dma_addr_t addr,
+                         TCTE *tcte, uint32_t tid)
+{
+    uint64_t *qw;
+    int res;
+    int idx;
+
+    res = dma_memory_read(&address_space_memory, addr, tcte, sizeof(*tcte),
+                          MEMTXATTRS_UNSPECIFIED);
+    if (res != MEMTX_OK) {
+        qemu_log("Cannot fetch tcte at address=0x%lx\n", addr);
+        return -EINVAL;
+    }
+
+    /* Entries are little-endian in guest memory. */
+    for (idx = 0; idx < ARRAY_SIZE(tcte->word); idx++) {
+        le32_to_cpus(&tcte->word[idx]);
+    }
+
+    /* Dump the entry as eight 64-bit words for debugging. */
+    qw = (uint64_t *)tcte;
+    qemu_log("fetch tcte(%u): <0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx>\n",
+             tid, qw[0], qw[1], qw[2], qw[3], qw[4], qw[5], qw[6], qw[7]);
+    return 0;
+}
+
+/*
+ * Locate and fetch the TCTE for token id @tid via the 2-level (64K split)
+ * token context table described by @cfg.  On failure sets @event->type
+ * and returns a negative errno; on success fills *@tcte and returns 0.
+ */
+static int ummu_find_tcte(UMMUState *ummu, UMMUTransCfg *cfg, uint32_t tid,
+                          TCTE *tcte, UMMUEventInfo *event)
+{
+    int l1idx, l2idx;
+    dma_addr_t tct_lv1_addr, tcte_addr;
+    TCTEDesc tct_desc;
+    int ret, i;
+
+    /* tct_num encodes the table capacity as a power of two (TCTE_MAX_NUM). */
+    if (cfg->tct_num == 0 || tid >= TCTE_MAX_NUM(cfg->tct_num)) {
+        event->type = EVT_BAD_TOKENID;
+        return -EINVAL;
+    }
+
+    /* Only the 2-level/64K format is handled below. */
+    if (TCT_FMT_LINEAR == cfg->tct_fmt || TCT_FMT_LVL2_4K == cfg->tct_fmt) {
+        event->type = EVT_TCT_FETCH;
+        qemu_log("current dont support TCT_FMT_LINEAR&TCT_FMT_LVL2_4K.\n");
+        return -EINVAL;
+    }
+
+    /* High bits of the tid index the level-1 descriptor table. */
+    l1idx = tid >> TCT_SPLIT_64K;
+    tct_lv1_addr = cfg->tct_ptr + l1idx * sizeof(tct_desc);
+    ret = dma_memory_read(&address_space_memory, tct_lv1_addr, &tct_desc, sizeof(tct_desc),
+                          MEMTXATTRS_UNSPECIFIED);
+    if (ret != MEMTX_OK) {
+        event->type = EVT_TCT_FETCH;
+        qemu_log("failed to dma read tct lv1 entry.\n");
+        return -EINVAL;
+    }
+
+    /* Descriptors are little-endian in guest memory. */
+    for (i = 0; i < ARRAY_SIZE(tct_desc.word); i++) {
+        le32_to_cpus(&tct_desc.word[i]);
+    }
+
+    qemu_log("l1idx: %d, tct_l1_addr: 0x%lx, tct_desc: 0x%lx, tcte_ptr: 0x%llx, l1tcte_v: %u\n",
+             l1idx, tct_lv1_addr, *(uint64_t *)&tct_desc, TCT_L2TCTE_PTR(&tct_desc), TCT_L1TCTE_V(&tct_desc));
+
+    if (TCT_L1TCTE_V(&tct_desc) == 0) {
+        event->type = EVT_BAD_TOKENID;
+        qemu_log("l2tcte is invalid\n");
+        return -EINVAL;
+    }
+
+    /* Low TCT_SPLIT_64K bits index into the level-2 entry array. */
+    l2idx = tid & (TCT_L2_ENTRIES - 1);
+    tcte_addr = TCT_L2TCTE_PTR(&tct_desc) + l2idx * sizeof(*tcte);
+    qemu_log("l2idx: %d, tcte_addr: 0x%lx\n", l2idx, tcte_addr);
+    ret = ummu_get_tcte(ummu, tcte_addr, tcte, tid);
+    if (ret) {
+        event->type = EVT_TCT_FETCH;
+        qemu_log("failed to get tcte, ret = %d\n", ret);
+        return ret;
+    }
+
+    return 0;
+}
+
+/*
+ * Validate @tcte and extract the translation-table base and geometry
+ * into @cfg.  Sets @event->type and returns -1 on an invalid entry.
+ */
+static int ummu_decode_tcte(UMMUState *ummu, UMMUTransCfg *cfg,
+                            TCTE *tcte, UMMUEventInfo *event)
+{
+    uint32_t tct_v = TCTE_TCT_V(tcte);
+
+    if (!tct_v) {
+        qemu_log("fetched tcte invalid\n");
+        event->type = EVT_BAD_TCT;
+        return -1;
+    }
+
+    /* Translation table base address, input-address size and granule. */
+    cfg->tct_ttba = TCTE_TTBA(tcte);
+    cfg->tct_sz = TCTE_SZ(tcte);
+    cfg->tct_tgs = tgs2granule(TCTE_TGS(tcte));
+    qemu_log("tcte_tbba: 0x%lx, sz: %u, tgs: %u, tct_v: %u\n",
+             cfg->tct_ttba, cfg->tct_sz, cfg->tct_tgs, tct_v);
+    return 0;
+}
+
+/*
+ * Resolve the translation configuration for @ummu_dev via the sparse
+ * (key/value) TECT: dest_eid -> tecte_tag -> TECTE -> TCTE.
+ *
+ * On success fills @cfg and returns 0.  On failure sets @event (type and
+ * the tid/tecte_tag context) and returns -EINVAL.
+ */
+static int ummu_tect_parse_sparse_table(UMMUDevice *ummu_dev, UMMUTransCfg *cfg,
+                                        uint32_t dest_eid, UMMUEventInfo *event)
+{
+    UMMUState *ummu = ummu_dev->ummu;
+    int ret;
+    TECTE tecte;
+    TCTE tcte;
+    uint32_t tecte_tag;
+    uint32_t tid = ub_dev_get_token_id(ummu_dev->udev);
+
+    tecte_tag = ummu_get_tecte_tag_by_dest_eid(ummu, dest_eid);
+    if (tecte_tag == UINT32_MAX) {
+        qemu_log("failed to get tecte tag by dest_eid(%u).\n", dest_eid);
+        event->type = EVT_BAD_DSTEID;
+        goto failed;
+    }
+
+    ret = ummu_find_tecte(ummu, tecte_tag, &tecte);
+    if (ret) {
+        event->type = EVT_TECT_FETCH;
+        qemu_log("failed to find tecte: %d\n", ret);
+        goto failed;
+    }
+
+    ret = ummu_decode_tecte(ummu, cfg, &tecte, event);
+    if (ret) {
+        event->type = EVT_BAD_TECT;
+        qemu_log("failed to decode tecte.\n");
+        goto failed;
+    }
+
+    qemu_log("get udev(%s %s) tid(%u)\n",
+             ummu_dev->udev->name, ummu_dev->udev->qdev.id, tid);
+    /* These two messages previously said "tecte" (copy-paste); they are
+     * on the TCTE paths and ummu_find/decode_tcte() set event->type. */
+    ret = ummu_find_tcte(ummu, cfg, tid, &tcte, event);
+    if (ret) {
+        qemu_log("failed to find tcte.\n");
+        goto failed;
+    }
+
+    ret = ummu_decode_tcte(ummu, cfg, &tcte, event);
+    if (ret) {
+        qemu_log("failed to decode tcte.\n");
+        goto failed;
+    }
+    cfg->tecte_tag = tecte_tag;
+    cfg->tid = tid;
+
+    return 0;
+
+failed:
+    event->tid = tid;
+    event->tecte_tag = tecte_tag;
+    return -EINVAL;
+}
+
+/*
+ * Decode the translation configuration for @ummu_dev.  Only the sparse
+ * TECT mode is implemented; other modes report a TECT fetch event.
+ */
+static int ummu_decode_config(UMMUDevice *ummu_dev, UMMUTransCfg *cfg, UMMUEventInfo *event)
+{
+    UMMUState *ummu = ummu_dev->ummu;
+    uint32_t dest_eid = ub_dev_get_ueid(ummu_dev->udev);
+
+    qemu_log("ummu decode config dest_eid is %u.\n", dest_eid);
+    if (!ummu_tect_mode_sparse_table(ummu)) {
+        event->type = EVT_TECT_FETCH;
+        event->tecte_tag = ummu_get_tecte_tag_by_dest_eid(ummu, dest_eid);
+        qemu_log("current not support process linear table.\n");
+        return -1;
+    }
+
+    return ummu_tect_parse_sparse_table(ummu_dev, cfg, dest_eid, event);
+}
+
+/*
+ * Return the cached translation config for @ummu_dev, decoding and
+ * caching it on first use.  Returns NULL (with @event filled) on
+ * decode failure.
+ */
+static UMMUTransCfg *ummu_get_config(UMMUDevice *ummu_dev, UMMUEventInfo *event)
+{
+    UMMUState *ummu = ummu_dev->ummu;
+    UMMUTransCfg *cfg = g_hash_table_lookup(ummu->configs, ummu_dev);
+
+    if (!cfg) {
+        /* cfg will be freed when removed from hash table */
+        cfg = g_new0(UMMUTransCfg, 1);
+        if (ummu_decode_config(ummu_dev, cfg, event) == 0) {
+            g_hash_table_insert(ummu->configs, ummu_dev, cfg);
+        } else {
+            g_free(cfg);
+            cfg = NULL;
+        }
+    }
+
+    return cfg;
+}
+
+/*
+ * Read the 8-byte page-table entry at index @index of the table at
+ * @baseaddr into *@pte.  Returns 0 on success, -EINVAL on DMA failure.
+ */
+static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte)
+{
+    MemTxResult ret;
+    dma_addr_t addr = baseaddr + index * sizeof(*pte);
+
+    ret = ldq_le_dma(&address_space_memory, addr, pte, MEMTXATTRS_UNSPECIFIED);
+    if (ret != MEMTX_OK) {
+        /* PRIx64 keeps the format portable: dma_addr_t is 64 bit even on
+         * hosts where long is 32 bit. */
+        qemu_log("failed to get dma data for adr 0x%" PRIx64 "\n", (uint64_t)addr);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
+/*
+ * Stage-1 VMSA-style page-table walk for a 64-bit input address.
+ * Starts at the level implied by the input-address size and descends
+ * until a page or block descriptor is found, then fills @entry.
+ * On a fetch error or an invalid/reserved descriptor the walk ends with
+ * @ptw_info->type = UMMU_PTW_ERR_TRANSLATION.
+ * NOTE(review): @entry->perm is never touched here — permission checks
+ * appear unimplemented; confirm against the caller's expectations.
+ */
+static void ummu_ptw_64_s1(UMMUTransCfg *cfg, dma_addr_t iova, IOMMUTLBEntry *entry, UMMUPTWEventInfo *ptw_info)
+{
+    dma_addr_t baseaddr, indexmask;
+    uint32_t granule_sz, stride, level, inputsize;
+
+    granule_sz = cfg->tct_tgs;
+    stride = VMSA_STRIDE(granule_sz);
+    inputsize = 64 - cfg->tct_sz;
+    /* Initial lookup level for this input size and stride. */
+    level = 4 - (inputsize - 4) / stride;
+    indexmask = VMSA_IDXMSK(inputsize, stride, level);
+    baseaddr = extract64(cfg->tct_ttba, 0, 48);
+    baseaddr &= ~indexmask;
+
+    qemu_log("stride: %u, inputsize: %u, level: %u, baseaddr: 0x%lx\n",
+             stride, inputsize, level, baseaddr);
+    while (level < VMSA_LEVELS) {
+        uint64_t subpage_size = 1ULL << level_shift(level, granule_sz);
+        uint64_t mask = subpage_size - 1;
+        uint64_t pte, gpa;
+        uint32_t offset = iova_level_offset(iova, inputsize, level, granule_sz);
+
+        if (get_pte(baseaddr, offset, &pte)) {
+            goto error;
+        }
+
+        /* The break deliberately falls through to the error label below. */
+        if (is_invalid_pte(pte) || is_reserved_pte(pte, level)) {
+            qemu_log("invalid or reserved pte.\n");
+            break;
+        }
+
+        if (is_table_pte(pte, level)) {
+            /* Intermediate table descriptor: descend one level. */
+            baseaddr = get_table_pte_address(pte, granule_sz);
+            level++;
+            continue;
+        } else if (is_page_pte(pte, level)) {
+            gpa = get_page_pte_address(pte, granule_sz);
+        } else {
+            /* Block (large-page) descriptor. */
+            uint64_t block_size;
+            gpa = get_block_pte_address(pte, level, granule_sz, &block_size);
+        }
+
+        /* Success: report the translation aligned to this level's size. */
+        entry->translated_addr = gpa;
+        entry->iova = iova & ~mask;
+        entry->addr_mask = mask;
+
+        return;
+    }
+
+error:
+    ptw_info->type = UMMU_PTW_ERR_TRANSLATION;
+    return;
+}
+
+/* Page-table-walk dispatcher; only the stage-1 walk is implemented. */
+static void ummu_ptw(UMMUTransCfg *cfg, dma_addr_t iova, IOMMUTLBEntry *entry, UMMUPTWEventInfo *ptw_info)
+{
+    ummu_ptw_64_s1(cfg, iova, entry, ptw_info);
+}
+
+/*
+ * Write one event record at the queue's current producer slot in guest
+ * memory (little-endian) and advance the producer index on success.
+ */
+static MemTxResult eventq_write(UMMUEventQueue *q, UMMUEvent *evt_in)
+{
+    UMMUEvent evt = *evt_in;
+    dma_addr_t base_addr;
+    dma_addr_t addr;
+    MemTxResult res;
+    int i;
+
+    /* Convert the local copy to guest (little-endian) byte order. */
+    for (i = 0; i < ARRAY_SIZE(evt.word); i++) {
+        cpu_to_le32s(&evt.word[i]);
+    }
+
+    base_addr = EVENT_QUE_BASE_ADDR(&q->queue);
+    addr = base_addr + EVENT_QUE_WR_IDX(&q->queue) * q->queue.entry_size;
+    res = dma_memory_write(&address_space_memory, addr, &evt, sizeof(UMMUEvent),
+                           MEMTXATTRS_UNSPECIFIED);
+    if (res != MEMTX_OK) {
+        return res;
+    }
+
+    /* Publish the record by bumping the producer index. */
+    ummu_eventq_prod_incr(q);
+    qemu_log("eventq: addr(0x%lx), prod(%u), cons(%u)\n", addr,
+             EVENT_QUE_WR_IDX(&q->queue), EVENT_QUE_RD_IDX(&q->queue));
+    return MEMTX_OK;
+}
+
+/*
+ * Push @evt onto the event queue if it is enabled and not full, then
+ * raise the event-queue USI so the guest consumes it.
+ */
+static MemTxResult ummu_write_eventq(UMMUState *u, UMMUEvent *evt)
+{
+    UMMUEventQueue *queue = &u->eventq;
+    MemTxResult res;
+
+    if (!ummu_eventq_enabled(u)) {
+        return MEMTX_ERROR;
+    }
+    if (ummu_eventq_full(queue)) {
+        qemu_log("ummu eventq full, eventq write failed.\n");
+        return MEMTX_ERROR;
+    }
+
+    res = eventq_write(queue, evt);
+    if (res != MEMTX_OK) {
+        return res;
+    }
+
+    /* Notify the guest now that the queue is non-empty. */
+    if (!ummu_eventq_empty(queue)) {
+        ummu_glb_usi_notify(u, UMMU_USI_VECTOR_EVETQ);
+    }
+
+    return MEMTX_OK;
+}
+
+/*
+ * Build an event record from @info and report it through the event
+ * queue.  Silently returns when the queue is disabled.
+ */
+static void ummu_record_event(UMMUState *u, UMMUEventInfo *info)
+{
+    UMMUEvent evt = {};
+    MemTxResult res;
+
+    if (!ummu_eventq_enabled(u)) {
+        qemu_log("ummu eventq disabled.\n");
+        return;
+    }
+
+    /* need set more EVT info for different event later */
+    EVT_SET_TYPE(&evt, info->type);
+    EVT_SET_TECTE_TAG(&evt, info->tecte_tag);
+    EVT_SET_TID(&evt, info->tid);
+
+    qemu_log("report event %s: tecte_tag %u tid %u\n",
+             ummu_event_type_strings[info->type], info->tecte_tag, info->tid);
+
+    res = ummu_write_eventq(u, &evt);
+    if (res != MEMTX_OK) {
+        qemu_log("ummu failed to write eventq.\n");
+        /* trigger glb err irq later */
+    }
+}
+
+/*
+ * IOMMU translate callback: resolve @addr for the device behind @mr.
+ * On any failure the identity mapping in @entry is returned unchanged
+ * and a fault event is queued to the guest.
+ */
+static IOMMUTLBEntry ummu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
+                                    IOMMUAccessFlags flag, int iommu_idx)
+{
+    UMMUDevice *ummu_dev = container_of(mr, UMMUDevice, iommu);
+    UMMUEventInfo event = {
+        .type = EVT_NONE
+    };
+    UMMUPTWEventInfo ptw_info = {
+        .type = UMMU_PTW_ERR_NONE
+    };
+    UMMUTransCfg *cfg;
+    IOMMUTLBEntry entry = {
+        .target_as = &address_space_memory,
+        .iova = addr,
+        .translated_addr = addr,
+        .addr_mask = ~(hwaddr)0,
+        .perm = IOMMU_RW,
+    };
+
+    cfg = ummu_get_config(ummu_dev, &event);
+    if (!cfg) {
+        qemu_log("failed to get ummu config.\n");
+        goto epilogue;
+    }
+
+    /* need support cache TLB entry later */
+    ummu_ptw(cfg, addr, &entry, &ptw_info);
+    if (ptw_info.type != UMMU_PTW_ERR_NONE) {
+        /* Map the walk error onto the corresponding fault event. */
+        event.tecte_tag = cfg->tecte_tag;
+        event.tid = cfg->tid;
+        if (ptw_info.type == UMMU_PTW_ERR_TRANSLATION) {
+            event.type = EVT_A_TRANSLATION;
+        } else if (ptw_info.type == UMMU_PTW_ERR_PERMISSION) {
+            event.type = EVT_A_PERMISSION;
+        }
+    }
+
+epilogue:
+    qemu_log("ummu_translate: addr(0x%lx), translated_addr(0x%lx)\n", addr, entry.translated_addr);
+
+    if (event.type != EVT_NONE) {
+        ummu_record_event(ummu_dev->ummu, &event);
+    }
+
+    return entry;
+}
+
+/*
+ * IOMMU notifier-flag change hook.  No special handling is needed yet;
+ * always reports success.
+ */
+static int ummu_notify_flag_changed(IOMMUMemoryRegion *iommu,
+                                    IOMMUNotifierFlag old,
+                                    IOMMUNotifierFlag new,
+                                    Error **errp)
+{
+    qemu_log("ummu_notify_flag_changed\n");
+    return 0;
+}
+
+/*
+ * Tear down the device's stage-1 HW page table: re-attach the device to
+ * the vIOMMU's stage-2 table, then free the stage-1 HWPT.
+ * No-op when no stage-1 table is installed.
+ */
+void ummu_dev_uninstall_nested_tecte(UMMUDevice *ummu_dev)
+{
+    UMMUS1Hwpt *s1_hwpt = ummu_dev->s1_hwpt;
+    HostIOMMUDeviceIOMMUFD *idev = ummu_dev->idev;
+    uint32_t s2_hwpt_id;
+
+    if (!s1_hwpt || !ummu_dev->viommu) {
+        return;
+    }
+
+    /* The device must be moved back to stage 2 before stage 1 is freed. */
+    s2_hwpt_id = ummu_dev->viommu->s2_hwpt->hwpt_id;
+    if (!host_iommu_device_iommufd_attach_hwpt(idev, s2_hwpt_id, NULL)) {
+        error_report("Unable to attach dev to stage-2 HW pagetable");
+        return;
+    }
+
+    qemu_log("uninstall s1 hwpt(%u) success\n", s1_hwpt->hwpt_id);
+    iommufd_backend_free_id(idev->iommufd, s1_hwpt->hwpt_id);
+    ummu_dev->s1_hwpt = NULL;
+    g_free(s1_hwpt);
+}
+
+/*
+ * Allocate and attach a nested (stage-1) HW page table for @ummu_dev.
+ * @data_type/@data_len/@data describe the vendor-specific TECTE payload
+ * handed to the iommufd HWPT allocation.
+ *
+ * Returns 0 on success (or when a stage-1 HWPT is already installed),
+ * -ENOENT when the device has no iommufd backend or vIOMMU, and
+ * -EINVAL when allocation or attachment fails.
+ */
+int ummu_dev_install_nested_tecte(UMMUDevice *ummu_dev, uint32_t data_type,
+                                  uint32_t data_len, void *data)
+{
+    UMMUViommu *viommu = ummu_dev->viommu;
+    UMMUS1Hwpt *s1_hwpt = ummu_dev->s1_hwpt;
+    HostIOMMUDeviceIOMMUFD *idev = ummu_dev->idev;
+    uint64_t *tecte = (uint64_t *)data;
+
+    if (!idev || !viommu) {
+        return -ENOENT;
+    }
+
+    /* Already installed: nothing to do. */
+    if (s1_hwpt) {
+        return 0;
+    }
+
+    /* g_new0() aborts on allocation failure and never returns NULL, so
+     * the former -ENOMEM check was dead code. */
+    s1_hwpt = g_new0(UMMUS1Hwpt, 1);
+    s1_hwpt->ummu = ummu_dev->ummu;
+    s1_hwpt->viommu = viommu;
+    s1_hwpt->iommufd = idev->iommufd;
+
+    if (tecte) {
+        trace_ummu_dev_install_nested_tecte(tecte[0], tecte[1]);
+    }
+
+    if (!iommufd_backend_alloc_hwpt(idev->iommufd, idev->devid,
+                                    viommu->core->viommu_id, 0, data_type,
+                                    data_len, data, &s1_hwpt->hwpt_id, NULL, NULL)) {
+        goto free;
+    }
+
+    if (!host_iommu_device_iommufd_attach_hwpt(idev, s1_hwpt->hwpt_id, NULL)) {
+        goto free_hwpt;
+    }
+
+    ummu_dev->s1_hwpt = s1_hwpt;
+
+    return 0;
+free_hwpt:
+    iommufd_backend_free_id(idev->iommufd, s1_hwpt->hwpt_id);
+free:
+    ummu_dev->s1_hwpt = NULL;
+    g_free(s1_hwpt);
+
+    return -EINVAL;
+}
+
+/* Install the translate/notify callbacks for the UMMU IOMMU MR class. */
+static void ummu_iommu_memory_region_class_init(ObjectClass *klass, void *data)
+{
+    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+    imrc->translate = ummu_translate;
+    imrc->notify_flag_changed = ummu_notify_flag_changed;
+}
+
+/* QOM type descriptor for the UMMU IOMMU memory region. */
+static const TypeInfo ummu_iommu_memory_region_info = {
+    .parent = TYPE_IOMMU_MEMORY_REGION,
+    .name = TYPE_UMMU_IOMMU_MEMORY_REGION,
+    .class_init = ummu_iommu_memory_region_class_init,
+};
+
+/* Register both UMMU QOM types at startup. */
+static void ummu_base_register_types(void)
+{
+    type_register_static(&ummu_base_info);
+    type_register_static(&ummu_iommu_memory_region_info);
+}
+
+type_init(ummu_base_register_types)
diff --git a/hw/ub/ub_ummu_internal.h b/hw/ub/ub_ummu_internal.h
new file mode 100644
index 0000000000000000000000000000000000000000..806b58693fb66260cc3bde4be5cf80a7b0c7b504
--- /dev/null
+++ b/hw/ub/ub_ummu_internal.h
@@ -0,0 +1,962 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+#ifndef UB_UMMU_INTERNAL_H
+#define UB_UMMU_INTERNAL_H
+#include "hw/registerfields.h"
+#include "hw/ub/ub_usi.h"
+#include "sysemu/dma.h"
+#include "sysemu/iommufd.h"
+#include
+
+/* ummu spec register define */
+REG32(CAP0, 0x0010)
+ FIELD(CAP0, DSTEID_SIZE, 0, 8)
+ FIELD(CAP0, TOKENID_SIZE, 8, 5)
+ FIELD(CAP0, ATTR_PERMS_OVR, 13, 1)
+ FIELD(CAP0, ATTR_TYPES_OVR, 14, 1)
+ FIELD(CAP0, S2_ATTR_TYPE, 15, 1)
+ FIELD(CAP0, TCT_LEVEL, 16, 1)
+ FIELD(CAP0, TECT_MODE, 17, 2)
+ FIELD(CAP0, TECT_LEVEL, 19, 1)
+
+REG32(CAP1, 0x0014)
+ FIELD(CAP1, EVENTQ_SIZE, 0, 5)
+ FIELD(CAP1, EVENTQ_NUMB, 5, 4)
+ FIELD(CAP1, EVENTQ_SUPPORT, 9, 1)
+ FIELD(CAP1, MCMDQ_SIZE, 10, 4)
+ FIELD(CAP1, MCMDQ_NUMB, 14, 4)
+ FIELD(CAP1, MCMDQ_SUPPORT, 18, 1)
+ FIELD(CAP1, EVENT_GEN, 19, 1)
+ FIELD(CAP1, STALL_MAX, 20, 12)
+
+REG32(CAP2, 0x0018)
+ FIELD(CAP2, VMID_TLBI, 0, 1)
+ FIELD(CAP2, TLB_BOARDCAST, 1, 1)
+ FIELD(CAP2, RANGE_TLBI, 2, 1)
+ FIELD(CAP2, OA_SIZE, 3, 3)
+ FIELD(CAP2, GRAN4K_T, 6, 1)
+ FIELD(CAP2, GRAN16K_T, 7, 1)
+ FIELD(CAP2, GRAN64K_T, 8, 1)
+ FIELD(CAP2, VA_EXTEND, 9, 2)
+ FIELD(CAP2, S2_TRANS, 11, 1)
+ FIELD(CAP2, S1_TRANS, 12, 1)
+ FIELD(CAP2, SMALL_TRANS, 13, 1)
+ FIELD(CAP2, TRANS_FORM, 14, 2)
+
+REG32(CAP3, 0x001C)
+ FIELD(CAP3, HIER_ATTR_DISABLE, 0, 1)
+ FIELD(CAP3, S2_EXEC_NEVER_CTRL, 1, 1)
+ FIELD(CAP3, BBM_LEVEL, 2, 2)
+ FIELD(CAP3, COHERENT_ACCESS, 4, 1)
+ FIELD(CAP3, TTENDIAN_MODE, 5, 2)
+ FIELD(CAP3, MTM_SUPPORT, 7, 1)
+ FIELD(CAP3, HTTU_SUPPORT, 8, 2)
+ FIELD(CAP3, HYP_S1CONTEXT, 10, 1)
+ FIELD(CAP3, USI_SUPPORT, 11, 1)
+ FIELD(CAP3, STALL_MODEL, 12, 2)
+ FIELD(CAP3, TERM_MODEL, 14, 1)
+ FIELD(CAP3, SATI_MAX, 15, 6)
+
+REG32(CAP4, 0x0020)
+ FIELD(CAP4, UCMDQ_UCPLQ_NUMB, 0, 8)
+ FIELD(CAP4, UCMDQ_SIZE, 8, 4)
+ FIELD(CAP4, UCPLQ_SIZE, 12, 4)
+ FIELD(CAP4, UIEQ_SIZE, 16, 4)
+ FIELD(CAP4, UIEQ_NUMB, 20, 4)
+ FIELD(CAP4, UIEQ_SUPPORT, 24, 1)
+ FIELD(CAP4, PPLB_SUPPORT, 25, 1)
+
+REG32(CAP5, 0x0024)
+ FIELD(CAP5, MAPT_SUPPORT, 0, 1)
+ FIELD(CAP5, MAPT_MODE, 1, 2)
+ FIELD(CAP5, GRAN2M_P, 3, 1)
+ FIELD(CAP5, GRAN4K_P, 4, 1)
+ FIELD(CAP5, TOKENVAL_CHK, 5, 1)
+ FIELD(CAP5, TOKENVAL_CHK_MODE, 6, 2)
+ FIELD(CAP5, RANGE_PLBI, 8, 1)
+ FIELD(CAP5, PLB_BORDCAST, 9, 1)
+
+REG32(CAP6, 0x0028)
+ FIELD(CAP6, MTM_ID_MAX, 0, 16)
+ FIELD(CAP6, MTM_GP_MAX, 16, 8)
+
+#define UMMU_CTRL0_WMASK GENMASK(5, 0)
+REG32(CTRL0, 0x0030)
+ FIELD(CTRL0, UMMU_EN, 0, 1)
+ FIELD(CTRL0, EVENTQ_EN, 1, 1)
+ FIELD(CTRL0, VMID_WILDCARD_T, 2, 3)
+ FIELD(CTRL0, MAPT_EN, 5, 1)
+
+REG32(CTRL0_ACK, 0x0034)
+ FIELD(CTRL0_ACK, UMMU_EN, 0, 1)
+ FIELD(CTRL0_ACK, EVENTQ_EN, 1, 1)
+ FIELD(CTRL0_ACK, VMID_WILDCARD_T, 2, 3)
+ FIELD(CTRL0_ACK, MAPT_EN, 5, 1)
+
+#define UMMU_CTRL1_WMASK GENMASK(15, 0)
+REG32(CTRL1, 0x0038)
+ FIELD(CTRL1, QUEUE_IC_T, 0, 2)
+ FIELD(CTRL1, QUEUE_OC_T, 2, 2)
+ FIELD(CTRL1, QUEUE_SH_T, 4, 2)
+ FIELD(CTRL1, TABLE_IC_T, 6, 2)
+ FIELD(CTRL1, TABLE_OC_T, 8, 2)
+ FIELD(CTRL1, TABLE_SH_T, 10, 2)
+ FIELD(CTRL1, E2H, 12, 1)
+ FIELD(CTRL1, BAD_DSTEID_RECORD, 13, 1)
+ FIELD(CTRL1, PRIVATE_TLB, 14, 1)
+ FIELD(CTRL1, TECT_MODE_SEL, 15, 1)
+
+#define UMMU_CTRL2_WMASK GENMASK(6, 0)
+REG32(CTRL2, 0x003C)
+ FIELD(CTRL2, PRIVATE_PLB, 6, 1)
+ FIELD(CTRL2, UIE_QUEUE_SH_P, 4, 2)
+ FIELD(CTRL2, UIE_QUEUE_OC_P, 2, 2)
+ FIELD(CTRL2, UIE_QUEUE_IC_P, 0, 2)
+
+#define UMMU_CTRL3_WMASK (GENMASK(23, 0) | GENMASK(31, 31))
+REG32(CTRL3, 0x0040)
+ FIELD(CTRL3, UPDATE_FLG, 31, 1)
+ FIELD(CTRL3, UOTR_MTM_GP, 16, 8)
+ FIELD(CTRL3, UOTR_MTM_ID, 0, 16)
+
+#define UMMU_TECT_BASE_WMASK (GENMASK_ULL(51, 6) | GENMASK_ULL(63, 63))
+REG32(TECT_BASE0, 0x0070)
+ FIELD(TECT_BASE0, TECT_BASE_ADDR0, 6, 26)
+
+REG32(TECT_BASE1, 0x0074)
+ FIELD(TECT_BASE1, TECT_BASE_ADDR1, 0, 19)
+ FIELD(TECT_BASE1, TECT_RA_CFG, 31, 1)
+
+#define UMMU_TECT_BASE_CFG_WMASK GENMASK_ULL(12, 0)
+REG32(TECT_BASE_CFG, 0x0078)
+ FIELD(TECT_BASE_CFG, TECT_LOG2SIZE, 0, 6)
+ FIELD(TECT_BASE_CFG, TECT_SPLIT, 6, 5)
+ FIELD(TECT_BASE_CFG, TECT_FMT, 11, 2)
+
+#define UMMU_MCMDQ_BASE_WMASK (GENMASK_ULL(51, 0) | GENMASK_ULL(63, 63))
+#define UMMU_MCMDQ_PI_WMASK (GENMASK(19, 0) | GENMASK(23, 23) | GENMASK(31, 31))
+#define UMMU_MCMDQ_CI_WMASK (GENMASK(19, 0) | GENMASK(26, 23) | GENMASK(31, 31))
+#define A_MCMD_QUE_BASE 0x0100
+#define A_MCMD_QUE_LASTEST_CI 0x10FC
+
+#define UMMU_EVENTQ_BASE_WMASK (GENMASK_ULL(4, 0) | GENMASK_ULL(51, 6) | GENMASK_ULL(63, 63))
+REG32(EVENT_QUE_BASE0, 0x1100)
+ FIELD(EVENT_QUE_BASE0, EVENT_QUE_LOG2SIZE, 0, 5)
+ FIELD(EVENT_QUE_BASE0, EVENT_QUE_ADDR0, 6, 26)
+
+REG32(EVENT_QUE_BASE1, 0x1104)
+ FIELD(EVENT_QUE_BASE1, EVENT_QUE_ADDR1, 0, 20)
+ FIELD(EVENT_QUE_BASE1, EVENT_QUE_WA_CFG, 31, 1)
+
+#define UMMU_EVENTQ_PI_WMASK (GENMASK(19, 0) | GENMASK(31, 31))
+REG32(EVENT_QUE_PI, 0x1108)
+ FIELD(EVENT_QUE_PI, EVENT_QUE_WR_IDX, 0, 19)
+ FIELD(EVENT_QUE_PI, EVENT_QUE_WR_WRAP, 19, 1)
+ FIELD(EVENT_QUE_PI, EVENT_QUE_OVFLG, 31, 1)
+
+#define UMMU_EVENTQ_CI_WMASK (GENMASK(19, 0) | GENMASK(31, 31))
+REG32(EVENT_QUE_CI, 0x110C)
+ FIELD(EVENT_QUE_CI, EVENT_QUE_RD_IDX, 0, 19)
+ FIELD(EVENT_QUE_CI, EVENT_QUE_RD_WRAP, 19, 1)
+ FIELD(EVENT_QUE_CI, EVENT_QUE_OVFLG_RESP, 31, 1)
+
+#define UMMU_EVENTQ_USI_ADDR_WMASK GENMASK_ULL(51, 2)
+REG32(EVENT_QUE_USI_ADDR0, 0x1110)
+ FIELD(EVENT_QUE_USI_ADDR0, USI_ADDR0, 2, 30)
+
+REG32(EVENT_QUE_USI_ADDR1, 0x1114)
+ FIELD(EVENT_QUE_USI_ADDR1, USI_ADDR1, 0, 20)
+
+#define UMMU_EVENT_QUE_USI_DATA_WMASK GENMASK(31, 0)
+REG32(EVENT_QUE_USI_DATA, 0x1118)
+ FIELD(EVENT_QUE_USI_DATA, USI_DATA, 0, 32)
+
+#define UMMU_EVENTQ_USI_ATTR_WMASK GENMASK(5, 0)
+REG32(EVENT_QUE_USI_ATTR, 0x111C)
+ FIELD(EVENT_QUE_USI_ATTR, USI_MEM_ATTR_CFG, 0, 4)
+ FIELD(EVENT_QUE_USI_ATTR, USI_SH_CFG, 4, 2)
+
+REG32(GLB_INT_EN, 0x1130)
+ FIELD(GLB_INT_EN, GLB_ERR_INT_EN, 0, 1)
+ FIELD(GLB_INT_EN, EVENT_QUE_INT_EN, 1, 1)
+
+REG32(GLB_ERR, 0x1134)
+ FIELD(GLB_ERR, MCMD_QUE_ERR, 0, 1)
+ FIELD(GLB_ERR, EVENT_QUE_ABT_ERR, 1, 1)
+ FIELD(GLB_ERR, USI_MCMD_QUE_ABT_ERR, 2, 1)
+ FIELD(GLB_ERR, USI_EVENT_QUE_ABT_ERR, 3, 1)
+ FIELD(GLB_ERR, USI_UIEQ_QUE_ABT_ERR, 4, 1)
+ FIELD(GLB_ERR, USI_GLB_ERR_ABT_ERR, 7, 1)
+
+/* Parenthesized so the OR expands safely inside larger expressions
+ * (e.g. `x & UMMU_GLB_ERR_RESP_WMASK`), matching the other WMASK macros. */
+#define UMMU_GLB_ERR_RESP_WMASK (GENMASK(4, 0) | GENMASK(7, 7))
+REG32(GLB_ERR_RESP, 0x1138)
+ FIELD(GLB_ERR_RESP, MCMDQ_QUE_ERR, 0, 1)
+ FIELD(GLB_ERR_RESP, EVENT_QUE_ABT_ERR, 1, 1)
+ FIELD(GLB_ERR_RESP, USI_MCMDQ_QUE_ABT_ERR, 2, 1)
+ FIELD(GLB_ERR_RESP, USI_EVENT_QUE_ABT_ERR, 3, 1)
+ FIELD(GLB_ERR_RESP, USI_UIEQ_QUE_ABT_ERR, 4, 1)
+ FIELD(GLB_ERR_RESP, USI_GLB_ERR_ABT_ERR, 7, 1)
+
+#define UMMU_GLB_ERR_INT_USI_ADDR_WMASK GENMASK_ULL(51, 2)
+REG32(GLB_ERR_INT_USI_ADDR0, 0x1140)
+ FIELD(GLB_ERR_INT_USI_ADDR0, USI_ADDR0, 2, 29)
+
+REG32(GLB_ERR_INT_USI_ADDR1, 0x1144)
+ FIELD(GLB_ERR_INT_USI_ADDR1, USI_ADDR1, 0, 19)
+
+#define UMMU_GLB_ERR_INT_USI_DATA_WMASK GENMASK(31, 0)
+REG32(GLB_ERR_INT_USI_DATA, 0x1148)
+ FIELD(GLB_ERR_INT_USI_DATA, USI_DATA, 0, 32)
+
+#define UMMU_GLB_ERR_INT_USI_ATTR_WMASK GENMASK(5, 0)
+REG32(GLB_ERR_INT_USI_ATTR, 0x114C)
+ FIELD(GLB_ERR_INT_USI_ATTR, USI_MEM_ATTR_CFG, 0, 4)
+ FIELD(GLB_ERR_INT_USI_ATTR, USI_SH_CFG, 4, 2)
+
+#define MAPT_CMDQ_CTXT_BADDR_WMASK (((GENMASK_ULL(31, 31) | GENMASK_ULL(19, 0)) << 32) | \
+ (GENMASK_ULL(4, 0) | GENMASK_ULL(31, 6)))
+REG32(MAPT_CMDQ_CTXT_BADDR0, 0x1160)
+ FIELD(MAPT_CMDQ_CTXT_BADDR0, MAPT_CMDQ_CTXT_LOG2SIZE, 0, 5)
+ FIELD(MAPT_CMDQ_CTXT_BADDR0, MAPT_CMDQ_CTXT_ADDR0, 6, 26)
+
+REG32(MAPT_CMDQ_CTXT_BADDR1, 0x1164)
+ FIELD(MAPT_CMDQ_CTXT_BADDR1, MAPT_CMDQ_CTXT_ADDR1, 0, 20)
+ FIELD(MAPT_CMDQ_CTXT_BADDR1, MAPT_CMDQ_CTXT_RA_CFG, 31, 1)
+
+#define RELEASE_UM_QUEUE_WMASK 0x1
+REG32(RELEASE_UM_QUEUE, 0x1178)
+ FIELD(RELEASE_UM_QUEUE, MAPT_RLSE_UM_CMDQ, 0, 1)
+
+#define RELEASE_UM_QUEUE_ID_WMASK GENMASK(30, 0)
+REG32(RELEASE_UM_QUEUE_ID, 0x117C)
+ FIELD(RELEASE_UM_QUEUE_ID, MAPT_RLSE_UM_CMDQ_ID, 0, 31)
+
+#define A_UCMDQ_PI_START_REG 0x20000
+/* MAPT Command queue control page 4k: 0x2000C + 2^16 * 0x1000
+ * MAPT Command queue control page 64k: 0x2000C + 2^12 * 0x10000 */
+#define A_UCPLQ_CI_END_REG 0x1002000C
+
+/* ummu user register define */
+REG32(UMMU_INT_MASK, 0x3404)
+ FIELD(UMMU_INT_MASK, UIEQ_USI_MASK, 0, 1)
+ FIELD(UMMU_INT_MASK, UBIF_USI_MASK, 1, 1)
+
+REG32(DSTEID_KV_TABLE_BASE0, 0x3800)
+ FIELD(DSTEID_KV_TABLE_BASE0, DSTEID_TV_TABLE_BASE_ADDR0, 5, 27)
+
+REG32(DSTEID_KV_TABLE_BASE1, 0x3804)
+ FIELD(DSTEID_KV_TABLE_BASE1, DSTEID_TV_TABLE_BASE_ADDR1, 0, 20)
+
+REG32(DSTEID_KV_TABLE_BASE_CFG, 0x3808)
+ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_MEMATTR, 0, 4)
+ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_SH, 4, 2)
+ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_BANK_NUM, 8, 8)
+ FIELD(DSTEID_KV_TABLE_BASE_CFG, DSTEID_KV_TABLE_DEPTH, 16, 16)
+
+REG32(UMMU_DSTEID_KV_TABLE_HASH_CFG0, 0x380C)
+ FIELD(UMMU_DSTEID_KV_TABLE_HASH_CFG0, DSTEID_KV_TABLE_HASH_SEL, 0, 4)
+ FIELD(UMMU_DSTEID_KV_TABLE_HASH_CFG0, DSTEID_KV_TABLE_HASH_WIDTH, 4, 4)
+
+REG32(UMMU_DSTEID_KV_TABLE_HASH_CFG1, 0x3810)
+ FIELD(UMMU_DSTEID_KV_TABLE_HASH_CFG1, DSTEID_KV_TABLE_HASH_CRC32_SEED, 0, 32)
+
+REG32(UMMU_DSTEID_CAM_TABLE_BASE0, 0x3820)
+ FIELD(UMMU_DSTEID_CAM_TABLE_BASE0, DSTEID_CAM_TABLE_BASE_ADDR0, 5, 27)
+
+REG32(UMMU_DSTEID_CAM_TABLE_BASE1, 0x3824)
+ FIELD(UMMU_DSTEID_CAM_TABLE_BASE1, DSTEID_CAM_TABLE_BASE_ADDR1, 0, 20)
+
+REG32(UMMU_DSTEID_CAM_TABLE_BASE_CFG, 0x3828)
+ FIELD(UMMU_DSTEID_CAM_TABLE_BASE_CFG, DSTEID_CAM_TABLE_MEMATTR, 0, 4)
+ FIELD(UMMU_DSTEID_CAM_TABLE_BASE_CFG, DSTEID_CAM_TABLE_SH, 4, 2)
+ FIELD(UMMU_DSTEID_CAM_TABLE_BASE_CFG, DSTEID_CAM_TABLE_DEPTH, 16, 32)
+
+#define MAPT_CMDQ_CTRLR_PAGE_SIZE_4K 1
+#define MAPT_CMDQ_CTRLR_PAGE_SIZE_64K 0
+#define UMCMD_PAGE_SEL_WMASK 0x1
+REG32(UMCMD_PAGE_SEL, 0x3834)
+ FIELD(UMCMD_PAGE_SEL, PAGE_MODEL_SEL_EN, 0, 1)
+
+
+/* ummu user logic register define */
+REG32(UMMU_USER_CONFIG0, 0x4C00)
+
+REG32(UMMU_USER_CONFIG1, 0x4C04)
+
+REG32(UMMU_USER_CONFIG2, 0x4C08)
+ FIELD(UMMU_USER_CONFIG2, INV_TLB_ALL_NS, 0, 1)
+ FIELD(UMMU_USER_CONFIG2, TBU_L2_MEM_INIT_EN, 1, 1)
+ FIELD(UMMU_USER_CONFIG2, TBU_L2_MEM_INITING, 2, 1)
+ FIELD(UMMU_USER_CONFIG2, MCMDQ_MEM_INIT_EN, 3, 1)
+ FIELD(UMMU_USER_CONFIG2, MCMDQ_MEM_INITING, 4, 1)
+
+REG32(UMMU_USER_CONFIG3, 0x4C0C)
+
+REG32(UMMU_USER_CONFIG4, 0x4C10)
+
+REG32(UMMU_USER_CONFIG5, 0x4C14)
+
+REG32(UMMU_USER_CONFIG6, 0x4C18)
+
+REG32(UMMU_USER_CONFIG7, 0x4C1C)
+
+REG32(UMMU_USER_CONFIG8, 0x4C20)
+
+REG32(UMMU_USER_CONFIG9, 0x4C24)
+
+REG32(UMMU_USER_CONFIG10, 0x4C28)
+
+REG32(UMMU_USER_CONFIG11, 0x4C2C)
+
+REG32(UMMU_MEM_USI_ADDR0, 0x4D90)
+ FIELD(UMMU_MEM_USI_ADDR0, UBIF_MEM_USI_ADDR0, 2, 30)
+
+REG32(UMMU_MEM_USI_ADDR1, 0x4D94)
+ FIELD(UMMU_MEM_USI_ADDR1, UBIF_MEM_USI_ADDR1, 0, 20)
+
+REG32(UMMU_MEM_USI_DATA, 0x4D98)
+ FIELD(UMMU_MEM_USI_DATA, UBIF_MEM_USI_DATA, 0, 32)
+
+REG32(UMMU_MEM_USI_ATTR, 0x4D9C)
+ FIELD(UMMU_MEM_USI_ATTR, UBIF_MEM_USI_MEM_ATTR_CFG, 0, 4)
+ FIELD(UMMU_MEM_USI_ATTR, UBIF_MEM_USI_SH_CFG, 4, 2)
+
+#define TYPE_UMMU_IOMMU_MEMORY_REGION "ummu-iommu-memory-region"
+
+#define CMD_TYPE(x) extract32((x)->word[0], 0, 8)
+#define CMD_SYNC_CM(x) extract32((x)->word[0], 12, 2)
+#define CMD_SYNC_CM_NONE 0x0
+#define CMD_SYNC_CM_USI 0x1
+#define CMD_SYNC_CM_SEV 0x2
+#define CMD_SYNC_USI_SH(x) extract32((x)->word[0], 14, 2)
+#define CMD_SYNC_USI_ATTR(x) extract32((x)->word[0], 16, 4)
+#define CMD_SYNC_USI_DATA(x) extract32((x)->word[1], 0, 32)
+#define CMD_SYNC_USI_ADDR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(51, 2))
+#define CMD_CREATE_KVTBL_DEST_EID(x) extract32((x)->word[4], 0, 32)
+#define CMD_CREATE_KVTBL_BASE_ADDR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(51, 6))
+#define CMD_CREATE_KVTBL_TECTE_TAG(x) extract32((x)->word[0], 16, 16)
+#define CMD_DELETE_KVTBL_DEST_EID(x) extract32((x)->word[4], 0, 32)
+#define CMD_DELETE_KVTBL_TECTE_TAG(x) extract32((x)->word[0], 16, 16)
+#define CMD_TECTE_TAG(x) extract32((x)->word[4], 0, 16)
+#define CMD_TECTE_RANGE(x) extract32((x)->word[1], 20, 5)
+/* according to UB SPEC, if range val is 31, invalid all tecte */
+#define CMD_TECTE_RANGE_INVILID_ALL(x) ((x) == 31)
+#define CMD_NULL_SUBOP_CHECK_PA_CONTINUITY 1
+#define CMD_NULL_SUBOP(x) extract32((x)->word[0], 8, 8)
+#define CMD_NULL_CHECK_PA_CONTI_SIZE(x) (1 << extract32((x)->word[0], 24, 6))
+#define CMD_NULL_CHECK_PA_CONTI_ADDR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(47, 12))
+#define UMMU_RUN_IN_VM_FLAG 0x10
+#define PA_CONTINUITY 0x00
+#define PA_NOT_CONTINUITY 0x01
+
+#define MCMDQ_BASE_ADDR_MASK ~0xf0UL
+#define MCMDQ_IDX_MASK 0xf0
+#define MCMDQ_PROD_WMASK 0x808fffff
+#define MCMDQ_CONS_WMASK 0x878fffff
+#define MCMDQ_PROD_BASE_ADDR 0x108
+#define MCMDQ_CONS_BASE_ADDR 0x10C
+#define MCMD_QUE_LOG2SIZE(x) extract32(x, 0, 5)
+#define MCMD_QUE_BASE_ADDR(que) ((que)->base & GENMASK_ULL(51, 5))
+#define MCMD_QUE_RD_IDX(que) (extract32((que)->cons, 0, 19) & ((1 << (que)->log2size) - 1))
+#define MCMD_QUE_WD_IDX(que) (extract32((que)->prod, 0, 19) & ((1 << (que)->log2size) - 1))
+#define MCMD_QUE_RD_WRAP(que) extract32((que)->cons, (que)->log2size, 1)
+#define MCMD_QUE_WD_WRAP(que) extract32((que)->prod, (que)->log2size, 1)
+#define MCMD_QUE_EN_BIT(que) extract32((que)->prod, 31, 1)
+#define MCMD_QUE_EN_RESP_BIT 31
+
+#define EVENT_QUE_LOG2SIZE(x) extract32(x, 0, 5)
+#define EVENT_QUE_BASE_ADDR(que) ((que)->base & GENMASK_ULL(51, 6))
+#define EVENT_QUE_RD_IDX(que) (extract32((que)->cons, 0, 19) & ((1 << (que)->log2size) - 1))
+#define EVENT_QUE_WR_IDX(que) (extract32((que)->prod, 0, 19) & ((1 << (que)->log2size) - 1))
+#define EVENT_QUE_RD_WRAP(que) extract32((que)->cons, (que)->log2size, 1)
+#define EVENT_QUE_WR_WRAP(que) extract32((que)->prod, (que)->log2size, 1)
+
+#define TECT_BASE_ADDR(x) ((x) & GENMASK_ULL(51, 6))
+#define TECT_L2TECTE_PTR(x) ((*(uint64_t *)&(x)->word[0]) & GENMASK_ULL(51, 6))
+#define TECT_DESC_V(x) extract32((x)->word[0], 0, 1)
+#define TECTE_TCT_PTR(x) ((*(uint64_t *)&(x)->word[2]) & GENMASK_ULL(51, 6))
+#define TECTE_TCT_NUM(x) extract32((x)->word[2], 0, 5)
+#define TECTE_TCT_FMT(x) extract32((x)->word[3], 20, 2)
+#define TECTE_VALID(x) extract32((x)->word[0], 0, 1)
+#define TECTE_ST_MODE(x) extract32((x)->word[0], 1, 3)
+#define TECTE_ST_MODE_ABORT 0
+#define TECTE_ST_MODE_BYPASS 4
+#define TECTE_ST_MODE_S1 5
+#define TECTE_ST_MODE_S2 6
+#define TECTE_ST_MODE_NESTED 7
+
+#define TCT_FMT_LINEAR 0
+#define TCT_FMT_LVL2_4K 1
+#define TCT_FMT_LVL2_64K 2
+#define TCT_SPLIT_64K 10
+#define TCT_L2_ENTRIES (1UL << TCT_SPLIT_64K)
+#define TCT_L1TCTE_V(x) extract32((x)->word[0], 0, 1)
+#define TCT_L2TCTE_PTR(x) ((*(uint64_t *)&(x)->word[0]) & GENMASK_ULL(51, 12))
+#define TCTE_TTBA(x) ((*(uint64_t *)&(x)->word[4]) & GENMASK_ULL(51, 4))
+#define TCTE_TCT_V(x) extract32((x)->word[0], 0, 1)
+#define TCTE_SZ(x) extract32((x)->word[2], 0, 6)
+#define TCTE_TGS(x) extract32((x)->word[2], 6, 2)
+/* according ub spec Chapter 9, tct max num is 2 ^ tct_num */
+#define TCTE_MAX_NUM(x) (1 << (x))
+
+#define MAPT_CMDQ_CTXT_BASE_BYTES 64
+#define MAPT_CMDQ_CTXT_BASE_ADDR(x) ((x) & GENMASK_ULL(51, 6))
+#define UCMDQ_UCPLQ_CI_PI_MASK 0xFULL
+#define UCMDQ_PI 0x00
+#define UCMDQ_CI 0x04
+#define UCPLQ_PI 0x08
+#define UCPLQ_CI 0x0C
+#define MAPT_4K_CMDQ_CTXT_QID(offset) ((((offset) & (~0xFULL)) - A_UCMDQ_PI_START_REG) / 0x1000)
+#define MAPT_64K_CMDQ_CTXT_QID(offset) ((((offset) & (~0xFULL)) - A_UCMDQ_PI_START_REG) / 0x10000)
+#define MAPT_UCMDQ_LOG2SIZE(base) extract32((base)->word[0], 2, 4)
+#define MAPT_UCMDQ_PI(base) (extract32((base)->word[10], 0, 16) & \
+ ((1 << MAPT_UCMDQ_LOG2SIZE(base)) - 1))
+#define MAPT_UCMDQ_PI_WRAP(base) extract32((base)->word[10], MAPT_UCMDQ_LOG2SIZE(base), 1)
+#define MAPT_UCMDQ_CI(base) (extract32((base)->word[10], 16, 16) & \
+ ((1 << MAPT_UCMDQ_LOG2SIZE(base)) - 1))
+#define MAPT_UCMDQ_CI_WRAP(base) extract32((base)->word[10], 16 + MAPT_UCMDQ_LOG2SIZE(base), 1)
+#define MAPT_UCMDQ_BASE_ADDR(base) ((*(uint64_t *)&(base)->word[0]) & GENMASK_ULL(51, 12))
+
+#define MAPT_UCMD_TYPE_PSYNC 0x01
+#define MAPT_UCMD_TYPE_PLBI_USR_ALL 0x10
+#define MAPT_UCMD_TYPE_PLBI_USR_VA 0x11
+#define MAPT_UCMD_TYPE(cmd) ((cmd)->word[0] & GENMASK(7, 0))
+
+#define MAPT_UCPLQ_LOG2SIZE(base) extract32((base)->word[2], 2, 4)
+#define MAPT_UCPLQ_PI(base) (extract32((base)->word[11], 0, 16) & \
+ ((1 << MAPT_UCPLQ_LOG2SIZE(base)) - 1))
+#define MAPT_UCPLQ_PI_WRAP(base) extract32((base)->word[11], MAPT_UCPLQ_LOG2SIZE(base), 1)
+#define MAPT_UCPLQ_CI(base) (extract32((base)->word[11], 16, 16) & \
+ ((1 << MAPT_UCPLQ_LOG2SIZE(base)) - 1))
+#define MAPT_UCPLQ_CI_WRAP(base) extract32((base)->word[11], 16 + MAPT_UCPLQ_LOG2SIZE(base), 1)
+#define MAPT_UCPLQ_BASE_ADDR(base) ((*(uint64_t *)&(base)->word[2]) & GENMASK_ULL(51, 12))
+#define MAPT_UCPL_STATUS_INVALID 0x0
+#define MAPT_UCPL_STATUS_PSYNC_SUCCESS 0x1
+#define MAPT_UCPL_STATUS_TYPE_ERROR 0x2
+#define MAPT_UCPL_STATUS_PROCESS_ERROR 0x3
+
+typedef struct UMMUMcmdqCmd {
+ uint32_t word[8];
+} UMMUMcmdqCmd;
+
+typedef struct UMMUEvent {
+ uint32_t word[16];
+} UMMUEvent;
+
+typedef enum UmmuMcmdqCmdType {
+    CMD_SYNC = 0x01,
+    CMD_STALL_RESUME = 0x02,
+    CMD_PREFET_CFG = 0x04,
+    CMD_CFGI_TECT = 0x08,
+    CMD_CFGI_TECT_RANGE = 0x09,
+    CMD_CFGI_TCT = 0x0A,
+    CMD_CFGI_TCT_ALL = 0x0B,
+    CMD_CFGI_VMS_PIDM = 0x0C,
+    CMD_PLBI_OS_EID = 0x14,
+    CMD_PLBI_OS_EIDTID = 0x15,
+    CMD_PLBI_OS_VA = 0x16,
+    CMD_TLBI_OS_ALL = 0x10,
+    CMD_TLBI_OS_TID = 0x11,
+    CMD_TLBI_OS_VA = 0x12,
+    CMD_TLBI_OS_VAA = 0x13,
+    CMD_TLBI_HYP_ALL = 0x18,
+    CMD_TLBI_HYP_TID = 0x19,
+    CMD_TLBI_HYP_VA = 0x1A,
+    CMD_TLBI_HYP_VAA = 0x1B,
+    CMD_TLBI_S1S2_VMALL = 0x28,
+    CMD_TLBI_S2_IPA = 0x2A,
+    CMD_TLBI_NS_OS_ALL = 0x2C,
+    CMD_RESUME = 0x44,
+    CMD_CREATE_KVTBL = 0x60,
+    CMD_DELETE_KVTBL = 0x61,
+    CMD_NULL = 0x62,
+    CMD_TLBI_OS_ALL_U = 0x90,
+    CMD_TLBI_OS_ASID_U = 0x91,
+    CMD_TLBI_OS_VA_U = 0x92,
+    CMD_TLBI_OS_VAA_U = 0x93,
+    CMD_TLBI_HYP_ASID_U = 0x99,
+    CMD_TLBI_HYP_VA_U = 0x9A,
+    CMD_TLBI_S1S2_VMALL_U = 0xA8,
+    CMD_TLBI_S2_IPA_U = 0xAA,
+    MCMDQ_CMD_MAX,
+} UmmuMcmdqCmdType;
+
+typedef struct UMMUS2Hwpt {
+ IOMMUFDBackend *iommufd;
+ uint32_t hwpt_id;
+ uint32_t ioas_id;
+} UMMUS2Hwpt;
+
+typedef struct UMMUViommu {
+ UMMUState *ummu;
+ IOMMUFDBackend *iommufd;
+ IOMMUFDViommu *core;
+ UMMUS2Hwpt *s2_hwpt;
+ QLIST_HEAD(, UMMUDevice) device_list;
+ QLIST_ENTRY(UMMUViommu) next;
+} UMMUViommu;
+
+typedef struct UMMUS1Hwpt {
+    void *ummu;
+    IOMMUFDBackend *iommufd;
+    UMMUViommu *viommu;
+    uint32_t hwpt_id;
+    QLIST_HEAD(, UMMUDevice) device_list;
+    QLIST_ENTRY(UMMUS1Hwpt) next; /* fix: was QLIST_ENTRY(UMMUViommu), a copy-paste error */
+} UMMUS1Hwpt;
+
+typedef struct UMMUVdev {
+ UMMUViommu *vummu;
+ IOMMUFDVdev *core;
+ uint32_t sid;
+} UMMUVdev;
+
+typedef struct UMMUDevice {
+ UMMUState *ummu;
+ IOMMUMemoryRegion iommu;
+ AddressSpace as;
+ AddressSpace as_sysmem;
+ HostIOMMUDeviceIOMMUFD *idev;
+ UMMUViommu *viommu;
+ UMMUS1Hwpt *s1_hwpt;
+ UBDevice *udev;
+ UMMUVdev *vdev;
+ QLIST_ENTRY(UMMUDevice) next;
+} UMMUDevice;
+
+typedef struct UMMUTransCfg {
+ dma_addr_t tct_ptr;
+ uint64_t tct_num;
+ uint64_t tct_fmt;
+ dma_addr_t tct_ttba;
+ uint32_t tct_sz;
+ uint32_t tct_tgs;
+ uint32_t tecte_tag;
+ uint32_t tid;
+ /* TODO */
+} UMMUTransCfg;
+
+typedef enum UMMUEventType {
+    EVT_NONE = 0,
+    /* unsupported translation type */
+    EVT_UT,
+    /* dstEid overflow */
+    EVT_BAD_DSTEID,
+    /* abort when visiting tect, or addr overflow */
+    EVT_TECT_FETCH,
+    /* TECT not valid, (V=0) */
+    EVT_BAD_TECT,
+    /* reserved, no content */
+    EVT_RESERVE_0 = 5,
+    /* tect entry lacks tokenid */
+    EVT_BAD_TOKENID,
+    /* 1. TECT.TCT_MAXNUM = 0, tokenid disable,
+     * 2. TECT.ST_MODE[0] = 0, stage 1 translation close.
+     * 3. tokenid > TECT.TCT_MAXNUM
+     * 4. lvl1 tct invalid in two-level tct
+     */
+    EVT_TCT_FETCH,
+    /* invalid tct */
+    EVT_BAD_TCT,
+    /* error when Address Table walk */
+    EVT_A_PTW_EABT,
+    /* translation input bigger than max valid value,
+     * or no valid translation table descriptor
+     */
+    EVT_A_TRANSLATION = 10,
+    /* address translation output bigger than max valid value */
+    EVT_A_ADDR_SIZE,
+    /* Access flag fault because of AF=0 */
+    EVT_ACCESS,
+    /* address translation permission error */
+    EVT_A_PERMISSION,
+    /* TLB or PLB conflicted in translation */
+    EVT_TBU_CONFLICT,
+    /* config cache conflicted in translation */
+    EVT_CFG_CONFLICT = 15,
+    /* error occurred when getting VMS */
+    EVT_VMS_FETCH,
+    /* error when Permission Table walk */
+    EVT_P_PTW_EABT,
+    /* abnormal software configuration in PTW */
+    EVT_P_CFG_ERROR,
+    /* permission exception in PTW process */
+    EVT_P_PERMISSION,
+    /* reserved, no content */
+    EVT_RESERVE_1 = 20,
+    /* E-Bit verification failed */
+    EVT_EBIT_DENY,
+    /* the UMMU hardware reports the execution result
+     * of the CMD_CREAT_DSTEID_TECT_RELATION command
+     * to the software.
+     */
+    EVT_CREATE_DSTEID_TECT_RELATION_RESULT = 60,
+    /* the UMMU hardware reports the execution result
+     * of the CMD_DELETE_DSTEID_TECT_RELATION command
+     * to the software.
+     */
+    EVT_DELETE_DSTEID_TECT_RELATION_RESULT,
+    EVT_MAX
+} UMMUEventType;
+
+typedef struct UMMUEventInfo {
+ UMMUEventType type;
+ uint32_t tecte_tag;
+ uint32_t tid;
+ union {
+ struct {
+ bool stall;
+ } f_translation;
+ } u;
+ /* TODO */
+} UMMUEventInfo;
+
+typedef enum {
+ UMMU_PTW_ERR_NONE,
+ UMMU_PTW_ERR_TRANSLATION,
+ UMMU_PTW_ERR_PERMISSION
+} UMMUPTWEventType;
+
+typedef struct UMMUPTWEventInfo {
+ UMMUPTWEventType type;
+} UMMUPTWEventInfo;
+
+#define EVT_SET_TYPE(x, v) ((x)->word[0] = deposit32((x)->word[0], 0, 8, v))
+#define EVT_SET_TECTE_TAG(x, v) ((x)->word[8] = deposit32((x)->word[8], 0, 16, v))
+#define EVT_SET_TID(x, v) ((x)->word[1] = deposit32((x)->word[1], 0, 20, v))
+
+/* TECTE Level 1 Description */
+typedef struct TECTEDesc {
+ uint32_t word[2];
+} TECTEDesc;
+
+/* TCTE Level1 Description */
+typedef struct TCTEDesc {
+ uint32_t word[2];
+} TCTEDesc;
+
+/* Target Entity Config Table Entry (TECTE) */
+typedef struct TECTE {
+ uint32_t word[16];
+} TECTE;
+
+/* Target Context Table Entry (TCTE) */
+typedef struct TCTE {
+ uint32_t word[16];
+} TCTE;
+
+typedef struct MAPTCmdqBase {
+ uint32_t word[16];
+} MAPTCmdqBase;
+
+typedef struct MAPTCmd {
+ uint32_t word[4];
+} MAPTCmd;
+
+typedef struct MAPTCmdCpl {
+ uint32_t cpl_status : 4;
+ uint32_t rsv : 12;
+ uint32_t cmdq_ci : 16;
+} MAPTCmdCpl;
+
+typedef struct UMMUTecteRange {
+ bool invalid_all;
+ uint32_t start;
+ uint32_t end;
+} UMMUTecteRange;
+
+static inline void update_reg32_by_wmask(uint32_t *old, uint32_t new, uint32_t wmask)
+{
+ *old = (*old & ~wmask) | (new & wmask);
+}
+
+static inline void update_reg64_by_wmask(uint64_t *old, uint64_t new, uint64_t wmask)
+{
+ *old = (*old & ~wmask) | (new & wmask);
+}
+
+static inline bool ummu_mcmdq_enabled(UMMUMcmdQueue *mcmdq)
+{
+ return MCMD_QUE_EN_BIT(&mcmdq->queue);
+}
+
+static inline void ummu_mcmdq_enable_resp(UMMUMcmdQueue *mcmdq)
+{
+ mcmdq->queue.cons |= GENMASK(MCMD_QUE_EN_RESP_BIT, MCMD_QUE_EN_RESP_BIT);
+}
+
+static inline void ummu_mcmdq_disable_resp(UMMUMcmdQueue *mcmdq)
+{
+ mcmdq->queue.cons &= ~(GENMASK(MCMD_QUE_EN_RESP_BIT, MCMD_QUE_EN_RESP_BIT));
+}
+
+static inline bool ummu_mcmdq_empty(UMMUMcmdQueue *mcmdq)
+{
+ UMMUQueue *q = &mcmdq->queue;
+
+ return MCMD_QUE_WD_IDX(q) == MCMD_QUE_RD_IDX(q) &&
+ MCMD_QUE_WD_WRAP(q) == MCMD_QUE_RD_WRAP(q);
+}
+
+static inline void ummu_mcmdq_cons_incr(UMMUMcmdQueue *mcmdq)
+{
+ mcmdq->queue.cons =
+ deposit32(mcmdq->queue.cons, 0, mcmdq->queue.log2size + 1, mcmdq->queue.cons + 1);
+}
+
+static inline void ummu_set_event_que_int_en(UMMUState *u, uint64_t data)
+{
+ u->eventq.event_que_int_en = FIELD_EX32(data, GLB_INT_EN, EVENT_QUE_INT_EN);
+}
+
+static inline void ummu_set_glb_err_int_en(UMMUState *u, uint64_t data)
+{
+ u->glb_err.glb_err_int_en = FIELD_EX32(data, GLB_INT_EN, GLB_ERR_INT_EN);
+}
+
+static inline bool ummu_event_que_int_en(UMMUState *u)
+{
+ return u->eventq.event_que_int_en;
+}
+
+static inline bool ummu_glb_err_int_en(UMMUState *u)
+{
+ return u->glb_err.glb_err_int_en;
+}
+
+static inline USIMessage ummu_get_eventq_usi_message(UMMUState *u)
+{
+ USIMessage msg;
+
+ msg.address = u->eventq.usi_addr;
+ msg.data = u->eventq.usi_data;
+
+ return msg;
+}
+
+static inline USIMessage ummu_get_gerror_usi_message(UMMUState *u)
+{
+ USIMessage msg;
+
+ msg.address = u->glb_err.usi_addr;
+ msg.data = u->glb_err.usi_data;
+
+ return msg;
+}
+
+#define UMMU_TECT_MODE_SPARSE_TABLE 0x1
+static inline uint32_t ummu_tect_mode_sparse_table(UMMUState *u)
+{
+ return FIELD_EX32(u->ctrl[1], CTRL1, TECT_MODE_SEL) & UMMU_TECT_MODE_SPARSE_TABLE;
+}
+
+#define UMMU_FEAT_2_LVL_TECT 0x1
+static inline uint32_t ummu_tect_fmt_2level(UMMUState *u)
+{
+ return FIELD_EX32(u->tect_base_cfg, TECT_BASE_CFG, TECT_FMT) & UMMU_FEAT_2_LVL_TECT;
+}
+
+static inline uint32_t ummu_tect_split(UMMUState *u)
+{
+ return FIELD_EX32(u->tect_base_cfg, TECT_BASE_CFG, TECT_SPLIT);
+}
+
+static inline int tgs2granule(int bits)
+{
+ switch (bits) {
+ case 0:
+ /* Translation Granule Size: 2 ^ 12 = 4K */
+ return 12;
+ case 1:
+ /* Translation Granule Size: 2 ^ 16 = 64K */
+ return 16;
+ case 2:
+ /* Translation Granule Size: 2 ^ 14 = 16K */
+ return 14;
+ default:
+ return 0;
+ }
+}
+
+static inline bool ummu_eventq_enabled(UMMUState *u)
+{
+ return !!FIELD_EX32(u->ctrl[0], CTRL0, EVENTQ_EN);
+}
+
+static inline bool ummu_eventq_full(UMMUEventQueue *eventq)
+{
+ UMMUQueue *q = &eventq->queue;
+
+ return EVENT_QUE_WR_IDX(q) == EVENT_QUE_RD_IDX(q) &&
+ EVENT_QUE_WR_WRAP(q) != EVENT_QUE_RD_WRAP(q);
+}
+
+static inline bool ummu_eventq_empty(UMMUEventQueue *eventq)
+{
+ UMMUQueue *q = &eventq->queue;
+
+ return EVENT_QUE_WR_IDX(q) == EVENT_QUE_RD_IDX(q) &&
+ EVENT_QUE_WR_WRAP(q) == EVENT_QUE_RD_WRAP(q);
+}
+
+static inline void ummu_eventq_prod_incr(UMMUEventQueue *eventq)
+{
+    UMMUQueue *q = &eventq->queue;
+
+    /* Deposit over log2size + 1 bits: the extra bit is the queue wrap bit.
+     * When cons == prod the queue is either full or empty; the wrap bits
+     * disambiguate: cons.wrap == prod.wrap means the queue is empty,
+     * cons.wrap != prod.wrap means the queue is full.
+     */
+    q->prod = deposit32(q->prod, 0, q->log2size + 1, q->prod + 1);
+}
+
+/*
+ * MAPT Cmd Queue Base Struct
+ * ┌──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┬──┐
+ * │31│30│29│28│27│26│25│24│23│22│21│20│19│18│17│16│15│14│13│12│11│10│ 9│ 8│ 7│ 6│ 5│ 4│ 3│ 2│ 1│ 0│
+ * 0 │ UCMD QUEUE BASE ADDRESS[31:12] │ │
+ * 1 │ │ UCMD QUEUE BASE ADDRESS[51:32] │
+ * 2 │ UCPL QUEUE BASE ADDRESS[31:12] │ │
+ * 3 │ │ UCPL QUEUE BASE ADDRESS[51:32] │
+ * 4 │ │ TECTE_TAG │
+ * 5 │ │
+ * 6 │ │
+ * 7 │ │
+ * 8 │ │ TokenID │
+ * 9 │ │
+ * 10 │ UCMQ_QUEUE CI │ UCMQ_QUEUE PI │
+ * 11 │ UCPL_QUEUE CI │ UCPL_QUEUE PI │
+ * 12 │ │
+ * 13 │ │
+ * 14 │ │
+ * 15 │ │
+ * └───────────────────────────────────────────────────────────────────────────────────────────────┘
+ */
+static inline void ummu_mapt_cmdq_base_update_ucmdq_pi(MAPTCmdqBase *base, uint16_t data)
+{
+ base->word[10] = deposit32(base->word[10], 0, 16, data);
+}
+
+static inline void ummu_mapt_cmdq_base_update_ucmdq_ci(MAPTCmdqBase *base, uint16_t data)
+{
+ base->word[10] = deposit32(base->word[10], 16, 16, data);
+}
+
+static inline void ummu_mapt_cmdq_base_update_ucplq_pi(MAPTCmdqBase *base, uint16_t data)
+{
+ base->word[11] = deposit32(base->word[11], 0, 16, data);
+}
+
+static inline void ummu_mapt_cmdq_base_update_ucplq_ci(MAPTCmdqBase *base, uint16_t data)
+{
+ base->word[11] = deposit32(base->word[11], 16, 16, data);
+}
+
+static inline uint16_t ummu_mapt_cmdq_base_get_ucmdq_pi(MAPTCmdqBase *base)
+{
+ return extract32(base->word[10], 0, 16);
+}
+
+static inline uint16_t ummu_mapt_cmdq_base_get_ucmdq_ci(MAPTCmdqBase *base)
+{
+ return extract32(base->word[10], 16, 16);
+}
+
+static inline uint16_t ummu_mapt_cmdq_base_get_ucplq_pi(MAPTCmdqBase *base)
+{
+ return extract32(base->word[11], 0, 16);
+}
+
+static inline uint16_t ummu_mapt_cmdq_base_get_ucplq_ci(MAPTCmdqBase *base)
+{
+ return extract32(base->word[11], 16, 16);
+}
+
+static inline uint16_t ummu_mapt_cmdq_base_get_tecte_tag(MAPTCmdqBase *base)
+{
+ return extract32(base->word[4], 0, 16);
+}
+
+static inline uint32_t ummu_mapt_cmdq_base_get_token_id(MAPTCmdqBase *base)
+{
+ return extract32(base->word[8], 0, 20);
+}
+
+static inline bool ummu_mapt_ucmdq_empty(MAPTCmdqBase *base)
+{
+ return MAPT_UCMDQ_PI(base) == MAPT_UCMDQ_CI(base) &&
+ MAPT_UCMDQ_PI_WRAP(base) == MAPT_UCMDQ_CI_WRAP(base);
+}
+
+static inline void ummu_mapt_ucmdq_cons_incr(MAPTCmdqBase *base)
+{
+ base->word[10] = deposit32(base->word[10], 16,
+ MAPT_UCMDQ_LOG2SIZE(base) + 1,
+ ummu_mapt_cmdq_base_get_ucmdq_ci(base) + 1);
+}
+
+static inline bool ummu_mapt_ucplq_full(MAPTCmdqBase *base)
+{
+ return MAPT_UCPLQ_PI(base) == MAPT_UCPLQ_CI(base) &&
+ MAPT_UCPLQ_PI_WRAP(base) != MAPT_UCPLQ_CI_WRAP(base);
+}
+
+static inline void ummu_mapt_ucqlq_prod_incr(MAPTCmdqBase *base)
+{
+ base->word[11] = deposit32(base->word[11], 0,
+ MAPT_UCPLQ_LOG2SIZE(base) + 1,
+ ummu_mapt_cmdq_base_get_ucplq_pi(base) + 1);
+}
+
+static inline void ummu_mapt_ucplq_set_cpl(MAPTCmdCpl *cpl, uint16_t status, uint16_t ci)
+{
+ cpl->cpl_status = status;
+ cpl->cmdq_ci = ci;
+}
+
+static inline uint32_t ummu_mapt_cmdq_get_qid(UMMUState *u, uint64_t offset)
+{
+ if (u->ucmdq_page_sel == MAPT_CMDQ_CTRLR_PAGE_SIZE_4K) {
+ return MAPT_4K_CMDQ_CTXT_QID(offset);
+ } else {
+ return MAPT_64K_CMDQ_CTXT_QID(offset);
+ }
+}
+
+static inline void ummu_mcmdq_construct_plbi_os_eidtid(UMMUMcmdqCmd *mcmd_cmd, uint32_t tid, uint16_t tag)
+{
+ mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 0, 8, CMD_PLBI_OS_EIDTID);
+ mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 12, 20, tid);
+ mcmd_cmd->word[4] = deposit32(mcmd_cmd->word[4], 0, 16, tag);
+}
+
+static inline void ummu_plib_usr_va_to_pibi_os_va(MAPTCmd *mapt_cmd, UMMUMcmdqCmd *mcmd_cmd,
+ uint32_t tid, uint16_t tag)
+{
+ mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 0, 8, CMD_PLBI_OS_VA);
+ mcmd_cmd->word[0] = deposit32(mcmd_cmd->word[0], 12, 20, tid);
+ mcmd_cmd->word[1] = deposit32(mcmd_cmd->word[1], 0, 6, extract32(mapt_cmd->word[1], 0, 6));
+ mcmd_cmd->word[2] = mapt_cmd->word[2] & 0xFFFFF000;
+ mcmd_cmd->word[3] = mapt_cmd->word[3];
+ mcmd_cmd->word[4] = deposit32(mcmd_cmd->word[4], 0, 16, tag);
+}
+
+void ummu_dev_uninstall_nested_tecte(UMMUDevice *ummu_dev);
+int ummu_dev_install_nested_tecte(UMMUDevice *sdev, uint32_t data_type,
+ uint32_t data_len, void *data);
+#endif
diff --git a/hw/ub/ub_usi.c b/hw/ub/ub_usi.c
new file mode 100644
index 0000000000000000000000000000000000000000..8250d853eb2fe53a095ee688e5694f046d3e08ad
--- /dev/null
+++ b/hw/ub/ub_usi.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "hw/qdev-core.h"
+#include "hw/ub/ub_usi.h"
+#include "hw/ub/ub_config.h"
+#include "qemu/log.h"
+#include "exec/address-spaces.h"
+
+void usi_send_message(USIMessage *msg, uint32_t interrupt_id, UBDevice *udev)
+{
+    MemTxAttrs attrs = {};
+    attrs.requester_id = interrupt_id;
+    if (udev) {
+        AddressSpace *as = ub_device_iommu_address_space(udev);
+        address_space_stl_le(as, msg->address, msg->data,
+                             attrs, NULL);
+    } else {
+        address_space_stl_le(&address_space_memory, msg->address, msg->data,
+                             attrs, NULL);
+    }
+    qemu_log("usi notify success: interrupt_id %u eventid %u gicv3_its 0x%" PRIx64 "\n",
+             interrupt_id, msg->data, msg->address);
+}
diff --git a/include/hw/ub/hisi/ummu.h b/include/hw/ub/hisi/ummu.h
new file mode 100644
index 0000000000000000000000000000000000000000..192f45e7e679229ac914e66ffee244fa71a000f0
--- /dev/null
+++ b/include/hw/ub/hisi/ummu.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+
+#ifndef HISI_UMMU_H
+#define HISI_UMMU_H
+#include "hw/ub/hisi/ubc.h"
+
+#endif
diff --git a/include/hw/ub/ub.h b/include/hw/ub/ub.h
index b07cc36efde676fc5b19de2d95965c73481fc587..ca2a54d845709656d2d7fa2aef4c72b15c96bdb4 100644
--- a/include/hw/ub/ub.h
+++ b/include/hw/ub/ub.h
@@ -20,6 +20,7 @@
#include
#include "qemu/typedefs.h"
#include "exec/memory.h"
+#include "sysemu/host_iommu_device.h"
#include "hw/arm/virt.h"
#define BYTE_SIZE 1
@@ -181,6 +182,25 @@ typedef struct UBDeviceClass {
DECLARE_OBJ_CHECKERS(UBDevice, UBDeviceClass,
UB_DEVICE, TYPE_UB_DEVICE)
+typedef struct UBIOMMUOps {
+ /**
+ * @get_address_space: get the address space for a set of devices
+ * on a UB bus.
+ *
+ * Mandatory callback which returns a pointer to an #AddressSpace
+ *
+ * @bus: the #UBBus being accessed.
+ *
+ * @opaque: the data passed to ub_setup_iommu().
+ *
+ * @eid: ub device eid
+ */
+ AddressSpace * (*get_address_space)(UBBus *bus, void *opaque, uint32_t eid);
+ bool (*set_iommu_device)(UBBus *bus, void *opaque, uint32_t eid,
+ HostIOMMUDevice *dev, Error **errp);
+ void (*unset_iommu_device)(UBBus *bus, void *opaque, uint32_t eid);
+ bool (*ummu_is_nested)(void *opaque);
+} UBIOMMUOps;
static inline void ub_set_byte(uint8_t *config, uint8_t val)
{
@@ -232,5 +252,10 @@ static inline uint64_t ub_config_size(void)
{
return UB_DEV_CONFIG_SPACE_TOTAL_SIZE;
}
+AddressSpace *ub_device_iommu_address_space(UBDevice *dev);
UBDevice *ub_find_device_by_id(const char *id);
+uint32_t ub_interrupt_id(UBDevice *udev);
+void ub_setup_iommu(UBBus *bus, const UBIOMMUOps *ops, void *opaque);
+uint32_t ub_dev_get_token_id(UBDevice *udev);
+uint32_t ub_dev_get_ueid(UBDevice *udev);
#endif
diff --git a/include/hw/ub/ub_bus.h b/include/hw/ub/ub_bus.h
index 58baea4efba61c910880d42ecb48953fec24ed2f..189dbf878533a321be3f040fb07974af44102949 100644
--- a/include/hw/ub/ub_bus.h
+++ b/include/hw/ub/ub_bus.h
@@ -41,6 +41,8 @@ struct UBBus {
BusState qbus;
UBDeviceList devices;
MemoryRegion *address_space_mem;
+ const UBIOMMUOps *iommu_ops;
+ void *iommu_opaque;
};
#define TYPE_UB_BUS "UB_BUS"
diff --git a/include/hw/ub/ub_common.h b/include/hw/ub/ub_common.h
index 840052931e7a70f7cc2ccdc5da64434038e2f43d..440a5bacdf680271e38d0a8c650c10debdc4b724 100644
--- a/include/hw/ub/ub_common.h
+++ b/include/hw/ub/ub_common.h
@@ -416,4 +416,6 @@ typedef struct MsgPktHeader { /* TODO, check byte order */
uint32_t fill_rq(BusControllerState *s, void *rsp, uint32_t rsp_size);
uint32_t fill_cq(BusControllerState *s, HiMsgCqe *cqe);
+bool ub_guid_is_none(UbGuid *guid);
+
#endif
diff --git a/include/hw/ub/ub_ummu.h b/include/hw/ub/ub_ummu.h
index f8b65a0bbe861af11f709e1b790c9fd5b4fdb30a..7e279c94f8afa9454ad99f0e6993d37e56fe7817 100644
--- a/include/hw/ub/ub_ummu.h
+++ b/include/hw/ub/ub_ummu.h
@@ -26,4 +26,100 @@
#define UMMU_INTERRUPT_ID 0x8989 // UMMU DEVICE ID need allocate later
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define TYPE_UB_UMMU "ub-ummu"
+OBJECT_DECLARE_TYPE(UMMUState, UMMUBaseClass, UB_UMMU)
+
+typedef struct UMMUQueue {
+ uint64_t base; /* base register */
+ uint32_t prod;
+ uint32_t cons;
+ uint64_t entry_size;
+ uint8_t log2size;
+} UMMUQueue;
+
+typedef struct UMMUMcmdQueue {
+ UMMUQueue queue;
+} UMMUMcmdQueue;
+
+typedef struct UMMUEventQueue {
+ UMMUQueue queue;
+ uint64_t usi_addr;
+ uint32_t usi_data;
+ uint32_t usi_attr;
+ bool event_que_int_en;
+} UMMUEventQueue;
+
+typedef struct UMMUGlbErr {
+ uint64_t usi_addr;
+ uint32_t usi_data;
+ uint32_t usi_attr;
+ bool glb_err_int_en;
+ uint32_t glb_err;
+ uint32_t glb_err_resp;
+} UMMUGlbErr;
+
+typedef enum UMMUUSIVectorType {
+ UMMU_USI_VECTOR_EVETQ,
+ UMMU_USI_VECTOR_GERROR,
+ UMMU_USI_VECTOR_MAX,
+} UMMUUSIVectorType;
+
+typedef struct UMMUKVTblEntry {
+ uint32_t dst_eid;
+ uint32_t tecte_tag;
+ QLIST_ENTRY(UMMUKVTblEntry) list;
+} UMMUKVTblEntry;
+
+#define UMMU_MAX_MCMDQS 32
+#define UMMU_TECTE_TAG_MAX_NUM 32
+struct UMMUState {
+    /*< private >*/
+    SysBusDevice dev;
+    const char *mrtypename;
+    MemoryRegion ummu_reg_mem;
+    uint64_t ummu_reg_size;
+    MemoryRegion root;
+    MemoryRegion stage2;
+    MemoryRegion sysmem;
+
+    /* Nested */
+    bool nested;
+    UMMUViommu *viommu;
+
+    /* spec register define */
+    uint32_t cap[7];
+    uint32_t ctrl[4];
+    uint32_t ctrl0_ack;
+    uint64_t tect_base;
+    uint32_t tect_base_cfg;
+    UMMUMcmdQueue mcmdqs[UMMU_MAX_MCMDQS];
+    UMMUEventQueue eventq;
+    UMMUGlbErr glb_err;
+    uint64_t mapt_cmdq_ctxt_base;
+    uint32_t release_um_queue;
+    uint32_t release_um_queue_id;
+    uint32_t ucmdq_page_sel;
+
+    int usi_virq[UMMU_USI_VECTOR_MAX];
+    uint8_t bus_num;
+    UBBus *bus;
+    QLIST_ENTRY(UMMUState) node;
+    uint32_t tecte_tag_cache[UMMU_TECTE_TAG_MAX_NUM];
+    uint32_t tecte_tag_num;
+
+    UBBus *primary_bus; /* TODO(review): is the 'bus' field above still necessary? */
+    GHashTable *ummu_devs;
+    GHashTable *configs;
+    QLIST_HEAD(, UMMUKVTblEntry) kvtbl;
+};
+
+struct UMMUBaseClass {
+    /*< private >*/
+    SysBusDeviceClass parent_class;
+};
+
+UMMUState *ummu_find_by_bus_num(uint8_t bus_num);
+int ummu_associating_with_ubc(BusControllerState *ubc);
#endif
diff --git a/include/hw/ub/ub_usi.h b/include/hw/ub/ub_usi.h
new file mode 100644
index 0000000000000000000000000000000000000000..96332e585031a670c8a683ad206a4ffe0f01c954
--- /dev/null
+++ b/include/hw/ub/ub_usi.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+#ifndef UB_USI_H
+#define UB_USI_H
+#include "qemu/typedefs.h"
+#include "hw/ub/ub.h"
+
+struct USIMessage {
+ uint64_t address;
+ uint32_t data;
+};
+
+void usi_send_message(USIMessage *msg, uint32_t interrupt_id, UBDevice *udev);
+
+#endif
diff --git a/include/hw/ub/ubus_instance.h b/include/hw/ub/ubus_instance.h
new file mode 100644
index 0000000000000000000000000000000000000000..deb6353ed845a98503e1eab6d05cb15e2ea61265
--- /dev/null
+++ b/include/hw/ub/ubus_instance.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2024-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <https://www.gnu.org/licenses/>.
+ */
+
+#ifndef UB_BUS_INSTANCE_H
+#define UB_BUS_INSTANCE_H
+
+#define UBUS_INSTANCE_UNKNOW (-1)
+#define UBUS_INSTANCE_STATIC_SERVER 0
+#define UBUS_INSTANCE_STATIC_CLUSTER 1
+#define UBUS_INSTANCE_DYNAMIC_SERVER 2
+#define UBUS_INSTANCE_DYNAMIC_CLUSTER 3
+
+#define UBUS_INSTANCE_IS_STATIC_SERVER(type) (type == UBUS_INSTANCE_STATIC_SERVER)
+#define UBUS_INSTANCE_IS_STATIC_CLUSTER(type) (type == UBUS_INSTANCE_STATIC_CLUSTER)
+#define UBUS_INSTANCE_IS_DYNAMIC_SERVER(type) (type == UBUS_INSTANCE_DYNAMIC_SERVER)
+#define UBUS_INSTANCE_IS_DYNAMIC_CLUSTER(type) (type == UBUS_INSTANCE_DYNAMIC_CLUSTER)
+#define UBUS_INSTANCE_IS_STATIC(type) \
+ (UBUS_INSTANCE_IS_STATIC_SERVER(type) || UBUS_INSTANCE_IS_STATIC_CLUSTER(type))
+#define UBUS_INSTANCE_IS_DYNAMIC(type) \
+ (UBUS_INSTANCE_IS_DYNAMIC_SERVER(type) || UBUS_INSTANCE_IS_DYNAMIC_CLUSTER(type))
+#define UBUS_INSTANCE_IS_SERVER(type) \
+ (UBUS_INSTANCE_IS_STATIC_SERVER(type) || UBUS_INSTANCE_IS_DYNAMIC_SERVER(type))
+#define UBUS_INSTANCE_IS_CLUSTER(type) \
+ (UBUS_INSTANCE_IS_STATIC_CLUSTER(type) || UBUS_INSTANCE_IS_DYNAMIC_CLUSTER(type))
+
+#endif
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index a1b15dd21959cbc28484e807b64f594cb82a41aa..f52ceea7a00d883490278c4d24044b847c798e05 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -140,7 +140,9 @@ typedef struct VMStateDescription VMStateDescription;
/* UB typedef */
typedef struct UBDevice UBDevice;
+typedef struct USIMessage USIMessage;
typedef struct UBBus UBBus;
+typedef struct UMMUViommu UMMUViommu;
/*
* Pointer types
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index de68df91a3d086589886272c86bf05fa8a93f212..50b9798542228cb79ad6222ba784482b1a143eee 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -411,7 +411,8 @@ void kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr);
#endif
void kvm_arch_init_irq_routing(KVMState *s);
-
+int kvm_arch_fixup_usi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data, UBDevice *dev);
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint64_t address, uint32_t data, PCIDevice *dev);
@@ -513,6 +514,8 @@ int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev);
int kvm_irqchip_update_msi_route(KVMRouteChange *c, int virq, MSIMessage msg,
PCIDevice *dev);
void kvm_irqchip_commit_routes(KVMState *s);
+int kvm_irqchip_add_usi_route(KVMRouteChange *c, USIMessage msg, uint32_t devid, UBDevice *udev);
+int kvm_irqchip_update_usi_route(KVMRouteChange *c, int virq, USIMessage msg, UBDevice *udev);
static inline KVMRouteChange kvm_irqchip_begin_route_changes(KVMState *s)
{
diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h
index 3e57fee01cc7ec52083e30bfe789d88f39d6751f..79bc50379bf84ebfc36f640de6d52b6d4a152e78 100644
--- a/linux-headers/linux/iommufd.h
+++ b/linux-headers/linux/iommufd.h
@@ -416,6 +416,10 @@ struct iommu_hwpt_arm_smmuv3 {
__aligned_le64 ste[2];
};
+struct iommu_hwpt_ummu {
+ __aligned_le64 tecte[2];
+};
+
/**
* enum iommu_hwpt_data_type - IOMMU HWPT Data Type
* @IOMMU_HWPT_DATA_NONE: no data
@@ -426,6 +430,7 @@ enum iommu_hwpt_data_type {
IOMMU_HWPT_DATA_NONE = 0,
IOMMU_HWPT_DATA_VTD_S1 = 1,
IOMMU_HWPT_DATA_ARM_SMMUV3 = 2,
+ IOMMU_HWPT_DATA_UMMU = 3,
};
/**
@@ -701,10 +706,12 @@ struct iommu_hwpt_get_dirty_bitmap {
* Data Type
* @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
* @IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3
+ * @IOMMU_VIOMMU_INVALIDATE_DATA_UMMU: Invalidation data for UMMU
*/
enum iommu_hwpt_invalidate_data_type {
IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0,
IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3 = 1,
+ IOMMU_VIOMMU_INVALIDATE_DATA_UMMU = 2,
};
/**
@@ -902,10 +909,12 @@ struct iommu_fault_alloc {
* enum iommu_viommu_type - Virtual IOMMU Type
* @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use
* @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type
+ * @IOMMU_VIOMMU_TYPE_UMMU: HISI UMMU driver specific type
*/
enum iommu_viommu_type {
IOMMU_VIOMMU_TYPE_DEFAULT = 0,
IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1,
+ IOMMU_VIOMMU_TYPE_UMMU = 2,
};
/**
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index 50f22717ec811901e0e06e7c644c7ce50b58134c..1bb6e332e7d87958e71237c1ceb8e81ab022b775 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -36,6 +36,9 @@
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qemu/log.h"
+#ifdef CONFIG_UB
+#include "hw/ub/ub.h"
+#endif // CONFIG_UB
/*
* SMMCC KVM Vendor hypercall definitions.
@@ -1402,6 +1405,44 @@ int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level)
return kvm_set_irq(kvm_state, kvm_irq, !!level);
}
+#ifdef CONFIG_UB
+int kvm_arch_fixup_usi_route(struct kvm_irq_routing_entry *route,
+                             uint64_t address, uint32_t data, UBDevice *dev)
+{
+    AddressSpace *as = ub_device_iommu_address_space(dev);
+    hwaddr xlat, len, doorbell_gpa;
+    MemoryRegionSection mrs;
+    MemoryRegion *mr;
+
+    if (as == &address_space_memory) {
+        return 0;
+    }
+
+    /* USI doorbell address is translated by an IOMMU */
+    RCU_READ_LOCK_GUARD();
+    mr = address_space_translate(as, address, &xlat, &len, true,
+                                 MEMTXATTRS_UNSPECIFIED);
+    if (!mr) {
+        qemu_log("address space translate address(0x%" PRIx64 ") failed.\n", address);
+        return 1;
+    }
+
+    mrs = memory_region_find(mr, xlat, 1);
+    if (!mrs.mr) {
+        qemu_log("mr failed to find mrs.\n");
+        return 1;
+    }
+
+    doorbell_gpa = mrs.offset_within_address_space;
+    memory_region_unref(mrs.mr);
+    qemu_log("IOVA(0x%" PRIx64 ") trans to GPA(0x%" HWADDR_PRIx ") by iommu success.\n", address, doorbell_gpa);
+    route->u.msi.address_lo = doorbell_gpa;
+    route->u.msi.address_hi = doorbell_gpa >> 32;
+
+    return 0;
+}
+#endif // CONFIG_UB
+
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
uint64_t address, uint32_t data, PCIDevice *dev)
{