diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index b389ef762203e4cf84342937b4af892cb6f5be4c..120fe0d0a1b942c9ff914977b06b624985282c64 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -60,6 +60,9 @@
#include "hw/acpi/viot.h"
#include "kvm_arm.h"
#include "hw/virtio/virtio-acpi.h"
+#ifdef CONFIG_UB
+#include "hw/ub/ub_acpi.h"
+#endif
#define ARM_SPI_BASE 32
@@ -679,6 +682,11 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
} else {
rc_mapping_count = 1;
}
+
+#ifdef CONFIG_UB
+ nb_nodes += 3; /* UBC0, UMU0, PMU0 */
+#endif
+
/* Number of IORT Nodes */
build_append_int_noprefix(table_data, nb_nodes, 4);
@@ -788,6 +796,10 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
build_iort_rmr_nodes(table_data, smmu_idmaps, smmu_offset, &id);
}
+#ifdef CONFIG_UB
+ acpi_iort_add_ub(table_data);
+#endif
+
acpi_table_end(linker, &table);
g_array_free(smmu_idmaps, true);
g_array_free(its_idmaps, true);
@@ -1318,6 +1330,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
acpi_dsdt_add_tpm(scope, vms);
#endif
+#ifdef CONFIG_UB
+ acpi_dsdt_add_ub(scope);
+#endif
+
aml_append(dsdt, scope);
/* copy AML table into ACPI tables blob */
@@ -1365,7 +1381,10 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
/* DSDT is pointed to by FADT */
dsdt = tables_blob->len;
build_dsdt(tables_blob, tables->linker, vms);
-
+#ifdef CONFIG_UB
+ acpi_add_table(table_offsets, tables_blob);
+ build_ubrt(tables_blob, tables->linker, vms);
+#endif
/* FADT MADT PPTT GTDT MCFG SPCR DBG2 pointed to by RSDT */
acpi_add_table(table_offsets, tables_blob);
build_fadt_rev6(tables_blob, tables->linker, vms, dsdt);
@@ -1474,8 +1493,6 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables)
" or PCI bridges.");
}
acpi_align_size(tables_blob, ACPI_BUILD_TABLE_SIZE);
-
-
/* Cleanup memory that's no longer used. */
g_array_free(table_offsets, true);
}
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index b2091406849961e161cf6d6b216a2e600fc2c91b..470a320bc629f12e9f47152721be8135a5f9ec8f 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -90,9 +90,15 @@
#include "qemu/log.h"
#ifdef CONFIG_UB
#include "hw/ub/ub.h"
+#include "hw/ub/ub_bus.h"
#include "hw/ub/ub_ubc.h"
+#include "hw/ub/hisi/ub_mem.h"
+#include "hw/ub/ub_acpi.h"
#include "hw/ub/hisi/ubc.h"
#include "hw/ub/hisi/ub_fm.h"
+#include "hw/ub/ub_ummu.h"
+#include "hw/ub/ub_common.h"
+#include "hw/ub/ub_config.h"
#endif // CONFIG_UB
#define DEFINE_VIRT_MACHINE_LATEST(major, minor, latest) \
@@ -213,6 +219,16 @@ static MemMapEntry extended_memmap[] = {
[VIRT_HIGH_PCIE_ECAM] = { 0x0, 256 * MiB },
/* Second PCIe window */
[VIRT_HIGH_PCIE_MMIO] = { 0x0, 512 * GiB },
+#ifdef CONFIG_UB
+ /* ub mmio window */
+ [VIRT_HIGH_UB_MMIO] = { 0x0, UBIOS_MMIOS_SIZE_PER_UBC * UBIOS_UBC_TABLE_CNT},
+ /* ub idev fers window */
+ [VIRT_UB_IDEV_ERS] = { 0x0, 512 * GiB},
+    [VIRT_UBC_BASE_REG] = { 0x0, BASE_REG_SIZE}, /* only one UBC is supported for now */
+ [VIRT_UBIOS_INFO_TABLE] = { 0x0, UBIOS_TABLE_SIZE},
+ [VIRT_UB_MEM_CC] = { 0x0, UB_MEM_SPACE_SIZE},
+ [VIRT_UB_MEM_NC] = { 0x0, UB_MEM_SPACE_SIZE},
+#endif // CONFIG_UB
};
static const int a15irqmap[] = {
@@ -376,6 +392,20 @@ static void create_fdt(VirtMachineState *vms)
}
}
+#ifdef CONFIG_UB
+static void create_ubios_info_table_fdt(VirtMachineState *vms, MemoryRegion *machine_ram)
+{
+ MachineState *ms = MACHINE(vms);
+
+ qemu_fdt_setprop_u64(ms->fdt, "/chosen", "linux,ubios-information-table",
+ vms->memmap[VIRT_UBIOS_INFO_TABLE].base);
+ qemu_log("create fdt for ubios-information-table 0x%lx\n",
+ vms->memmap[VIRT_UBIOS_INFO_TABLE].base);
+
+ ub_init_ubios_info_table(vms, ROUND_UP(UBIOS_TABLE_SIZE, 4 * KiB));
+}
+#endif // CONFIG_UB
+
static void fdt_add_timer_nodes(const VirtMachineState *vms)
{
/* On real hardware these interrupts are level-triggered.
@@ -1725,11 +1755,46 @@ static void create_virtio_iommu_dt_bindings(VirtMachineState *vms)
static void create_ub(VirtMachineState *vms)
{
DeviceState *ubc;
+ MemoryRegion *mmio_reg;
+ MemoryRegion *mmio_alias;
+
+ if (ub_cfg_addr_map_table_init() < 0) {
+ qemu_log("failed to init ub cfg addr map table\n");
+ exit(1);
+ }
ubc = qdev_new(TYPE_BUS_CONTROLLER);
qdev_prop_set_uint32(ubc, "ub-bus-controller-msgq-reg-size", UBC_MSGQ_REG_SIZE);
qdev_prop_set_uint32(ubc, "ub-bus-controller-fm-msgq-reg-size", FM_MSGQ_REG_SIZE);
sysbus_realize_and_unref(SYS_BUS_DEVICE(ubc), &error_fatal);
+
+    /*
+     * ub_bus_controller_realize() calls sysbus_init_mmio() for its memory
+     * regions in this order:
+     *   0: msgq_reg_mem
+     *   1: fm_msgq_reg_mem
+     *   2: ub controller io_mmio
+     * sysbus_mmio_map()/sysbus_mmio_get_region() below look regions up by
+     * that index; the first map call uses index 0, msgq_reg_mem.
+     */
+ sysbus_mmio_map(SYS_BUS_DEVICE(ubc), 0,
+ vms->memmap[VIRT_UBC_BASE_REG].base + UBC_MSGQ_REG_OFFSET);
+    /* index 1: fm_msgq_reg_mem */
+ sysbus_mmio_map(SYS_BUS_DEVICE(ubc), 1,
+ vms->memmap[VIRT_UBC_BASE_REG].base + FM_MSGQ_REG_OFFSET);
+ mmio_alias = g_new0(MemoryRegion, 1);
+    /* index 2: ub controller io_mmio */
+ mmio_reg = sysbus_mmio_get_region(SYS_BUS_DEVICE(ubc), 2);
+ memory_region_init_alias(mmio_alias, OBJECT(ubc), "ub-mmio",
+ mmio_reg, vms->memmap[VIRT_HIGH_UB_MMIO].base,
+ vms->memmap[VIRT_HIGH_UB_MMIO].size);
+ memory_region_add_subregion(get_system_memory(),
+ vms->memmap[VIRT_HIGH_UB_MMIO].base,
+ mmio_alias);
+
+ mmio_alias = g_new0(MemoryRegion, 1);
+ memory_region_init_alias(mmio_alias, OBJECT(ubc), "ub-idev-fers-as",
+ mmio_reg, vms->memmap[VIRT_UB_IDEV_ERS].base,
+ vms->memmap[VIRT_UB_IDEV_ERS].size);
+ memory_region_add_subregion(get_system_memory(),
+ vms->memmap[VIRT_UB_IDEV_ERS].base, mmio_alias);
}
#endif // CONFIG_UB
static void create_pcie(VirtMachineState *vms)
@@ -2040,6 +2105,14 @@ static inline bool *virt_get_high_memmap_enabled(VirtMachineState *vms,
&vms->highmem_redists,
&vms->highmem_ecam,
&vms->highmem_mmio,
+#ifdef CONFIG_UB
+ &vms->highmem_ub_mmio,
+ &vms->highmem_idev_ers,
+ &vms->highmem_ubc_base_reg,
+ &vms->highmem_ubios_info_table,
+ &vms->highmem_ub_mem_cc,
+ &vms->highmem_ub_mem_nc,
+#endif // CONFIG_UB
};
assert(ARRAY_SIZE(extended_memmap) - VIRT_LOWMEMMAP_LAST ==
@@ -2056,6 +2129,9 @@ static void virt_set_high_memmap(VirtMachineState *vms,
bool *region_enabled, fits;
int i;
+#ifdef CONFIG_UB
+ ub_set_gpa_bits((uint8_t)pa_bits);
+#endif
for (i = VIRT_LOWMEMMAP_LAST; i < ARRAY_SIZE(extended_memmap); i++) {
region_enabled = virt_get_high_memmap_enabled(vms, i);
region_base = ROUND_UP(base, extended_memmap[i].size);
@@ -2074,6 +2150,10 @@ static void virt_set_high_memmap(VirtMachineState *vms,
*/
fits = (region_base + region_size) <= BIT_ULL(pa_bits);
*region_enabled &= fits;
+#ifdef CONFIG_UB
+ qemu_log("%d base 0x%lx size 0x%lx enable %u highmem_compact %u\n", i,
+ region_base, region_size, *region_enabled, vms->highmem_compact);
+#endif
if (vms->highmem_compact && !*region_enabled) {
continue;
}
@@ -2152,8 +2232,8 @@ static void virt_set_memmap(VirtMachineState *vms, int pa_bits)
/* Base address of the high IO region */
memtop = base = device_memory_base + ROUND_UP(device_memory_size, GiB);
if (memtop > BIT_ULL(pa_bits)) {
- error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes\n",
- pa_bits, memtop - BIT_ULL(pa_bits));
+ error_report("Addressing limited to %d bits, but memory exceeds it by %llu bytes\n",
+ pa_bits, memtop - BIT_ULL(pa_bits));
exit(EXIT_FAILURE);
}
if (base < device_memory_base) {
@@ -2861,6 +2941,16 @@ static void machvirt_init(MachineState *machine)
machine->ram);
virt_flash_fdt(vms, sysmem, secure_sysmem ?: sysmem);
+#ifdef CONFIG_UB
+ qemu_log("memory_region_add_reservation 0x%lx size %ld round up %ld\n",
+ vms->memmap[VIRT_UBIOS_INFO_TABLE].base, UBIOS_TABLE_SIZE,
+ ROUND_UP(UBIOS_TABLE_SIZE, 4 * KiB));
+ memory_region_add_reservation_with_ram(get_system_memory(),
+ OBJECT(machine->memdev), "ubios-information-table",
+ vms->memmap[VIRT_UBIOS_INFO_TABLE].base,
+ ROUND_UP(UBIOS_TABLE_SIZE, 4 * KiB));
+ create_ubios_info_table_fdt(vms, machine->ram);
+#endif // CONFIG_UB
create_gic(vms, sysmem);
@@ -3995,6 +4085,50 @@ static int virt_kvm_type(MachineState *ms, const char *type_str)
return requested_pa_size | rme_vm_type | type;
}
+#ifdef CONFIG_UB
+static bool virt_get_ummu(Object *obj, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ return vms->ummu;
+}
+
+static void virt_set_ummu(Object *obj, bool value, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ vms->ummu = value;
+}
+
+static bool virt_ub_get_cluster_mode(Object *obj, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ return vms->ub_cluster_mode;
+}
+
+static void virt_ub_set_cluster_mode(Object *obj, bool value, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ vms->ub_cluster_mode = value;
+}
+
+static bool virt_ub_get_fm_deployment_info(Object *obj, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ return vms->fm_deployment;
+}
+
+static void virt_ub_set_fm_deployment_info(Object *obj, bool value, Error **errp)
+{
+ VirtMachineState *vms = VIRT_MACHINE(obj);
+
+ vms->fm_deployment = value;
+}
+#endif // CONFIG_UB
+
static void virt_machine_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -4169,6 +4303,17 @@ static void virt_machine_class_init(ObjectClass *oc, void *data)
object_class_property_set_description(oc, "x-target-impl-cpus",
"Describe target cpu impl in the format midr1:revidr1-midr2:revidr2"
"Maximum 4 midr:revidr pair is supported");
+#ifdef CONFIG_UB
+ object_class_property_add_bool(oc, "ummu", virt_get_ummu, virt_set_ummu);
+ object_class_property_add_bool(oc, "ub-cluster-mode", virt_ub_get_cluster_mode,
+ virt_ub_set_cluster_mode);
+ object_class_property_set_description(oc, "ub-cluster-mode",
+ "Set on/off to enable/disable ub cluster mode");
+ object_class_property_add_bool(oc, "fm-deployment", virt_ub_get_fm_deployment_info,
+ virt_ub_set_fm_deployment_info);
+ object_class_property_set_description(oc, "fm-deployment",
+ "Set on/off to support FM msg queue or not");
+#endif // CONFIG_UB
}
static char *virt_get_kvm_type(Object *obj, Error **errp G_GNUC_UNUSED)
@@ -4235,6 +4380,17 @@ static void virt_instance_init(Object *obj)
/* MTE is disabled by default. */
vms->mte = false;
+#ifdef CONFIG_UB
+ vms->highmem_ub_mmio = true;
+ vms->highmem_idev_ers = true;
+ vms->highmem_ubc_base_reg = true;
+ vms->highmem_ubios_info_table = true;
+ vms->highmem_ub_mem_cc = true;
+ vms->highmem_ub_mem_nc = true;
+ vms->ub_cluster_mode = false;
+ vms->fm_deployment = false;
+#endif
+
vms->irqmap = a15irqmap;
virt_flash_create(vms);
diff --git a/hw/ub/meson.build b/hw/ub/meson.build
index e1146704e67aaebe746e7e0e9fbfbd3d5d3859e3..ffa135dacfeb25689b7beff1644c8bfb518ec65d 100644
--- a/hw/ub/meson.build
+++ b/hw/ub/meson.build
@@ -2,6 +2,7 @@ ub_ss = ss.source_set()
ub_ss.add(files(
'ub.c',
'ub_ubc.c',
+ 'ub_config.c',
'ub_acpi.c',
))
system_ss.add_all(when: 'CONFIG_HW_UB', if_true: ub_ss)
diff --git a/hw/ub/ub_acpi.c b/hw/ub/ub_acpi.c
index 9b3af8220360d583b7adc7ad44f4fc2945370d71..f5407020f5866a6b1b7482a7569fa07f9acbe06b 100644
--- a/hw/ub/ub_acpi.c
+++ b/hw/ub/ub_acpi.c
@@ -31,7 +31,418 @@
#include "qapi/error.h"
#include "qapi/util.h"
#include "qapi/qmp/qstring.h"
+#include "hw/ub/ub_ummu.h"
#include "hw/ub/hisi/ub_mem.h"
#include "hw/ub/hisi/ub_fm.h"
#include "hw/acpi/aml-build.h"
+#include "hw/ub/ub_common.h"
+#define UBIOS_VERSION 1
+#define DTS_SIG_UBCTL "bus controller"
+#define DTS_SIG_UMMU "ummu"
+#define DTS_SIG_RSV_MEM "rsv_mem"
+static uint8_t gpa_bits;
+void ub_set_gpa_bits(uint8_t bits)
+{
+ gpa_bits = bits;
+}
+
+static void ub_init_table_header(DtsTableHeader *header,
+ const char *name,
+ uint32_t size, uint16_t version)
+{
+ strncpy(header->name, name, sizeof(header->name) - 1);
+ header->total_size = size;
+ header->version = version;
+ header->remain_size = 0;
+ header->checksum = 0x711;
+ qemu_log("%s total_size %u\n", name, size);
+}
+
+static void ub_init_vendor_info(UbcVendorInfo *vendor_info, VirtMachineState *vms)
+{
+ uint16_t mar_id;
+ uint64_t base_reg = vms->memmap[VIRT_UBC_BASE_REG].base;
+ uint64_t addr_cc = vms->memmap[VIRT_UB_MEM_CC].base;
+ uint64_t addr_nc = vms->memmap[VIRT_UB_MEM_NC].base;
+ UbMemDecoderInfo *mem_info;
+ uint64_t local_reg_offset[] = {
+ BA0_OFFSET,
+ BA1_OFFSET,
+ BA2_OFFSET,
+ BA3_OFFSET,
+ BA4_OFFSET,
+ };
+ uint64_t mar_space_size[] = {
+ UB_MEM_MAR0_SPACE_SIZE,
+ UB_MEM_MAR1_SPACE_SIZE,
+ UB_MEM_MAR2_SPACE_SIZE,
+ UB_MEM_MAR3_SPACE_SIZE,
+ UB_MEM_MAR4_SPACE_SIZE,
+ };
+
+ memset(vendor_info, 0, sizeof(UbcVendorInfo));
+ vendor_info->ub_mem_ver = 0;
+ vendor_info->max_addr_bits = gpa_bits;
+    /* Only one UBC is supported for now; the fixed offsets below correspond
+     * to CMDQ_BASE_ADDR (0x218c000) and EVTQ_BASE_ADDR (0x218e000) in
+     * hw/ub/hisi/ubc.h.
+     */
+    vendor_info->cmd_queue_base = vms->memmap[VIRT_UBC_BASE_REG].base + 0x218c000;
+    vendor_info->event_queue_base = vms->memmap[VIRT_UBC_BASE_REG].base + 0x218e000;
+ vendor_info->vendor_feature_sets = 0;
+
+ for (mar_id = 0; mar_id < MAR_NUM_ONE_UDIE; mar_id++) {
+ mem_info = &vendor_info->mem_info[mar_id];
+ mem_info->decode_addr = base_reg + local_reg_offset[mar_id] + MAR_OFFSET;
+ mem_info->cc_base_addr = mar_space_size[mar_id] ?
+ addr_cc >> MB_SIZE_OFFSET : 0;
+ mem_info->cc_base_size = mar_space_size[mar_id] >> MB_SIZE_OFFSET;
+ mem_info->nc_base_addr = mar_space_size[mar_id] ?
+ addr_nc >> MB_SIZE_OFFSET : 0;
+ mem_info->nc_base_size = mar_space_size[mar_id] >> MB_SIZE_OFFSET;
+ addr_cc += mar_space_size[mar_id];
+ addr_nc += mar_space_size[mar_id];
+ qemu_log("MAR%u decode_addr 0x%lx, cc ba 0x%x size 0x%x,"
+ " nc ba 0x%x size 0x%x\n",
+ mar_id, mem_info->decode_addr,
+ mem_info->cc_base_addr, mem_info->cc_base_size,
+ mem_info->nc_base_addr, mem_info->nc_base_size);
+ }
+}
+
+static void ub_init_ubc_node(uint16_t ubc_count, UbcNode *ubc, VirtMachineState *vms)
+{
+ uint16_t i;
+ uint64_t ub_mmio_addr = vms->memmap[VIRT_HIGH_UB_MMIO].base;
+ for (i = 0; i < ubc_count; i++) {
+ (ubc + i)->interrupt_id_start = UBC_INTERRUPT_ID_START + i * UBC_INTERRUPT_ID_CNT;
+ (ubc + i)->interrupt_id_end = (ubc + i)->interrupt_id_start + UBC_INTERRUPT_ID_CNT - 1;
+ (ubc + i)->gpa_base = ub_mmio_addr + i * UBIOS_MMIOS_SIZE_PER_UBC;
+ (ubc + i)->gpa_size = UBIOS_MMIOS_SIZE_PER_UBC;
+ (ubc + i)->memory_size_limit = gpa_bits;
+ (ubc + i)->dma_cca = 1; /* 1: DMA(Y) CCA(Y) */
+ (ubc + i)->ummu_mapping = UBIOS_UMMU_TABLE_CNT ? 0 : 0xffff;
+ (ubc + i)->proximity_domain = 0;
+ (ubc + i)->msg_queue_base = vms->memmap[VIRT_UBC_BASE_REG].base +
+ UBC_MSGQ_REG_OFFSET;
+ (ubc + i)->msg_queue_size = UBC_MSGQ_REG_SIZE;
+ (ubc + i)->msg_queue_depth = HI_MSGQ_DEPTH;
+ (ubc + i)->msg_queue_interrupt = UBC_QUEUE_INTERRUPT_DEFAULT;
+ /*
+ * Interrupt attributes
+ * BIT0: Triggering
+ * ACPI_LEVEL_SENSITIVE 0x00
+ * ACPI_EDGE_SENSITIVE 0x01
+ * BIT1: Polarity
+ * ACPI_ACTIVE_HIGH 0x00
+ * ACPI_ACTIVE_LOW 0x01
+ */
+ (ubc + i)->msg_queue_interrupt_attr = 0x0;
+ memset(&(ubc + i)->ubc_info, 0, sizeof(UbGuid));
+ ub_init_vendor_info((UbcVendorInfo *)&(ubc + i)->vendor_info, vms);
+ qemu_log("init ubc_table[%d]=0x%lx, interrupt_id=[0x%x-0x%x]\n",
+ i, (ubc + i)->gpa_base, (ubc + i)->interrupt_id_start,
+ (ubc + i)->interrupt_id_end);
+ }
+}
+
+static void ub_init_ubios_ubc_table(DtsSubUbcTable *ubc_table, VirtMachineState *vms)
+{
+ UbcNode *ubc = NULL;
+
+ ubc_table->ubc_count = UBIOS_UBC_TABLE_CNT;
+ ub_init_table_header(&ubc_table->header, DTS_SIG_UBCTL,
+ UBIOS_UBC_TABLE_SIZE(ubc_table->ubc_count),
+ UBIOS_VERSION);
+ ubc_table->local_cna_start = LOCAL_CNA_START;
+ ubc_table->local_cna_end = LOCAL_CNA_END;
+ ubc_table->local_eid_start = LOCAL_EID_START;
+ ubc_table->local_eid_end = LOCAL_EID_END;
+ ubc_table->feature_set = 0;
+ /* ubc_table->cluster_mode
+ * System working mode
+ * 0: single-node system
+ * 1: cluster mode
+ */
+ ubc_table->cluster_mode = vms->ub_cluster_mode;
+ qemu_log("init ub cluster mode %u\n", ubc_table->cluster_mode);
+ ubc = (UbcNode *)ubc_table->node;
+ ub_init_ubc_node(ubc_table->ubc_count, ubc, vms);
+}
+
+static void ub_init_ummu_vendor_info(UbMemMmuInfo *vendor_info, VirtMachineState *vms)
+{
+ vendor_info->valid_bits = UB_MEM_VALID_VALUE;
+ vendor_info->protection_table_bits = 0xa;
+ vendor_info->translation_table_bits = 0x11;
+ vendor_info->ext_reg_base = (vms->memmap[VIRT_UBC_BASE_REG].base | UMMU_OFFSET | UB_MEM_REG_BASE);
+ vendor_info->ext_reg_size = UMMU_EXT_REG_SIZE;
+ qemu_log("ummu vendor info reg_base=0x%lx\n", vendor_info->ext_reg_base);
+}
+
+static void ub_init_ubios_ummu_table(DtsSubUmmuTable *ummu_table, VirtMachineState *vms)
+{
+ uint16_t i;
+ UmmuNode *ummu = NULL;
+ UbMemMmuInfo *vendor_info = NULL;
+
+ ummu_table->count = UBIOS_UMMU_TABLE_CNT;
+ ub_init_table_header(&ummu_table->header, DTS_SIG_UMMU,
+ UBIOS_UMMU_TABLE_SIZE(ummu_table->count),
+ UBIOS_VERSION);
+ ummu = (UmmuNode *)ummu_table->node;
+ for (i = 0; i < ummu_table->count; i++) {
+ (ummu + i)->base_addr = vms->memmap[VIRT_UBC_BASE_REG].base + UMMU_REG_OFFSET +
+ i * SINGLE_UMMU_REG_SIZE;
+ (ummu + i)->addr_size = UMMU_REG_SIZE;
+ (ummu + i)->interrupt_id = UMMU_INTERRUPT_ID;
+ (ummu + i)->proximity_domain = 0;
+ (ummu + i)->its_index = 0;
+ (ummu + i)->pmu_addr = (ummu + i)->base_addr + SINGLE_UMMU_REG_SIZE;
+ (ummu + i)->pmu_size = SINGLE_UMMU_PMU_REG_SIZE;
+ (ummu + i)->pmu_interrupt_id = UMMU_INTERRUPT_ID + 1;
+ (ummu + i)->min_tid = 65;
+ (ummu + i)->max_tid = 0xFFFFF;
+ (ummu + i)->vender_id = VENDER_ID_HUAWEI;
+
+ vendor_info = (UbMemMmuInfo *)(ummu + i)->vender_info;
+ ub_init_ummu_vendor_info(vendor_info, vms);
+ qemu_log("init ummu_table[%d]=0x%lx,pmu_addr=0x%lx,pmu_size=0x%lx,pmu_interrupt_id=0x%x\n",
+ i, (ummu + i)->base_addr, (ummu + i)->pmu_addr,
+ (ummu + i)->pmu_size, (ummu + i)->pmu_interrupt_id);
+ }
+}
+
+static void ub_init_ubios_rsv_mem_table(DtsRsvMemTable *rsv_mem_table, VirtMachineState *vms)
+{
+ MemRange *mem_range;
+ rsv_mem_table->count = UBIOS_UMMU_TABLE_CNT;
+ ub_init_table_header(&rsv_mem_table->header, DTS_SIG_RSV_MEM,
+ UBIOS_RSV_MEM_TABLE_SIZE(rsv_mem_table->count),
+ UBIOS_VERSION);
+ mem_range = (MemRange *)rsv_mem_table->node;
+ mem_range->flags = 0x1; /* direct mapping */
+ memset(mem_range->reserved, 0, sizeof(mem_range->reserved));
+ mem_range->base = 0x8000000; /* MSI_IOVA_BASE */
+ mem_range->size = 0x100000; /* MSI_IOVA_LENGTH */
+}
+
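+/*
+ * A sketch of the layout built below inside the VIRT_UBIOS_INFO_TABLE
+ * region (derived from the offset arithmetic in this function, not from an
+ * external spec):
+ *
+ *   base                                          DtsRootTable
+ *   base + UBIOS_INFO_TABLE_SIZE                  DtsSubUbcTable
+ *     + ALIGN_UP(ubc table size, UB_ALIGNMENT)    DtsSubUmmuTable
+ *     + ALIGN_UP(ummu table size, UB_ALIGNMENT)   DtsRsvMemTable
+ *
+ * build_ubrt() recomputes the same addresses when it fills in the UBRT
+ * sub-table pointers, so the two must stay in sync.
+ */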
+void ub_init_ubios_info_table(VirtMachineState *vms, uint64_t total_size)
+{
+ uint64_t ubios_info_tables = vms->memmap[VIRT_UBIOS_INFO_TABLE].base;
+ uint64_t ubc_tables_addr = ubios_info_tables + UBIOS_INFO_TABLE_SIZE;
+ uint64_t ummu_tables_addr;
+ uint64_t size = total_size;
+ DtsRootTable *ubios = (DtsRootTable *)cpu_physical_memory_map(ubios_info_tables,
+ &size, true);
+ DtsSubUbcTable *ubc_table = (DtsSubUbcTable *)(ubios + 1);
+ uint64_t ubc_table_size;
+ DtsSubUmmuTable *ummu_table;
+ uint64_t ummu_table_size;
+ uint64_t rsv_mem_tables_addr;
+ DtsRsvMemTable *rsv_mem_table;
+
+ if (!ubios || size != total_size) {
+ if (ubios) {
+ cpu_physical_memory_unmap(ubios, size, true, size);
+ }
+ qemu_log("cpu_physical_memory_map failed, size %lu total %lu ptr %p\n",
+ size, total_size, ubios);
+ return;
+ }
+    qemu_log("ubios_info_tables=0x%lx, ubc_tables_addr=0x%lx, "
+             "ubios table size=%lu, UBIOS_UBC_TABLE_CNT %u, "
+             "UBIOS_UMMU_TABLE_CNT %u\n",
+             ubios_info_tables, ubc_tables_addr, total_size,
+             UBIOS_UBC_TABLE_CNT, UBIOS_UMMU_TABLE_CNT);
+ memset(ubios, 0, sizeof(DtsRootTable));
+ ub_init_table_header(&ubios->header, "ubios root",
+ sizeof(DtsRootTable), UBIOS_VERSION);
+ /* init ubc table */
+ ubios->tables[ubios->count] = ubc_tables_addr;
+ ub_init_ubios_ubc_table(ubc_table, vms);
+ qemu_log("ubc ubios->tables[%u] = 0x%lx ubc_table = 0x%lx \n",
+ ubios->count, ubc_tables_addr, (uint64_t)ubc_table);
+ ubios->count++;
+ ubc_table_size = UBIOS_UBC_TABLE_SIZE(ubc_table->ubc_count);
+
+ /* init ummu table */
+ ummu_tables_addr = ubc_tables_addr + ALIGN_UP(ubc_table_size, UB_ALIGNMENT);
+ ummu_table = (DtsSubUmmuTable *)((uint8_t *)(ubc_table) +
+ ALIGN_UP(ubc_table_size, UB_ALIGNMENT));
+ ubios->tables[ubios->count] = ummu_tables_addr;
+ ub_init_ubios_ummu_table(ummu_table, vms);
+ qemu_log("ummu ubios->tables[%u] = 0x%lx ummu_table=0x%lx\n",
+ ubios->count, ummu_tables_addr, (uint64_t)ummu_table);
+ ubios->count++;
+ ummu_table_size = UBIOS_UMMU_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT);
+
+ /* init rsv mem table */
+ rsv_mem_tables_addr = ummu_tables_addr + ALIGN_UP(ummu_table_size, UB_ALIGNMENT);
+ rsv_mem_table = (DtsRsvMemTable *)((uint8_t *)(ummu_table) +
+ ALIGN_UP(ummu_table_size, UB_ALIGNMENT));
+ ubios->tables[ubios->count] = rsv_mem_tables_addr;
+ ub_init_ubios_rsv_mem_table(rsv_mem_table, vms);
+ ubios->count++;
+
+ cpu_physical_memory_unmap(ubios, size, true, size);
+}
+
+void ub_set_ubinfo_in_ubc_table(VirtMachineState *vms)
+{
+ uint64_t ubios_info_tables = vms->memmap[VIRT_UBIOS_INFO_TABLE].base;
+ uint64_t total_size = ROUND_UP(UBIOS_TABLE_SIZE, 4 * KiB);
+ uint64_t size = total_size;
+ UBBus *bus = vms->ub_bus;
+
+ if (!bus) {
+ qemu_log("there is no ub bus\n");
+ return;
+ }
+
+ BusControllerState *ubc = container_of_ubbus(bus);
+ UbGuid guid = ubc->ubc_dev->parent.guid;
+ DtsRootTable *ubios = (DtsRootTable *)cpu_physical_memory_map(ubios_info_tables,
+ &size, true);
+ DtsSubUbcTable *ubc_table = (DtsSubUbcTable *)(ubios + 1);
+ UbcNode *ubc_node = (UbcNode *)ubc_table->node;
+
+ if (!ubios || size != total_size) {
+ if (ubios) {
+ cpu_physical_memory_unmap(ubios, size, true, size);
+ }
+ qemu_log("cpu_physical_memory_map failed, size %lu total %lu ptr %p\n",
+ size, total_size, ubios);
+ return;
+ }
+ /* The virtual machine currently supports only one ub controller. */
+ ubc_node->ubc_info = guid;
+
+ cpu_physical_memory_unmap(ubios, size, true, size);
+}
+
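+/*
+ * UBRT layout emitted below, as read from the appends in this function
+ * (field meanings beyond that are an assumption):
+ *   4 bytes  sub-table count
+ * then, per sub-table:
+ *   1 byte   type (ACPI_UB_TABLE_TYPE_*)
+ *   7 bytes  reserved
+ *   8 bytes  guest-physical address of the sub-table
+ */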
+void build_ubrt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
+{
+ /* 3 subtables: ubc, ummu, UB Reserved Memory */
+ uint8_t table_cnt = 3;
+ uint64_t ubios_info_tables = vms->memmap[VIRT_UBIOS_INFO_TABLE].base;
+ uint64_t ubc_tables_addr = ubios_info_tables + UBIOS_INFO_TABLE_SIZE;
+ uint64_t ubc_table_size = UBIOS_UBC_TABLE_SIZE(UBIOS_UBC_TABLE_CNT);
+ uint64_t ummu_tables_addr = ubc_tables_addr + ALIGN_UP(ubc_table_size, UB_ALIGNMENT);
+ uint64_t ummu_table_size = UBIOS_UMMU_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT);
+ uint64_t rsv_mem_tables_addr = ummu_tables_addr + ALIGN_UP(ummu_table_size, UB_ALIGNMENT);
+ AcpiTable table = { .sig = "UBRT", .rev = 0, .oem_id = vms->oem_id,
+ .oem_table_id = vms->oem_table_id };
+
+ acpi_table_begin(&table, table_data);
+ build_append_int_noprefix(table_data, table_cnt, 4);
+
+ build_append_int_noprefix(table_data, ACPI_UB_TABLE_TYPE_BUS_CONTROLLER, 1);
+ build_append_int_noprefix(table_data, 0, 7);
+ build_append_int_noprefix(table_data, ubc_tables_addr, 8);
+
+ build_append_int_noprefix(table_data, ACPI_UB_TABLE_TYPE_UMMU, 1);
+ build_append_int_noprefix(table_data, 0, 7);
+ build_append_int_noprefix(table_data, ummu_tables_addr, 8);
+
+ build_append_int_noprefix(table_data, ACPI_UB_TABLE_TYPE_RSV_MEM, 1);
+ build_append_int_noprefix(table_data, 0, 7);
+ build_append_int_noprefix(table_data, rsv_mem_tables_addr, 8);
+
+ acpi_table_end(linker, &table);
+ qemu_log("init UBRT: ubc_tbl=0x%lx, ummu_tbl=0x%lx, rsv_mem_tbl=0x%lx\n",
+ ubc_tables_addr, ummu_tables_addr, rsv_mem_tables_addr);
+}
+
+void acpi_dsdt_add_ub(Aml *scope)
+{
+ Aml *dev_ubc = aml_device("UBC0");
+ Aml *dev_ummu = aml_device("UMU0");
+ Aml *dev_pmu = aml_device("PMU0");
+
+ aml_append(dev_ubc, aml_name_decl("_HID", aml_string("HISI0541")));
+ aml_append(dev_ubc, aml_name_decl("_UID", aml_int(0)));
+ aml_append(scope, dev_ubc);
+
+ aml_append(dev_ummu, aml_name_decl("_HID", aml_string("HISI0551")));
+ aml_append(dev_ummu, aml_name_decl("_UID", aml_int(0)));
+ aml_append(scope, dev_ummu);
+
+ aml_append(dev_pmu, aml_name_decl("_HID", aml_string("HISI0571")));
+ aml_append(dev_pmu, aml_name_decl("_UID", aml_int(0)));
+ aml_append(scope, dev_pmu);
+}
+
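+/*
+ * Each named component node appended below is 0x40 bytes: a 16-byte node
+ * header, 13 bytes of node flags, memory access properties and size limit,
+ * the 11-byte "\\_SB_.xxx0" device object name plus 4 bytes of padding
+ * (hence the ID array reference of 0x2c), and one 20-byte ID mapping.
+ * build_iort() accounts for these three extra nodes via "nb_nodes += 3".
+ */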
+void acpi_iort_add_ub(GArray *table_data)
+{
+ char name_ubc[11] = "\\_SB_.UBC0";
+ char name_ummu[11] = "\\_SB_.UMU0";
+ char name_pmu[11] = "\\_SB_.PMU0";
+ int name_ubc_len = sizeof(name_ubc);
+ int name_ummu_len = sizeof(name_ummu);
+ int name_pmu_len = sizeof(name_pmu);
+
+ /* Table 16 UBC */
+ build_append_int_noprefix(table_data, 1 /* Named component */, 1); /* Type */
+ build_append_int_noprefix(table_data, 0x40, 2); /* Length */
+ build_append_int_noprefix(table_data, 0, 1); /* Revision */
+ build_append_int_noprefix(table_data, 0, 4); /* Identifier */
+ build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */
+ build_append_int_noprefix(table_data, 0x2c, 4); /* Reference to ID Array */
+ /* Named component specific data */
+ build_append_int_noprefix(table_data, 0, 4); /* Node Flags */
+ build_append_int_noprefix(table_data, 0, 4); /* Memory access properties: Cache Coherency */
+ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Hints */
+ build_append_int_noprefix(table_data, 0, 2); /* Memory access properties: Reserved */
+ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Memory Flags */
+ build_append_int_noprefix(table_data, 0, 1); /* Memory Size Limit */
+ g_array_append_vals(table_data, name_ubc, name_ubc_len); /* Device object name */
+ build_append_int_noprefix(table_data, 0, 4); /* Padding */
+ build_append_int_noprefix(table_data, 0, 4); /* Input base */
+ build_append_int_noprefix(table_data, 1, 4); /* Number of IDs */
+ build_append_int_noprefix(table_data, UBC_INTERRUPT_ID_START, 4); /* Output base */
+ build_append_int_noprefix(table_data, 0x30, 4); /* Output Reference */
+ build_append_int_noprefix(table_data, 1, 4); /* Flags */
+
+ /* Table 16 UMMU */
+ build_append_int_noprefix(table_data, 1 /* Named component */, 1); /* Type */
+ build_append_int_noprefix(table_data, 0x40, 2); /* Length */
+ build_append_int_noprefix(table_data, 0, 1); /* Revision */
+ build_append_int_noprefix(table_data, 0, 4); /* Identifier */
+ build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */
+ build_append_int_noprefix(table_data, 0x2c, 4); /* Reference to ID Array */
+ /* Named component specific data */
+ build_append_int_noprefix(table_data, 0, 4); /* Node Flags */
+ build_append_int_noprefix(table_data, 0, 4); /* Memory access properties: Cache Coherency */
+ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Hints */
+ build_append_int_noprefix(table_data, 0, 2); /* Memory access properties: Reserved */
+ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Memory Flags */
+ build_append_int_noprefix(table_data, 0, 1); /* Memory Size Limit */
+ g_array_append_vals(table_data, name_ummu, name_ummu_len); /* Device object name */
+ build_append_int_noprefix(table_data, 0, 4); /* Padding */
+ build_append_int_noprefix(table_data, 0, 4); /* Input base */
+ build_append_int_noprefix(table_data, 1, 4); /* Number of IDs */
+ build_append_int_noprefix(table_data, UMMU_INTERRUPT_ID, 4); /* Output base */
+ build_append_int_noprefix(table_data, 0x30, 4); /* Output Reference */
+ build_append_int_noprefix(table_data, 1, 4); /* Flags */
+
+ /* Table 16 PMU */
+ build_append_int_noprefix(table_data, 1 /* Named component */, 1); /* Type */
+ build_append_int_noprefix(table_data, 0x40, 2); /* Length */
+ build_append_int_noprefix(table_data, 0, 1); /* Revision */
+ build_append_int_noprefix(table_data, 0, 4); /* Identifier */
+ build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */
+ build_append_int_noprefix(table_data, 0x2c, 4); /* Reference to ID Array */
+ /* Named component specific data */
+ build_append_int_noprefix(table_data, 0, 4); /* Node Flags */
+ build_append_int_noprefix(table_data, 0, 4); /* Memory access properties: Cache Coherency */
+ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Hints */
+ build_append_int_noprefix(table_data, 0, 2); /* Memory access properties: Reserved */
+ build_append_int_noprefix(table_data, 0, 1); /* Memory access properties: Memory Flags */
+ build_append_int_noprefix(table_data, 0, 1); /* Memory Size Limit */
+ g_array_append_vals(table_data, name_pmu, name_pmu_len); /* Device object name */
+ build_append_int_noprefix(table_data, 0, 4); /* Padding */
+ build_append_int_noprefix(table_data, 0, 4); /* Input base */
+ build_append_int_noprefix(table_data, 1, 4); /* Number of IDs */
+ build_append_int_noprefix(table_data, UMMU_INTERRUPT_ID + 1, 4); /* Output base */
+ build_append_int_noprefix(table_data, 0x30, 4); /* Output Reference */
+ build_append_int_noprefix(table_data, 1, 4); /* Flags */
+}
diff --git a/hw/ub/ub_config.c b/hw/ub/ub_config.c
new file mode 100644
index 0000000000000000000000000000000000000000..32ae6b91e4ac4c70f649b8220fcf7a72bb8f3d32
--- /dev/null
+++ b/hw/ub/ub_config.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see .
+ */
+#include "qemu/osdep.h"
+#include "qemu/module.h"
+#include "hw/arm/virt.h"
+#include "hw/qdev-properties.h"
+#include "hw/ub/ub.h"
+#include "hw/ub/ub_bus.h"
+#include "hw/ub/ub_ubc.h"
+#include "hw/ub/ub_config.h"
+#include "qemu/log.h"
+#include "migration/vmstate.h"
+#include "qapi/error.h"
+
+UbCfgAddrMapEntry *g_ub_cfg_addr_map_table = NULL;
+uint32_t g_emulated_ub_cfg_size;
+
+uint64_t ub_cfg_slice_start_offset[UB_CFG_EMULATED_SLICES_NUM] = {
+ [CFG0_BASIC] = 0x0,
+ [CAP1_RSV] = 0x100,
+ [CAP2_SHP] = 0x200,
+ [CAP3_ERR_RECORD] = 0x300,
+ [CAP4_ERR_INFO] = 0x400,
+ [CAP5_EMQ] = 0x500,
+ [CFG1_BASIC] = 0x10000,
+ [CAP1_DECODER] = 0x10100,
+ [CAP2_JETTY] = 0x10200,
+ [CAP3_INT_TYPE1] = 0x10300,
+ [CAP4_INT_TYPE2] = 0x10400,
+ [CAP5_RSV] = 0x10500,
+ [CAP6_UB_MEM] = 0x10600,
+ [CFG0_PORT_BASIC] = 0x20000,
+ [CFG0_ROUTE_TABLE] = 0xF0000000,
+};
+
+static void ub_cfg_display_addr_map_table(void)
+{
+ int i;
+
+ for (i = 0; i < UB_CFG_SLICE_NUMS; i++) {
+ qemu_log("map_table[%d]---start_addr: 0x%lx, mapped_offset: 0x%lx\n", i,
+ g_ub_cfg_addr_map_table[i].start_addr, g_ub_cfg_addr_map_table[i].mapped_offset);
+ }
+}
+
+int ub_cfg_addr_map_table_init(void)
+{
+ int i, idx;
+
+    /* Lives for the whole QEMU lifetime; reclaimed when QEMU exits. */
+ g_ub_cfg_addr_map_table = malloc(UB_CFG_SLICE_NUMS * sizeof(UbCfgAddrMapEntry));
+ if (!g_ub_cfg_addr_map_table) {
+ qemu_log("failed to malloc for g_ub_cfg_addr_map_table\n");
+ return -1;
+ }
+
+ /* fill general slice map table */
+ for (i = 0; i < UB_CFG_GENERAL_SLICES_NUM; i++) {
+ g_ub_cfg_addr_map_table[i].start_addr = ub_cfg_slice_start_offset[i];
+ g_ub_cfg_addr_map_table[i].start_addr *= UB_CFG_START_OFFSET_GRANU;
+ g_ub_cfg_addr_map_table[i].mapped_offset = i * UB_CFG_SLICE_SIZE;
+ }
+
+ /* fill port info slice map table */
+ for (i = 0; i < UB_DEV_MAX_NUM_OF_PORT; i++) {
+ idx = UB_CFG_GENERAL_SLICES_NUM + i;
+ g_ub_cfg_addr_map_table[idx].start_addr = ub_cfg_slice_start_offset[CFG0_PORT_BASIC];
+ g_ub_cfg_addr_map_table[idx].start_addr *= UB_CFG_START_OFFSET_GRANU;
+ g_ub_cfg_addr_map_table[idx].start_addr += i * UB_PORT_SZ;
+ g_ub_cfg_addr_map_table[idx].mapped_offset = idx * UB_CFG_SLICE_SIZE;
+ }
+
+ /* fill route table slice map table */
+ idx = UB_CFG_GENERAL_SLICES_NUM + UB_DEV_MAX_NUM_OF_PORT;
+ g_ub_cfg_addr_map_table[idx].start_addr = ub_cfg_slice_start_offset[CFG0_ROUTE_TABLE];
+ g_ub_cfg_addr_map_table[idx].start_addr *= UB_CFG_START_OFFSET_GRANU;
+ g_ub_cfg_addr_map_table[idx].mapped_offset = idx * UB_CFG_SLICE_SIZE;
+
+ g_emulated_ub_cfg_size = UB_CFG_SLICE_NUMS * UB_CFG_SLICE_SIZE;
+ qemu_log("each ub-dev emulated ub cfg size is 0x%x bytes\n", g_emulated_ub_cfg_size);
+
+ return 0;
+}
+
+uint32_t ub_emulated_config_size(void)
+{
+ return g_emulated_ub_cfg_size;
+}
+
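+/*
+ * Translate a raw UB config-space offset into an offset inside the
+ * compacted emulated config area. The map table is filled in ascending
+ * start_addr order by ub_cfg_addr_map_table_init(), which the early
+ * 'break' in the loop below relies on. As a worked example, an access at
+ * the route-table start (0xF0000000 * UB_CFG_START_OFFSET_GRANU) maps to
+ * the last UB_CFG_SLICE_SIZE slice of the emulated area.
+ */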
+uint64_t ub_cfg_offset_to_emulated_offset(uint64_t offset, bool check_success)
+{
+ uint64_t emulate_offset = UINT64_MAX;
+ int i;
+ uint64_t diff;
+
+ for (i = 0; i < UB_CFG_SLICE_NUMS; i++) {
+ if (offset < g_ub_cfg_addr_map_table[i].start_addr) {
+ break;
+ }
+
+ diff = offset - g_ub_cfg_addr_map_table[i].start_addr;
+ if (diff >= UB_CFG_SLICE_SIZE) {
+ continue;
+ }
+
+ emulate_offset = g_ub_cfg_addr_map_table[i].mapped_offset + diff;
+ break;
+ }
+
+ if (check_success) {
+ if (emulate_offset == UINT64_MAX) {
+ ub_cfg_display_addr_map_table();
+ qemu_log("failed to convert offset 0x%lx to emulated offset\n", offset);
+ }
+ assert(emulate_offset != UINT64_MAX);
+ }
+
+ return emulate_offset;
+}
\ No newline at end of file
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index a621844eb3905ed3d064774b0c086962bdedd0a4..7f0d3ed39d2209a6977f74a80eaadf906750b23a 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -155,6 +155,14 @@ enum {
VIRT_HIGH_GIC_REDIST2 = VIRT_LOWMEMMAP_LAST,
VIRT_HIGH_PCIE_ECAM,
VIRT_HIGH_PCIE_MMIO,
+#ifdef CONFIG_UB
+ VIRT_HIGH_UB_MMIO,
+ VIRT_UB_IDEV_ERS,
+ VIRT_UBC_BASE_REG,
+ VIRT_UBIOS_INFO_TABLE,
+ VIRT_UB_MEM_CC,
+ VIRT_UB_MEM_NC,
+#endif // CONFIG_UB
};
typedef enum VirtIOMMUType {
@@ -221,6 +229,15 @@ struct VirtMachineState {
bool highmem_mmio;
bool highmem_redists;
#ifdef CONFIG_UB
+ bool highmem_ub_mmio;
+ bool highmem_idev_ers;
+ bool highmem_ubc_base_reg;
+ bool highmem_ubios_info_table;
+ bool highmem_ub_mem_cc;
+ bool highmem_ub_mem_nc;
+ bool ummu;
+ bool ub_cluster_mode;
+ bool fm_deployment;
UBBus *ub_bus;
#endif // CONFIG_UB
bool its;
diff --git a/include/hw/ub/hisi/ubc.h b/include/hw/ub/hisi/ubc.h
index fdaeae7b3e2ecc43b5b9300fbfe316cfa7d60284..f9201741a92385f6d6c36abc640e9c0f286f203b 100644
--- a/include/hw/ub/hisi/ubc.h
+++ b/include/hw/ub/hisi/ubc.h
@@ -45,4 +45,358 @@
#define UBC_INTERRUPT_ID_CNT 0x1000
#define VENDER_ID_HUAWEI 0xCC08
+/*
+ * Local Register layout
+ *
+ * +-----------------------------+
+ * | rsv 15M |
+ * +-----------------------------+
+ * | 16th 1M CCUM |
+ * +-----------------------------+ 0xf00_0000
+ * | 15th 16M UMMU |
+ * +-----------------------------+ 0xe00_0000
+ * | 14th 16M NL4 |
+ * +-----------------------------+ 0xd00_0000
+ * | 13th 16M BA4 |
+ * +-----------------------------+ 0xc00_0000
+ * | 12th 16M NL3 |
+ * +-----------------------------+ 0xb00_0000
+ * | 11th 16M BA3 |
+ * +-----------------------------+ 0xa00_0000
+ * | 10th 16M NL2 |
+ * +-----------------------------+ 0x900_0000
+ * | 9th 16M BA2 |
+ * +-----------------------------+ 0x800_0000
+ * | 8th 16M NL1 |
+ * +-----------------------------+ 0x700_0000
+ * | 7th 16M BA1 |
+ * +-----------------------------+ 0x600_0000
+ * | 6th 16M NL0 |
+ * +-----------------------------+ 0x500_0000
+ * | 5th 16M TA |
+ * +-----------------------------+ 0x400_0000
+ * | 4th 16M BA0 |
+ * +-----------------------------+ 0x300_0000
+ * | 3rd 16M TP                  |
+ * +-----------------------------+ 0x200_0000
+ * | 2nd 16M MISC                |
+ * +-----------------------------+ 0x100_0000
+ * | 1st 16M |
+ * +-----------------------------+ 0x000_0000
+ */
+#define FST_OFFSET 0x0000000
+#define MISC_OFFSET 0x1000000
+#define TP_OFFSET 0x2000000
+#define BA0_OFFSET 0x3000000
+#define TA_OFFSET 0x4000000
+#define NL0_OFFSET 0x5000000
+#define BA1_OFFSET 0x6000000
+#define NL1_OFFSET 0x7000000
+#define BA2_OFFSET 0x8000000
+#define NL2_OFFSET 0x9000000
+#define BA3_OFFSET 0xa000000
+#define NL3_OFFSET 0xb000000
+#define BA4_OFFSET 0xc000000
+#define NL4_OFFSET 0xd000000
+#define UMMU_OFFSET 0xe000000
+#define CCUM_OFFSET 0xf000000
+#define LOCAL_REG_TYPE_SHIFT 24
+#define LOCAL_REG_TYPE_MASK GENMASK_ULL(27, 24)
+#define LOCAL_REG_MEMBER_MASK GENMASK_ULL(23, 0)
+#define LOCAL_REG_ADDR_2_TYPE(addr) (((addr) & LOCAL_REG_TYPE_MASK) >> LOCAL_REG_TYPE_SHIFT)
+#define BA_REG_MEMBER_MASK GENMASK_ULL(19, 16)
+#define BA_REG_DATA_MASK GENMASK_ULL(15, 0)
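+
+/* For example, a local-register access at BA1_OFFSET + MAR_OFFSET
+ * (0x60d0000) decodes via LOCAL_REG_ADDR_2_TYPE() to 0x6, i.e. the BA1
+ * window, and the low 24 bits (LOCAL_REG_MEMBER_MASK) select the register
+ * within that window.
+ */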
+#define TOP_REG_OFFSET (0x00 * 64 * KiB)
+#define RXDMA_OFFSET (0x01 * 64 * KiB)
+#define TXDMA_OFFSET (0x02 * 64 * KiB)
+#define MASTER_OFFSET (0x03 * 64 * KiB)
+#define LSAD_OFFSET (0x04 * 64 * KiB)
+#define SMAP_OFFSET (0x0a * 64 * KiB)
+#define P2P_OFFSET (0x0c * 64 * KiB)
+#define MAR_OFFSET (0x0d * 64 * KiB)
+#define MAR_DECODE_OFFSET (0x0e * 64 * KiB) /* mem decoder table */
+#define UB_RAS_OFFSET (0x12 * 64 * KiB)
+#define CCUA_OFFSET (0x14 * 64 * KiB)
+
+/* references LinQuickCV100_UBOMMU_nManager */
+#define TP_UBOMMU0_OFFSET 0x180000
+#define TP_UBOMMU1_OFFSET 0x190000
+#define TP_UBOMMU2_OFFSET 0x1c0000
+#define TP_UBOMMU3_OFFSET 0x1d0000
+#define TP_UBOMMU4_OFFSET 0x1e0000
+#define TP_UBOMMU5_OFFSET 0x1f0000
+#define TP_UBOMMU0_SELF_REG_OFFSET 0
+#define TP_UBOMMU0_PMCG_OFFSET 0x3000
+#define TP_UBOMMU0_PROTOCOL_OFFSET 0x4000
+#define TP_UBOMMU0_CMDQ_OFFSET 0xc000
+#define TP_UBOMMU0_EVENTQ_OFFSET 0xe000
+#define SELF_ICG_CFG_OFFSET 0x0
+#define UBOMMU_MEM_INIT_OFFSET 0x1804
+#define DECODER_REG_BASE (TP_OFFSET + TP_UBOMMU0_OFFSET + \
+ TP_UBOMMU0_SELF_REG_OFFSET) /* 0x2180000 */
+#define CMDQ_BASE_ADDR (TP_OFFSET + TP_UBOMMU0_OFFSET + \
+ TP_UBOMMU0_CMDQ_OFFSET) /* 0x218c000 */
+#define EVTQ_BASE_ADDR (TP_OFFSET + TP_UBOMMU0_OFFSET + \
+ TP_UBOMMU0_EVENTQ_OFFSET) /* 0x218e000 */
+
+#define DECODER_SELF_ICG_CFG_OFFSET (DECODER_REG_BASE + \
+ SELF_ICG_CFG_OFFSET) /* 0x2180000 */
+#define DECODER_SELF_MEM_INIT_OFFSET (DECODER_REG_BASE + \
+ UBOMMU_MEM_INIT_OFFSET) /* 0x2181804 */
+#define DECODER_MEM_INIT_DONE_VAL 0x3F
+#define DECODER_MEM_INIT_DONE_SHIFT 16
+
+#define SQ_ADDR_L 0x0
+#define SQ_ADDR_H 0x4
+#define SQ_PI 0x8
+#define SQ_CI 0xc
+#define SQ_DEPTH 0x10
+#define SQ_STATUS 0x14
+#define RQ_ADDR_L 0x40
+#define RQ_ADDR_H 0x44
+#define RQ_PI 0x48
+#define RQ_CI 0x4c
+#define RQ_DEPTH 0x50
+#define RQ_ENTRY_SIZE 0x54
+#define RQ_STATUS 0x58
+#define CQ_ADDR_L 0x70
+#define CQ_ADDR_H 0x74
+#define CQ_PI 0x78
+#define CQ_CI 0x7c
+#define CQ_DEPTH 0x80
+#define CQ_STATUS 0x84
+#define CQ_INT_MASK 0x88
+#define CQ_INT_STATUS 0x8c
+#define CQ_INT_RO 0x90
+#define CQ_INT_SET 0x94
+#define MSGQ_RST 0xB0
+
+#define HI_MSG_SQE_PLD_SIZE 0x800 /* 2K */
+#define HI_MSG_RQE_SIZE 0x800 /* 2K */
+#define HI_MSGQ_DEPTH 16
+#define HI_SQ_CFG_DEPTH HI_MSGQ_DEPTH
+#define HI_RQ_CFG_DEPTH HI_MSGQ_DEPTH
+#define HI_CQ_CFG_DEPTH HI_MSGQ_DEPTH
+#define HI_FM_MSG_ID_MIN 128
+#define HI_FM_MSG_ID_MAX 191
+#define MAP_COMMAND_MASK 0xff
+#define HI_FM_MSG_MAX (HI_SQ_CFG_DEPTH - 1)
+#define HI_MSGQ_MAX_DEPTH 1024
+#define HI_MSGQ_MIN_DEPTH 4
+
+/*
+ * msgq sq memory layout
+ * +----------------------------+
+ * | sqe 1 |
+ * |----------------------------| 12Byte
+ * +----| payload addr(offset) |
+ * | +----------------------------+
+ * | | sqe 2 |
+ * | |----------------------------| 12Byte
+ * +-------| payload addr(offset) |
+ * | | +----------------------------+
+ * | | | ..... |
+ * | | | |
+ * | | +----------------------------+
+ * | | | sqe (depth) |
+ * | | |----------------------------| 12Byte
+ * +---------| payload addr(offset) |
+ * | | | +----------------------------+
+ * | | +--> | payload 1 | 1K
+ * | | +----------------------------+
+ * | +-----> | payload 2 | 1K
+ * | +----------------------------+
+ * | | ....... | 1K
+ * | +----------------------------+
+ * +-------> | payload (depth) | 1K
+ * +----------------------------+
+ *
+ * SQE layout
+ * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ * |31|30|29|28|27|26|25|24|23|22|21|20|19|18|17|16|15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
+ * +-----------------------------------------------+-----------------------+-----------+-----------+
+ * | payload length | msg id |submsg code| msg code |
+ * +-----------------------------------------------+-----+--+--+-----------+-----------+-----------+
+ * | rsvd |e1|e2| vl | rsvd | e1:icrc e2:local
+ * +-----------------------------------------------------+--+--+-----------+-----------------------+
+ * | payload addr |
+ * +-----------------------------------------------------------------------------------------------+
+ */
+typedef struct HiMsgSqe {
+ /* DW0 */
+ uint32_t task_type : 2;
+ uint32_t rsvd0 : 2;
+ uint32_t local : 1;
+ uint32_t dev_type : 2;
+ uint32_t icrc : 1;
+ union {
+ struct {
+ uint8_t type : 1;
+ uint8_t msg_code : 3;
+ uint8_t sub_msg_code : 4;
+ };
+ uint8_t opcode;
+ };
+ uint32_t p_len : 12;
+ uint32_t rsvd1 : 4;
+
+ /* DW1 */
+ uint32_t msn : 16;
+ uint32_t rsvd3 : 16;
+
+ /* DW2 */
+ uint32_t p_addr;
+
+ /* DW3 */
+ uint32_t rsvd2;
+} HiMsgSqe;
+#define HI_MSG_SQE_SIZE sizeof(HiMsgSqe)
+
+typedef struct HiMsgCqe {
+ /* DW0 */
+ uint32_t task_type : 2;
+ uint32_t rsvd0 : 6;
+ union {
+ struct {
+ uint8_t type : 1;
+ uint8_t msg_code : 3;
+ uint8_t sub_msg_code : 4;
+ };
+ uint8_t opcode;
+ };
+ uint32_t p_len : 12;
+ uint32_t rsvd1 : 4;
+
+ /* DW1 */
+ uint32_t msn : 16;
+ uint32_t rsvd5 : 16;
+
+ /* DW2 */
+ uint32_t rq_pi : 10;
+ uint32_t rsvd2 : 6;
+ uint32_t status : 8;
+ uint32_t rsvd3 : 8;
+
+ /* DW3 */
+ uint32_t rsvd4;
+} HiMsgCqe;
+#define HI_MSG_CQE_SIZE sizeof(HiMsgCqe)
+
+typedef struct HiMsgSqePld {
+ char packet[HI_MSG_SQE_PLD_SIZE];
+} HiMsgSqePld;
+
+typedef struct HiMsgqInfo {
+ uint64_t sq_base_addr_gpa;
+ uint64_t sq_base_addr_hva;
+ uint64_t sq_sz;
+ uint64_t cq_base_addr_gpa;
+ uint64_t cq_base_addr_hva;
+ uint64_t cq_sz;
+ uint64_t rq_base_addr_gpa;
+ uint64_t rq_base_addr_hva;
+ uint64_t rq_sz;
+} HiMsgqInfo;
+
+typedef enum HiMsgqIdx {
+ MSG_SQ = 0,
+ MSG_RQ = 1,
+ MSG_CQ = 2,
+ MSGQ_NUM
+} HiMsgqIdx_t;
+
+enum HiCqeStatus {
+ CQE_SUCCESS,
+ CQE_FAIL
+};
+
+enum HiCqSwState {
+ CQ_SW_INIT,
+ CQ_SW_HANDLED
+};
+
+struct HiMsgQueue {
+ HiMsgqIdx_t idx;
+
+ union {
+ struct HiMsgSqe *sqe;
+ void *rqe;
+ struct HiMsgCqe *cqe;
+ void *entry;
+ };
+
+ uint16_t entry_size;
+ uint8_t depth;
+ uint8_t ci;
+ uint8_t pi;
+
+ pthread_spinlock_t lock;
+};
+
+#define UB_MSG_CODE_ENUM 0x8 /* hisi private */
+enum HiEnumSubMsgCode {
+ ENUM_QUERY_REQ = 0,
+ ENUM_QUERY_RSP,
+ CNA_CFG_REQ,
+ CNA_CFG_RSP
+};
+
+enum UB_MSG_RSP_STATUS_CODE {
+ UB_MSG_RSP_SUCCESS,
+ UB_MSG_RSP_INVALID_MESSAGE,
+ UB_MSG_RSP_UPI_BEYOND_AUTH,
+ UB_MSG_RSP_INVALID_TOKEN,
+ UB_MSG_RSP_REG_ATTR_MISMATCH,
+ UB_MSG_RSP_INVALID_ADDR,
+ UB_MSG_RSP_HW_EXEC_FAILED,
+ UB_MSG_RSP_LACK_OF_EID,
+};
+
+enum HiTaskType {
+ PROTOCOL_MSG = 0,
+ PROTOCOL_ENUM = 1,
+ HISI_PRIVATE = 2
+};
+
+typedef enum HiMsgqPrivateOpcode {
+ CC_CTX_CFG_CMD = 0,
+ QUERY_UB_MEM_ROUTE_CMD = 1,
+ EU_TABLE_CFG_CMD = 2,
+ CC_CTX_QUERY_CMD = 3
+} HiMsgqPrivateOpcode;
+
+typedef enum HiEuCfgStatus {
+ EU_CFG_FAIL,
+ EU_CFG_SUCCESS
+} HiEuCfgStatus;
+
+typedef struct HiEuCfgReq {
+ uint32_t eu_msg_code : 4;
+ uint32_t cfg_entry_num : 10;
+ uint32_t tbl_cfg_mode : 1;
+ uint32_t tbl_cfg_status : 1;
+ uint32_t entry_start_id : 16;
+ uint32_t eid : 20;
+ uint32_t rsv0 : 12;
+ uint32_t upi : 16;
+ uint32_t rsv1 : 16;
+} HiEuCfgReq;
+#define HI_EU_CFG_REQ_SIZE 12
+
+typedef struct HiEuCfgRsp {
+ uint32_t eu_msg_code : 4;
+ uint32_t cfg_entry_num : 10;
+ uint32_t tbl_cfg_mode : 1;
+ uint32_t tbl_cfg_status : 1;
+ uint32_t entry_start_id : 16;
+} HiEuCfgRsp;
+#define HI_EU_CFG_RSP_SIZE 4
+
+typedef struct HiEuCfgPld {
+ union {
+ HiEuCfgReq req;
+ HiEuCfgRsp rsp;
+ };
+} HiEuCfgPld;
+
#endif
diff --git a/include/hw/ub/ub_acpi.h b/include/hw/ub/ub_acpi.h
index d3af1c78bdd21eb9638eb13cdabb66c298a70608..239e99b81081642b9d0b3a9be3f07b456fab1fe4 100644
--- a/include/hw/ub/ub_acpi.h
+++ b/include/hw/ub/ub_acpi.h
@@ -172,4 +172,10 @@ typedef struct AcpiUbrtTable {
UBIOS_UMMU_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT) + \
UBIOS_RSV_MEM_TABLE_SIZE(UBIOS_UMMU_TABLE_CNT))
+void ub_init_ubios_info_table(VirtMachineState *vms, uint64_t total_size);
+void ub_set_gpa_bits(uint8_t bits);
+void build_ubrt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms);
+void ub_set_ubinfo_in_ubc_table(VirtMachineState *vms);
+void acpi_dsdt_add_ub(Aml *scope);
+void acpi_iort_add_ub(GArray *table_data);
#endif
\ No newline at end of file
diff --git a/include/hw/ub/ub_common.h b/include/hw/ub/ub_common.h
index d52dc7e6514e77b08e57c040784e4dac26d3414c..b8a0287e5683760bdf6d37fa1d6de1ec35b9f2c8 100644
--- a/include/hw/ub/ub_common.h
+++ b/include/hw/ub/ub_common.h
@@ -285,4 +285,132 @@
#define LOOP_HELPER(macro, n) LOOP##n(macro)
#define LOOP(macro, n) LOOP_HELPER(macro, n)
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_first_bit((addr), (size)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define EID_HIGH(eid) (((eid) >> 12) & 0xff)
+#define EID_LOW(eid) ((eid) & 0xfff)
+#define EID_GEN(eid_h, eid_l) ((eid_h) << 12 | (eid_l))
+
+#define UB_ALIGNMENT 64
+
+/* Round number down to multiple */
+#define ALIGN_DOWN(n, m) ((n) / (m) * (m))
+
+/* Round number up to multiple */
+#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define BITS_PER_LONG_LONG 64
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
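+/* e.g. GENMASK_ULL(27, 24) == 0x0f000000, which is how LOCAL_REG_TYPE_MASK
+ * in hw/ub/hisi/ubc.h is built. */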
+#define DASH_SZ 3
+/* The caller is responsible for freeing the returned memory. */
+char *line_generator(uint8_t len);
+enum UbMsgType {
+ MSG_REQ = 0,
+ MSG_RSP = 1
+};
+
+enum UbMsgCode {
+ UB_MSG_CODE_RAS = 0,
+ UB_MSG_CODE_LINK = 1,
+ UB_MSG_CODE_CFG = 2,
+ UB_MSG_CODE_VDM = 3,
+ UB_MSG_CODE_EXCH = 4,
+ UB_MSG_CODE_SEC = 5,
+ UB_MSG_CODE_POOL = 6,
+ UB_MSG_CODE_MAX = 7
+};
+
+struct UbLinkHeader {
+ uint32_t plen : 14;
+ uint32_t rm : 2;
+ uint32_t cfg : 4;
+ uint32_t rsvd1 : 1;
+ uint32_t vl : 4;
+ uint32_t rsvd0 : 1;
+ uint32_t crd_vl : 4;
+ uint32_t ack : 1;
+ uint32_t crd : 1;
+};
+#define UB_CLAN_LINK_CFG 6
+
+struct ClanNetworkHeader {
+ /* DW0 */
+ uint32_t dcna : 16;
+ uint32_t scna : 16;
+ /* DW1 */
+#define NTH_NLP_WITH_TPH 0
+#define NTH_NLP_WITHOUT_TPH 1
+ uint32_t nth_nlp : 3;
+ uint32_t mgmt : 1;
+ uint32_t sl : 4;
+ uint32_t lb : 8;
+ uint32_t cc : 16;
+};
+
+typedef struct MsgExtendedHeader {
+ uint32_t plen : 12;
+ uint32_t rsvd : 4;
+ uint32_t rsp_status : 8;
+ union {
+ struct {
+ uint8_t type : 1;
+ uint8_t msg_code : 3;
+ uint8_t sub_msg_code : 4;
+ };
+ uint8_t code;
+ };
+} MsgExtendedHeader;
+
+typedef struct MsgPktHeader { /* TODO, check byte order */
+ /* DW0 */
+ struct UbLinkHeader ulh;
+ /* DW1-DW2 */
+ struct ClanNetworkHeader nth;
+ /* DW3 */
+ uint32_t seid_h : 8;
+ uint32_t upi : 16;
+#define CTPH_NLP_UPI_40BITS_UEID 2
+ uint32_t ctph_nlp : 4; /* tp header */
+ uint32_t pad : 2;
+#define CTPH_OPCODE_NOT_CNP 0
+ uint32_t tp_opcode : 2;
+ /* DW4 */
+ uint32_t deid : 20;
+ uint32_t seid_l : 12;
+ /* DW5 */
+ uint32_t src_tassn : 16;
+ uint32_t taver : 3;
+ uint32_t tk_vld : 1;
+ uint32_t udf : 4;
+#define TAH_OPCODE_MSG 0x14
+ uint32_t ta_opcode : 8;
+ /* DW6 */
+ uint32_t sjetty : 20;
+ uint32_t sjt_type : 2;
+ uint32_t rsv0 : 3;
+ uint32_t retry : 1;
+ uint32_t se : 1;
+ uint32_t jetty_en : 1;
+ uint32_t rsv1 : 1;
+ uint32_t odr : 3;
+ /* DW7 */
+ struct MsgExtendedHeader msgetah;
+
+ /* DW8~DW11 */
+ char payload[0]; /* payload */
+} MsgPktHeader;
+#define MSG_PKT_HEADER_SIZE 32
+
#endif
diff --git a/include/hw/ub/ub_config.h b/include/hw/ub/ub_config.h
new file mode 100644
index 0000000000000000000000000000000000000000..05b2c19c57a469f6156999f3f999e855347b0a5a
--- /dev/null
+++ b/include/hw/ub/ub_config.h
@@ -0,0 +1,579 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see .
+ */
+
+#ifndef UB_CONFIG_H
+#define UB_CONFIG_H
+
+#include "hw/ub/hisi/ubc.h"
+#include "hw/ub/ub_common.h"
+#include "hw/ub/ub.h"
+#include "qemu/units.h"
+
+enum UbCfgEmulatedSlice {
+ CFG0_BASIC = 0,
+ /* CFG0_CAP START */
+ CAP1_RSV,
+ CAP2_SHP,
+ CAP3_ERR_RECORD,
+ CAP4_ERR_INFO,
+ CAP5_EMQ,
+ /* CFG0_CAP END */
+ CFG1_BASIC,
+ /* CFG1_CAP START */
+ CAP1_DECODER,
+ CAP2_JETTY,
+ CAP3_INT_TYPE1,
+ CAP4_INT_TYPE2,
+ CAP5_RSV,
+ CAP6_UB_MEM,
+ /* CFG1_CAP END */
+ UB_CFG_GENERAL_SLICES_NUM,
+
+ /* dont add new here */
+ CFG0_PORT_BASIC,
+ CFG0_ROUTE_TABLE,
+ UB_CFG_EMULATED_SLICES_NUM,
+ /* dont add new here */
+};
+
+/* In the UB spec the route table slice is 1 GiB. In virtualization the
+ * route table is not used, so to reduce memory overhead it is also emulated
+ * with a 1 KiB slice; hence the extra 1 below. */
+#define UB_CFG_SLICE_NUMS (UB_CFG_GENERAL_SLICES_NUM + UB_DEV_MAX_NUM_OF_PORT + 1)
+#define UB_CFG_START_OFFSET_GRANU 4
+#define UB_CFG_SLICE_SIZE (1 * KiB)
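+/* The emulated per-device config space is therefore
+ * UB_CFG_SLICE_NUMS * UB_CFG_SLICE_SIZE bytes; ub_cfg_addr_map_table_init()
+ * in hw/ub/ub_config.c builds the matching offset map and reports this size
+ * via ub_emulated_config_size(). */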
+
+typedef struct UbCfgAddrMapEntry {
+ uint64_t start_addr;
+ uint64_t mapped_offset;
+} UbCfgAddrMapEntry;
+
+int ub_cfg_addr_map_table_init(void);
+
+enum UbCfgSubMsgCode {
+ UB_CFG0_READ = 0,
+ UB_CFG0_WRITE = 1,
+ UB_CFG1_READ = 2,
+ UB_CFG1_WRITE = 3,
+ UB_CFG_MAX_SUB_MSG_CODE,
+};
+
+typedef struct CfgMsgPldReq {
+ /* DW0 */
+ uint32_t rsvd0 : 4;
+ uint32_t byte_enable : 4;
+ uint32_t rsvd1 : 8;
+ uint32_t entity_idx : 16;
+
+ /* DW1 */
+ uint32_t req_addr;
+
+ /* DW2 */
+ uint32_t rsvd2;
+ /* DW3 */
+ uint32_t write_data;
+} CfgMsgPldReq;
+
+typedef struct CfgMsgPldRsp {
+ /* DW0 */
+ uint32_t read_data;
+ /* DW1 */
+ uint32_t rsvd1;
+ /* DW2 */
+ uint32_t rsvd2;
+ /* DW3 */
+ uint32_t rsvd3;
+} CfgMsgPldRsp;
+
+typedef struct CfgMsgPld {
+ union {
+ CfgMsgPldReq req;
+ CfgMsgPldRsp rsp;
+ };
+} CfgMsgPld;
+#define CFG_MSG_PLD_SIZE 16
+#define MSG_CFG_PKT_SIZE (MSG_PKT_HEADER_SIZE + CFG_MSG_PLD_SIZE) /* header 32bytes, pld 16bytes */
+
+void handle_msg_cfg(void *opaque, HiMsgSqe *sqe, void *payload);
+enum UbCfgBlockType {
+ UB_CFG0_BASIC_BLOCK_TYPE = 0,
+ UB_CFG_ROUTING_BLOCK_TYPE = 1,
+ UB_CFG_CAP_BLOCK_TYPE = 2,
+ UB_CFG_PORT_BLOCK_TYPE = 3,
+ UB_CFG_VD_BLOCK_TYPE = 4,
+ UB_CFG1_BASIC_BLOCK_TYPE = 5,
+ UB_CFG_BLOCK_NUMS
+};
+
+typedef struct CfgMsgPkt {
+ MsgPktHeader header;
+ CfgMsgPld pld;
+} CfgMsgPkt;
+
+typedef struct __attribute__ ((__packed__)) ConfigNetAddrInfo {
+ uint32_t primary_cna : 24; /* 0x1A */
+ uint32_t rsv : 8;
+ uint32_t rsv1; /* 0x1B */
+ uint32_t rsv2; /* 0x1C */
+ uint32_t rsv3; /* 0x1D */
+ uint32_t rsv4; /* 0x1E */
+} ConfigNetAddrInfo;
+
+typedef struct SliceHeader {
+ uint32_t slice_version : 4;
+ uint32_t slice_used_size : 28;
+} SliceHeader;
+
+typedef struct __attribute__ ((__packed__)) Cfg0SupportFeature {
+ union {
+ uint32_t rsv[4];
+ struct {
+ uint8_t entity_available : 1;
+ uint8_t mtu_supported : 3;
+ uint8_t route_table_supported : 1;
+ uint8_t upi_supported : 1;
+ uint8_t broker_supported : 1;
+ uint8_t switch_supported : 1;
+ uint8_t rsv : 1;
+ uint8_t cc_supported : 1;
+ } bits;
+ };
+} Cfg0SupportFeature;
+
+typedef struct __attribute__ ((__packed__)) UbEid {
+ uint32_t dw0;
+ uint32_t dw1;
+ uint32_t dw2;
+ uint32_t dw3;
+} UbEid;
+
+#define CAP_BITMAP_LEN 32
+#define RSV_LEN 4
+typedef struct __attribute__ ((__packed__)) UbCfg0Basic {
+ /* dw0 */
+ SliceHeader header; // RO
+ /* dw1 */
+ uint16_t total_num_of_port; // RO
+ uint16_t total_num_of_ue; // RO
+ /* dw2~dw9 */
+ uint8_t cap_bitmap[CAP_BITMAP_LEN]; // RO
+ /* dw10~dw14 */
+ Cfg0SupportFeature support_feature; // RO
+ /* dw14~dw17 */
+ UbGuid guid; // RO
+ /* dw18~dw21 */
+ UbEid eid; // RW
+ /* dw22~dw25 */
+ UbEid fm_eid;
+ /* dw26~dw30 */
+ ConfigNetAddrInfo net_addr_info; // RW
+ /* dw31~dw44 */
+ uint32_t upi : 16; // RW
+ uint32_t rsv1 : 16;
+ uint32_t module_id : 16; // HwInit
+ uint32_t vendor_id : 16;
+ uint32_t dev_rst : 1; // RW
+ uint32_t rsv3 : 31;
+ uint32_t rsv4;
+ uint32_t mtu_cfg : 3; // RW
+ uint32_t rsv5 : 29;
+ uint32_t cc_en : 1; // RW
+ uint32_t rsv6 : 31;
+ uint32_t th_en : 1; // RW
+ uint32_t rsv7 : 31;
+ uint32_t fm_cna : 24; // RW
+ uint32_t rsv8 : 8;
+ uint64_t ueid_low; // RW
+ uint64_t ueid_high; // RW
+ uint32_t ucna : 24; // RW
+ uint32_t rsv9 : 8;
+ uint32_t rsv10;
+} UbCfg0Basic;
+
+typedef struct __attribute__ ((__packed__)) UbSlotInfo {
+ /* dw2 */
+ uint8_t pps : 1;
+ uint8_t wlps : 1;
+ uint8_t plps : 1;
+ uint8_t pdss : 1;
+ uint8_t pwcs : 1;
+ uint32_t rsv1 : 27;
+ /* dw3 */
+ uint16_t start_port_idx;
+ uint16_t end_port_idx;
+ /* dw4~dw10 */
+ uint8_t pp_ctrl : 1;
+ uint32_t rsv2 : 31;
+ uint8_t wl_ctrl : 2;
+ uint32_t rsv3 : 30;
+ uint8_t pl_ctrl : 2;
+ uint32_t rsv4 : 30;
+ uint8_t ms_ctrl : 1;
+ uint32_t rsv5 : 31;
+ uint8_t pd_ctrl : 1;
+ uint32_t rsv6 : 31;
+ uint8_t pds_ctrl : 1;
+ uint32_t rsv7 : 31;
+ uint8_t pw_ctrl : 1;
+ uint32_t rsv8 : 31;
+ /* dw11~dw13 */
+ uint8_t pp_st : 1;
+ uint32_t rsv9 : 31;
+ uint8_t pd_st : 1;
+ uint32_t rsv10 : 31;
+ uint8_t pdsc_st : 1;
+ uint32_t rsv11 : 31;
+ /* dw14~dw17 */
+ uint32_t rsv[2];
+} UbSlotInfo;
+
+typedef struct __attribute__ ((__packed__)) UbCfg0ShpCap {
+ /* dw0 */
+ SliceHeader header; // RO
+ /* dw1 */
+ uint16_t slot_num; // RO
+ uint16_t rsv1;
+ /* dw2 ~ */
+ UbSlotInfo slot_info[0]; // RO
+} UbCfg0ShpCap;
+
+typedef struct __attribute__ ((__packed__)) ErrorMsgQueCtrl {
+ uint64_t correctable_err_report_enable : 1;
+ uint64_t uncorrectable_nonfatal_err_report_enable : 1;
+ uint64_t uncorrectable_fatal_err_report_enable : 1;
+ uint64_t rsv_1 : 5;
+ uint64_t interrupt_generation_enable : 1 ;
+ uint64_t rsv_2 : 55;
+} ErrorMsgQueCtrl;
+
+typedef struct __attribute__ ((__packed__)) UbCfg0EmqCap {
+ /* dw0 */
+ uint64_t segment_header;
+ ErrorMsgQueCtrl error_msg_que_ctrlr;
+} UbCfg0EmqCap;
+
+typedef struct __attribute__ ((__packed__)) Cfg1SupportFeature {
+ union {
+ uint32_t rsv[4];
+ struct {
+ uint8_t rsv1 : 2;
+ uint8_t mgs : 1;
+ uint8_t rsv2 : 2;
+ uint8_t ubbas : 1;
+ uint8_t ers0s : 1;
+ uint8_t ers1s : 1;
+ uint8_t ers2s : 1;
+ uint8_t cdmas : 1;
+ uint8_t matt_juris : 1;
+ } bits;
+ };
+} Cfg1SupportFeature;
+
+typedef struct __attribute__ ((__packed__)) UbCfg1DecoderCap {
+ /* dw0 */
+ SliceHeader header;
+#define DECODER_CAP_EVENT_SIZE 5
+#define DECODER_CAP_CMD_SIZE 5
+#define DECODER_CAP_MMIO_SIZE 7
+ /* dw1 */
+ struct {
+ uint16_t rsv1 : 4;
+ uint16_t event_size_sup : 4;
+ uint16_t rsv2 : 4;
+ uint16_t cmd_size_sup : 4;
+ uint16_t mmio_size_sup : 3;
+ uint16_t rsv3 : 13;
+ } decoder;
+ /* dw2 */
+ struct {
+ uint32_t decoder_en : 1;
+ uint32_t rsv : 31;
+ } decoder_ctrl;
+ /* dw3-4 */
+ uint64_t dec_matt_ba;
+ /* dw5-6 */
+ uint64_t dec_mmio_ba;
+ /* dw7 */
+ uint32_t dev_usi_idx;
+ /* dw 8-0xf */
+#define DECODER_CAP_RESERVED1_BYTES 8
+ uint32_t rsv1[DECODER_CAP_RESERVED1_BYTES];
+ /* dw 0x10 */
+ struct {
+ uint32_t cmdq_en : 1;
+ uint32_t rsv1 : 7;
+ uint32_t cmdq_size_use : 4;
+ uint32_t rsv2 : 20;
+ } decoder_cmdq_cfg;
+ /* dw 0x11 */
+ struct {
+ uint32_t cmdq_wr_idx : 11;
+ uint32_t rsv1 : 5;
+ uint32_t cmdq_err_resp : 1;
+ uint32_t rsv2 : 15;
+ } decoder_cmdq_prod;
+ /* dw 0x12 */
+ struct {
+ uint32_t cmdq_rd_idx : 11;
+ uint32_t rsv1 : 5;
+ uint32_t cmdq_err : 1;
+ uint32_t cmdq_err_res : 3;
+ uint32_t rsv2 : 12;
+ } decoder_cmdq_cons;
+ /* dw 0x13-0x14 */
+ struct {
+ uint64_t rsv1 : 6;
+ uint64_t cmdq_ba : 42;
+ uint64_t rsv2 : 16;
+ } decoder_cmdq_ba;
+ /* dw 0x15-0x1f */
+#define DECODER_CAP_RESERVED2_BYTES 11
+ uint32_t rsv2[DECODER_CAP_RESERVED2_BYTES];
+ /* dw 0x20 */
+ struct {
+ uint32_t evtq_en : 1;
+ uint32_t rsv1 : 7;
+ uint32_t evtq_size_use : 4;
+ uint32_t rsv2 : 20;
+ } decoder_evtq_cfg;
+ /* dw 0x21 */
+ struct {
+ uint32_t evtq_wr_idx : 11;
+ uint32_t rsv : 20;
+ uint32_t evtq_ovrl_err : 1;
+ } decoder_evtq_prod;
+ /* dw 0x22 */
+ struct {
+ uint32_t evtq_rd_idx : 11;
+ uint32_t rsv : 20;
+ uint32_t evtq_ovrl_err_resp : 1;
+ } decoder_evtq_cons;
+ /* dw 0x23 */
+ struct {
+ uint64_t rsv1 : 6;
+ uint64_t evtq_ba : 42;
+ uint64_t rsv2 : 16;
+ } decoder_evtq_ba;
+} UbCfg1DecoderCap;
+
+typedef struct __attribute__ ((__packed__)) UbCfg1IntType1Cap {
+ /* dw0 */
+ SliceHeader header;
+ /* dw1 */
+ uint32_t interrupt_enable : 1;
+ uint32_t rsv1 : 31;
+ /* dw2 */
+ uint32_t support_int_num : 3;
+ uint32_t rsv2 : 29;
+ /* dw3 */
+ uint32_t interrupt_enable_num : 3;
+ uint32_t rsv3 : 29;
+ /* dw4 */
+ uint32_t interrupt_data;
+ /* dw5-dw6 */
+ uint64_t interrupt_address;
+ /* dw7 */
+ uint32_t interrupt_id;
+ /* dw8 */
+ uint32_t interrupt_mask;
+ /* dw9 */
+ uint32_t interrupt_pending;
+} UbCfg1IntType1Cap;
+
+typedef struct __attribute__ ((__packed__)) UbCfg1IntType2Cap {
+ /* dw0 */
+ SliceHeader header;
+ /* dw1 */
+ uint16_t vec_table_num;
+ uint16_t add_table_num;
+ /* dw2 ~ dw8 */
+ uint64_t vec_table_start_addr;
+ uint64_t add_table_start_addr;
+ uint64_t pend_table_start_addr;
+ uint32_t interrupt_id;
+ uint32_t interrupt_mask : 1;
+ uint32_t rsv1 : 31;
+ uint32_t interrupt_enable : 1;
+ uint32_t rsv2 : 31;
+} UbCfg1IntType2Cap;
+
+typedef struct __attribute__ ((__packed__)) UbCfg1Basic {
+ /* dw0 */
+ SliceHeader header; // RO
+ /* dw1~dw8 */
+ uint8_t cap_bitmap[CAP_BITMAP_LEN]; // RO
+ /* dw9~dw12 */
+ Cfg1SupportFeature support_feature; // RO
+ /* dw13~dw42 */
+ uint32_t ers_space_size[UB_NUM_REGIONS];
+ uint64_t ers_start_addr[UB_NUM_REGIONS];
+ uint64_t ers_ubba[UB_NUM_REGIONS];
+ uint32_t elr : 1;
+ uint32_t rsv1 : 31;
+ uint32_t elr_done : 1;
+ uint32_t rsv2 : 31;
+ uint32_t mig_ctrl : 8;
+ uint32_t rsv3 : 24;
+ uint32_t mig_status : 8;
+ uint32_t rsv4 : 24;
+ uint32_t ers_att : 3;
+ uint32_t rsv5 : 29;
+ uint32_t sys_pgs : 1;
+ uint32_t rsv6 : 31;
+ uint64_t eid_upi_tab;
+ uint32_t eid_upi_ten;
+ uint64_t rsv7;
+ uint64_t rsv8;
+ uint32_t class_code : 16;
+ uint32_t rsv9 : 16;
+ uint32_t tpid_num : 16;
+ uint32_t rsv10 : 16;
+ uint32_t ctp_tb_bypass : 1;
+ uint32_t rsv11 : 31;
+ uint32_t crystal_dma_en : 1;
+ uint32_t rsv12 : 31;
+ uint32_t dev_token_id : 20;
+ uint32_t rsv13 : 12;
+ uint32_t bus_access_en : 1;
+ uint32_t rsv14 : 31;
+ uint32_t dev_rs_access_en : 1;
+ uint32_t rsv15 : 31;
+} UbCfg1Basic;
+
+typedef struct __attribute__ ((__packed__)) ConfigPortInfo {
+ uint16_t port_idx : 16;
+ uint8_t port_type : 1;
+ uint8_t enum_boundary : 1;
+ uint16_t rsv : 14;
+} ConfigPortInfo;
+
+typedef struct __attribute__ ((__packed__)) ConfigNeighborPortInfo {
+ uint16_t neighbor_port_idx : 16;
+ uint16_t rsv : 16;
+ UbGuid neighbot_port_guid;
+} ConfigNeighborPortInfo;
+
+#define PORT_CAP_BITMAP_LEN 32
+typedef struct __attribute__ ((__packed__)) ConfigPortBasic {
+ SliceHeader header;
+ uint8_t port_cap_bitmap[PORT_CAP_BITMAP_LEN];
+ ConfigPortInfo port_info;
+ ConfigNeighborPortInfo neighbor_port_info;
+ uint32_t port_cna : 24;
+ uint32_t rsv1 : 8;
+ uint8_t port_reset : 1;
+ uint32_t rsv2 : 31;
+} ConfigPortBasic;
+
+typedef struct __attribute__ ((__packed__)) UbRouteTable {
+ SliceHeader header;
+ uint32_t entry_num : 16;
+ uint32_t ers : 1;
+ uint32_t rsv1 : 15;
+ uint32_t er_en : 1;
+ uint32_t rsv2 : 31;
+ uint32_t entry[0];
+} UbRouteTable;
+
+#define SUPPORTED 1
+#define NOT_SUPPORTED 0
+
+#define UBFM 1
+#define UB_DRIVE 0
+
+/* slice header default values, unit (4 bytes) */
+#define UB_SLICE_VERSION 0x0
+#define UB_CFG0_BASIC_SLICE_USED_SIZE 0x24
+#define UB_CFG1_BASIC_SLICE_USED_SIZE 0x20
+#define UB_PORT_BASIC_SLICE_USED_SIZE 0x11
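+/*
+ * Illustrative note: the *_SLICE_USED_SIZE values above are expressed in
+ * dwords, so e.g. the CFG0 basic slice occupies
+ * UB_CFG0_BASIC_SLICE_USED_SIZE * 4 = 0x90 bytes (assuming DWORD_SIZE is 4,
+ * as used throughout this header).
+ */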
+
+/* ub dev cap */
+#define BITS_PER_CAP_BIT_MAP 128
+#define CFG0_RSV_INDEX 1
+#define CFG0_CAP2_SHP_INDEX 2
+#define CFG1_DECODER_CAP_INDEX 1
+#define CFG1_JETTY_CAP_INDEX 2
+#define CFG1_INT_CAP_INDEX 3
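+
+/*
+ * Illustrative sketch: with the capability bitmap stored as a byte array
+ * (cap_bitmap[CAP_BITMAP_LEN] in UbCfg1Basic above), an index such as
+ * CFG1_DECODER_CAP_INDEX would typically be tested as below. The byte/bit
+ * ordering and the helper name are assumptions.
+ */
+static inline int ub_cap_bit_is_set_example(const uint8_t *cap_bitmap, uint32_t idx)
+{
+    return (cap_bitmap[idx / 8] >> (idx % 8)) & 0x1;
+}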
+
+/* ub dev config space CFG0 addr offset, unit (bytes) */
+#define UB_SLICE_SZ (0x00000100 * DWORD_SIZE)
+#define UB_CFG0_BASIC_START 0x00000000
+#define UB_CFG0_BASIC_CAP_BITMAP (UB_CFG0_BASIC_START + 0x02 * DWORD_SIZE)
+#define UB_CFG0_BASIC_GUID_START (UB_CFG0_BASIC_START + 0x0E * DWORD_SIZE)
+#define UB_CFG0_BASIC_NA_INFO_START (UB_CFG0_BASIC_START + 0x1A * DWORD_SIZE)
+#define UB_CFG0_DEV_UEID_OFFSET (UB_CFG0_BASIC_START + 0x27 * DWORD_SIZE)
+#define UB_CFG0_CAP1_RSV_START (UB_CFG0_BASIC_START + UB_SLICE_SZ)
+#define UB_CFG0_CAP2_SHP_START (UB_CFG0_CAP1_RSV_START + UB_SLICE_SZ)
+#define UB_CFG0_CAP3_ERR_RECORD_START (UB_CFG0_CAP2_SHP_START + UB_SLICE_SZ)
+#define UB_CFG0_CAP4_ERR_INFO_START (UB_CFG0_CAP3_ERR_RECORD_START + UB_SLICE_SZ)
+#define UB_CFG0_EMQ_CAP_START (UB_CFG0_CAP4_ERR_INFO_START + UB_SLICE_SZ)
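+/*
+ * Illustrative note: consecutive CFG0 capability slices are UB_SLICE_SZ bytes
+ * apart (0x100 dwords = 0x400 bytes), so UB_CFG0_CAP2_SHP_START resolves to
+ * 0x800 and UB_CFG0_EMQ_CAP_START to 0x1400.
+ */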
+/* ub dev config space CFG1 addr offset, unit (bytes) */
+#define UB_CFG1_BASIC_START (0x00010000 * DWORD_SIZE)
+#define UB_CFG1_CAP1_DECODER (UB_CFG1_BASIC_START + UB_SLICE_SZ)
+#define UB_CFG1_CAP2_JETTY (UB_CFG1_CAP1_DECODER + UB_SLICE_SZ)
+#define UB_CFG1_CAP3_INT_TYPE1 (UB_CFG1_CAP2_JETTY + UB_SLICE_SZ)
+#define UB_CFG1_CAP4_INT_TYPE2 (UB_CFG1_CAP3_INT_TYPE1 + UB_SLICE_SZ)
+#define UB_CFG1_CAP5_RSV (UB_CFG1_CAP4_INT_TYPE2 + UB_SLICE_SZ)
+#define UB_CFG1_CAP6_UB_MEM (UB_CFG1_CAP5_RSV + UB_SLICE_SZ)
+/* ub dev config space PORT addr offset, unit (bytes) */
+#define UB_PORT_SLICE_START (0x00020000 * DWORD_SIZE)
+#define UB_PORT_SZ (0x00010000 * DWORD_SIZE)
+/* ub dev config space ROUTE TABLE addr offset, unit (bytes) */
+#define UB_ROUTE_TABLE_START (0xF0000000ULL * DWORD_SIZE)
+#define UB_ROUTE_TABLE_SIZE (0x10000000 * DWORD_SIZE)
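+/*
+ * Illustrative sketch: the bases above split the config space into CFG0
+ * (from UB_CFG0_BASIC_START), CFG1 (from UB_CFG1_BASIC_START), per-port
+ * slices (from UB_PORT_SLICE_START, UB_PORT_SZ apart) and the route table.
+ * A raw byte offset could be classified roughly as below; the helper and its
+ * return codes are hypothetical.
+ */
+static inline int ub_cfg_offset_region_example(uint64_t offset)
+{
+    if (offset >= UB_ROUTE_TABLE_START) {
+        return 3; /* route table */
+    } else if (offset >= UB_PORT_SLICE_START) {
+        return 2; /* per-port slice */
+    } else if (offset >= UB_CFG1_BASIC_START) {
+        return 1; /* CFG1 slices */
+    }
+    return 0; /* CFG0 slices */
+}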
+/* ub dev config space CFG1 system page granule size define */
+#define UB_CFG1_BASIC_SYSTEM_GRANULE_SIZE_4K (4 * 1024)
+#define UB_CFG1_BASIC_SYSTEM_GRANULE_SIZE_64K (64 * 1024)
+/* ub dev config space CFG1 dev_token_id offset 0xB4 */
+#define UB_CFG1_DEV_TOKEN_ID_OFFSET (UB_CFG1_BASIC_START + 0x2D * DWORD_SIZE)
+#define UB_TOKEN_ID_MASK 0xfffff
+/* ub dev config space CFG1 dev_rs_access_en offset 0xBC */
+#define UB_CFG1_DEV_RS_ACCESS_EN_OFFSET (UB_CFG1_BASIC_START + 0x2F * DWORD_SIZE)
+#define UB_DEV_RS_ACCESS_EN_MASK 0x1
+/* ub dev config space CFG1 bus_access_en offset 0xB8 */
+#define UB_CFG1_BUS_ACCESS_EN_OFFSET (UB_CFG1_BASIC_START + 0x2E * DWORD_SIZE)
+#define UB_BUS_ACCESS_EN_MASK 0x1
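+/*
+ * Illustrative sketch: the dword at UB_CFG1_DEV_TOKEN_ID_OFFSET (0xB4 bytes
+ * into the CFG1 basic slice) carries dev_token_id in its low 20 bits (see
+ * UbCfg1Basic above), so a read-side helper would mask with
+ * UB_TOKEN_ID_MASK. The helper name is hypothetical.
+ */
+static inline uint32_t ub_dev_token_id_example(uint32_t dword_val)
+{
+    return dword_val & UB_TOKEN_ID_MASK;
+}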
+/* ub dev config space INT TYPE2 CAP addr offset, unit (bytes) */
+#define UB_CFG1_CAP4_INT_TYPE2_NUMOF_INT_VEC_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 1 * DWORD_SIZE)
+#define UB_CFG1_CAP4_INT_TYPE2_NUMOF_INT_ADDR_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 1 * DWORD_SIZE + WORD_SIZE)
+#define UB_CFG1_CAP4_INT_TYPE2_INT_VEC_TAB_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 2 * DWORD_SIZE)
+#define UB_CFG1_CAP4_INT_TYPE2_INT_ADDR_TAB_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 4 * DWORD_SIZE)
+#define UB_CFG1_CAP4_INT_TYPE2_INT_PENDING_TAB_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 6 * DWORD_SIZE)
+#define UB_CFG1_CAP4_INT_TYPE2_INT_ID_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 8 * DWORD_SIZE)
+#define UB_CFG1_CAP4_INT_TYPE2_INT_MASK_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 9 * DWORD_SIZE)
+/* ub dev usi vec&addr&pend table entry size, unit (bytes) */
+#define USI_VEC_TABLE_ENTRY_SIZE 0x8
+#define USI_ADDR_TABLE_ENTRY_SIZE 0x20
+#define USI_PEND_TABLE_ENTRY_SIZE 0x4
+#define USI_PEND_TABLE_ENTRY_BIT_NUM 32
+/* ub dev usi addr table valid bit offset */
+#define USI_ADDR_TABLE_VALID_BIT_OFFSET 10
+#define USI_ADDR_TABLE_VALID_BIT_MASK 0x10
+/* usi config space */
+#define UB_CFG1_CAP4_INT_TYPE2_MASK_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 0x24)
+#define UB_CFG1_CAP4_INT_TYPE2_MASKBIT 0x1
+#define UB_CFG1_CAP4_INT_TYPE2_ENABLE_OFFSET (UB_CFG1_CAP4_INT_TYPE2 + 0x28)
+#define UB_CFG1_CAP4_INT_TYPE2_ENABLEBIT 0x1
+/* usi vec table source */
+#define USI_VEC_TABLE_MASK_OFFSET 0x6
+#define USI_VEC_TABLE_MASKBIT 0x1
+#define USI_VEC_TABLE_ADDR_INDEX_OFFSET 0x4
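+/*
+ * Illustrative sketch: the macros above imply one pending bit per interrupt
+ * vector, packed USI_PEND_TABLE_ENTRY_BIT_NUM (32) bits into each 4-byte
+ * pend-table entry. Locating the bit for a vector could then look like this;
+ * the packing assumption and the helper name are assumptions.
+ */
+static inline uint64_t ub_usi_pend_bit_locate_example(uint32_t vec, uint32_t *bit)
+{
+    *bit = vec % USI_PEND_TABLE_ENTRY_BIT_NUM;
+    return (uint64_t)(vec / USI_PEND_TABLE_ENTRY_BIT_NUM) * USI_PEND_TABLE_ENTRY_SIZE;
+}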
+
+uint32_t ub_emulated_config_size(void);
+uint64_t ub_cfg_offset_to_emulated_offset(uint64_t offset, bool check_success);
+
+#endif
\ No newline at end of file
diff --git a/include/hw/ub/ub_ummu.h b/include/hw/ub/ub_ummu.h
new file mode 100644
index 0000000000000000000000000000000000000000..f8b65a0bbe861af11f709e1b790c9fd5b4fdb30a
--- /dev/null
+++ b/include/hw/ub/ub_ummu.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef UB_UMMU_H
+#define UB_UMMU_H
+
+#include "hw/sysbus.h"
+#include "qom/object.h"
+#include "hw/ub/hisi/ubc.h"
+#include "hw/ub/ub_bus.h"
+#include "hw/ub/ub_ubc.h"
+
+#define UMMU_INTERRUPT_ID 0x8989 /* UMMU device ID; needs to be allocated properly later */
+
+#endif