From 91cc1d01be508e1ed827a63f179560cc19162e68 Mon Sep 17 00:00:00 2001 From: Mingzheng Xing Date: Thu, 4 Dec 2025 16:10:24 +0800 Subject: [PATCH] riscv: upgrade to 6.6.0-126.0.0 - RISC-V kernel upgrade to 6.6.0-126.0.0 - Add support for LRW, DP1000 - Add IOMMU related support - Backport mainline RISC-V features, sync from RVCK project. Signed-off-by: Mingzheng Xing --- 0001-riscv-kernel.patch | 24293 ++++++++++++++++++++++++++++++++------ kernel.spec | 8 +- 2 files changed, 20646 insertions(+), 3655 deletions(-) diff --git a/0001-riscv-kernel.patch b/0001-riscv-kernel.patch index 1af65239..32d9672b 100644 --- a/0001-riscv-kernel.patch +++ b/0001-riscv-kernel.patch @@ -1,6 +1,6 @@ -From c3cef44f1214d884ee99bbd1b9752ef06789eb12 Mon Sep 17 00:00:00 2001 +From 843321537ee3e992a30f6a86209f4e65a3a4e87f Mon Sep 17 00:00:00 2001 From: Mingzheng Xing -Date: Wed, 5 Nov 2025 22:20:23 +0800 +Date: Thu, 4 Dec 2025 18:25:24 +0800 Subject: [PATCH] riscv kernel Signed-off-by: Mingzheng Xing @@ -10,13 +10,14 @@ Signed-off-by: Mingzheng Xing .../{ => arch}/riscv/boot-image-header.rst | 0 Documentation/{ => arch}/riscv/boot.rst | 0 Documentation/{ => arch}/riscv/features.rst | 0 - Documentation/arch/riscv/hwprobe.rst | 271 + + Documentation/arch/riscv/hwprobe.rst | 303 + Documentation/{ => arch}/riscv/index.rst | 0 .../{ => arch}/riscv/patch-acceptance.rst | 0 Documentation/{ => arch}/riscv/uabi.rst | 0 Documentation/{ => arch}/riscv/vector.rst | 0 Documentation/{ => arch}/riscv/vm-layout.rst | 0 .../hwlock/xuantie,th1520-hwspinlock.yaml | 34 + + .../devicetree/bindings/i2c/lrw,lrw-i2c.yaml | 99 + .../bindings/iio/adc/thead,th1520-adc.yaml | 52 + .../bindings/iio/adc/xuantie,th1520-adc.yaml | 52 + .../interrupt-controller/riscv,aplic.yaml | 172 + @@ -35,6 +36,7 @@ Signed-off-by: Mingzheng Xing .../bindings/reset/xuantie,th1520-reset.yaml | 45 + .../devicetree/bindings/riscv/extensions.yaml | 6 + .../devicetree/bindings/rtc/xgene-rtc.txt | 16 + + .../bindings/serial/lrw,lrw-uart.yaml 
| 49 + .../bindings/serial/snps-dw-apb-uart.yaml | 4 + .../soc/xuantie/xuantie,th1520-event.yaml | 37 + .../bindings/sound/everest,es7210.txt | 12 + @@ -45,8 +47,9 @@ Signed-off-by: Mingzheng Xing .../bindings/spi/xuantie,th1520-qspi.yaml | 52 + .../bindings/spi/xuantie,th1520-spi.yaml | 58 + .../bindings/usb/xuantie,th1520-usb.yaml | 76 + - .../devicetree/bindings/vendor-prefixes.yaml | 2 + + .../devicetree/bindings/vendor-prefixes.yaml | 4 + .../bindings/watchdog/xuantie,th1520-wdt.yaml | 19 + + .../locking/queued-spinlocks/arch-support.txt | 2 +- .../membarrier-sync-core/arch-support.txt | 18 +- .../maintainer/maintainer-entry-profile.rst | 2 +- Documentation/process/index.rst | 2 +- @@ -60,17 +63,23 @@ Signed-off-by: Mingzheng Xing .../{ => arch}/riscv/patch-acceptance.rst | 4 +- .../zh_CN/{ => arch}/riscv/vm-layout.rst | 4 +- .../maintainer/maintainer-entry-profile.rst | 2 +- - MAINTAINERS | 29 +- + MAINTAINERS | 89 +- arch/arm64/Kconfig | 1 - arch/arm64/include/asm/tlb.h | 5 +- + arch/arm64/kernel/kexec_image.c | 6 +- + arch/arm64/kernel/machine_kexec.c | 26 +- + arch/arm64/kernel/machine_kexec_file.c | 12 +- arch/arm64/kernel/pci.c | 191 - arch/ia64/Kconfig | 1 - arch/loongarch/Kconfig | 1 - arch/loongarch/include/asm/pgalloc.h | 1 + arch/loongarch/kernel/dma.c | 9 +- arch/mips/include/asm/pgalloc.h | 1 + - arch/riscv/Kconfig | 176 +- - arch/riscv/Kconfig.socs | 59 + + arch/parisc/kernel/kexec_file.c | 8 +- + arch/powerpc/kexec/elf_64.c | 8 +- + arch/powerpc/kexec/file_load_64.c | 18 +- + arch/riscv/Kconfig | 237 +- + arch/riscv/Kconfig.socs | 34 + arch/riscv/Kconfig.vendor | 19 + arch/riscv/Makefile | 23 +- arch/riscv/Makefile.isa | 15 + @@ -98,8 +107,8 @@ Signed-off-by: Mingzheng Xing arch/riscv/boot/dts/sophgo/mango.dtsi | 938 + arch/riscv/boot/dts/spacemit/Makefile | 2 + .../boot/dts/spacemit/k1-bananapi-f3.dts | 448 + - arch/riscv/boot/dts/spacemit/k1-x.dtsi | 1221 ++ - .../riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi | 1192 ++ + 
arch/riscv/boot/dts/spacemit/k1.dtsi | 1221 ++ + arch/riscv/boot/dts/spacemit/k1_pinctrl.dtsi | 1192 ++ arch/riscv/boot/dts/thead/Makefile | 3 +- .../boot/dts/thead/th1520-beaglev-ahead.dts | 222 +- .../dts/thead/th1520-lichee-module-4a.dtsi | 440 +- @@ -109,45 +118,53 @@ Signed-off-by: Mingzheng Xing .../boot/dts/thead/th1520-lpi4a-hx8279.dts | 63 + arch/riscv/boot/dts/thead/th1520.dtsi | 2048 +- arch/riscv/boot/dts/ultrarisc/Makefile | 4 + - .../dts/ultrarisc/dp1000-evb-pinctrl.dtsi | 149 + - .../boot/dts/ultrarisc/dp1000-evb-v1.dts | 61 + - .../boot/dts/ultrarisc/dp1000-mo-pinctrl.dtsi | 146 + - .../riscv/boot/dts/ultrarisc/dp1000-mo-v1.dts | 60 + - arch/riscv/boot/dts/ultrarisc/dp1000.dts | 526 + - arch/riscv/configs/defconfig | 24 +- + .../dts/ultrarisc/dp1000-evb-pinctrl.dtsi | 136 + + .../boot/dts/ultrarisc/dp1000-evb-v1.dts | 67 + + .../boot/dts/ultrarisc/dp1000-mo-pinctrl.dtsi | 133 + + .../riscv/boot/dts/ultrarisc/dp1000-mo-v1.dts | 66 + + .../dts/ultrarisc/dp1000-titan-pinctrl.dtsi | 173 + + .../boot/dts/ultrarisc/dp1000-titan-v1.dts | 139 + + arch/riscv/boot/dts/ultrarisc/dp1000.dtsi | 515 + + arch/riscv/configs/defconfig | 38 +- arch/riscv/configs/dp1000_defconfig | 5530 ++++++ - arch/riscv/configs/k1_defconfig | 31 + - arch/riscv/configs/openeuler_defconfig | 1969 +- + arch/riscv/configs/k1_defconfig | 27 + + arch/riscv/configs/openeuler_defconfig | 2022 +- arch/riscv/configs/sg2042_defconfig | 9 + arch/riscv/configs/th1520_defconfig | 470 + arch/riscv/errata/andes/errata.c | 13 +- arch/riscv/errata/sifive/errata.c | 3 + arch/riscv/errata/thead/errata.c | 3 + - arch/riscv/include/asm/acpi.h | 21 +- + arch/riscv/include/asm/Kbuild | 4 +- + arch/riscv/include/asm/acpi.h | 41 +- arch/riscv/include/asm/arch_hweight.h | 78 + arch/riscv/include/asm/archrandom.h | 72 + arch/riscv/include/asm/asm-prototypes.h | 27 + + arch/riscv/include/asm/asm.h | 14 +- arch/riscv/include/asm/atomic.h | 17 +- - arch/riscv/include/asm/barrier.h | 58 +- + 
arch/riscv/include/asm/barrier.h | 63 +- arch/riscv/include/asm/bitops.h | 258 +- - arch/riscv/include/asm/cmpxchg.h | 496 +- + arch/riscv/include/asm/cacheflush.h | 19 +- + arch/riscv/include/asm/cmpxchg.h | 593 +- arch/riscv/include/asm/compat.h | 1 - arch/riscv/include/asm/cpufeature-macros.h | 66 + arch/riscv/include/asm/cpufeature.h | 69 + - arch/riscv/include/asm/csr.h | 13 + + arch/riscv/include/asm/csr.h | 30 + arch/riscv/include/asm/dmi.h | 24 + arch/riscv/include/asm/elf.h | 2 +- arch/riscv/include/asm/entry-common.h | 17 + arch/riscv/include/asm/errata_list.h | 45 +- arch/riscv/include/asm/fence.h | 10 +- - arch/riscv/include/asm/hwcap.h | 141 +- + arch/riscv/include/asm/fixmap.h | 8 + + arch/riscv/include/asm/hwcap.h | 147 +- arch/riscv/include/asm/hwprobe.h | 26 +- - arch/riscv/include/asm/insn-def.h | 4 + - arch/riscv/include/asm/io.h | 12 +- - arch/riscv/include/asm/irq.h | 60 + + arch/riscv/include/asm/image.h | 2 + + arch/riscv/include/asm/insn-def.h | 70 + + arch/riscv/include/asm/io.h | 15 +- + arch/riscv/include/asm/irq.h | 69 + + arch/riscv/include/asm/kexec.h | 6 + arch/riscv/include/asm/kvm_aia_aplic.h | 58 - arch/riscv/include/asm/kvm_aia_imsic.h | 38 - - arch/riscv/include/asm/kvm_host.h | 9 + + arch/riscv/include/asm/kvm_host.h | 33 + arch/riscv/include/asm/kvm_vcpu_sbi.h | 25 +- arch/riscv/include/asm/membarrier.h | 19 + arch/riscv/include/asm/mmio.h | 5 +- @@ -156,15 +173,16 @@ Signed-off-by: Mingzheng Xing arch/riscv/include/asm/paravirt_api_clock.h | 1 + arch/riscv/include/asm/pgalloc.h | 53 +- arch/riscv/include/asm/pgtable-64.h | 14 +- - arch/riscv/include/asm/pgtable.h | 21 +- - arch/riscv/include/asm/processor.h | 47 +- - arch/riscv/include/asm/sbi.h | 42 + + arch/riscv/include/asm/pgtable.h | 45 +- + arch/riscv/include/asm/processor.h | 72 +- + arch/riscv/include/asm/sbi.h | 103 + arch/riscv/include/asm/simd.h | 64 + - arch/riscv/include/asm/sparsemem.h | 2 +- + arch/riscv/include/asm/spinlock.h | 47 + + 
arch/riscv/include/asm/sse.h | 47 + arch/riscv/include/asm/suspend.h | 5 +- - arch/riscv/include/asm/switch_to.h | 20 +- + arch/riscv/include/asm/switch_to.h | 42 +- arch/riscv/include/asm/sync_core.h | 29 + - arch/riscv/include/asm/thread_info.h | 2 + + arch/riscv/include/asm/thread_info.h | 10 + arch/riscv/include/asm/tlb.h | 18 + arch/riscv/include/asm/vdso/processor.h | 8 +- arch/riscv/include/asm/vector.h | 102 +- @@ -172,16 +190,22 @@ Signed-off-by: Mingzheng Xing .../include/asm/vendor_extensions/andes.h | 19 + arch/riscv/include/asm/vendorid_list.h | 2 +- arch/riscv/include/asm/xor.h | 68 + - arch/riscv/include/uapi/asm/hwprobe.h | 52 +- - arch/riscv/include/uapi/asm/kvm.h | 14 + - arch/riscv/kernel/Makefile | 6 + + arch/riscv/include/uapi/asm/hwprobe.h | 61 +- + arch/riscv/include/uapi/asm/kvm.h | 71 + + arch/riscv/kernel/Makefile | 9 +- arch/riscv/kernel/acpi.c | 135 +- arch/riscv/kernel/acpi_numa.c | 130 + arch/riscv/kernel/alternative.c | 2 +- - arch/riscv/kernel/cpufeature.c | 579 +- - arch/riscv/kernel/entry.S | 24 +- + arch/riscv/kernel/asm-offsets.c | 22 + + arch/riscv/kernel/cpufeature.c | 652 +- + arch/riscv/kernel/elf_kexec.c | 475 - + arch/riscv/kernel/entry.S | 111 +- arch/riscv/kernel/head.S | 18 +- arch/riscv/kernel/kernel_mode_vector.c | 247 + + arch/riscv/kernel/kexec_elf.c | 144 + + arch/riscv/kernel/kexec_image.c | 96 + + arch/riscv/kernel/machine_kexec.c | 26 - + arch/riscv/kernel/machine_kexec_file.c | 361 + arch/riscv/kernel/mcount.S | 10 +- arch/riscv/kernel/module.c | 83 +- arch/riscv/kernel/paravirt.c | 135 + @@ -189,12 +213,14 @@ Signed-off-by: Mingzheng Xing arch/riscv/kernel/ptrace.c | 7 +- arch/riscv/kernel/sbi-ipi.c | 46 +- arch/riscv/kernel/sbi.c | 66 + - arch/riscv/kernel/setup.c | 8 +- + arch/riscv/kernel/setup.c | 45 +- arch/riscv/kernel/signal.c | 20 +- arch/riscv/kernel/smp.c | 17 + arch/riscv/kernel/smpboot.c | 4 +- + arch/riscv/kernel/sse.c | 154 + + arch/riscv/kernel/sse_entry.S | 204 + arch/riscv/kernel/suspend.c | 
100 +- - arch/riscv/kernel/sys_hwprobe.c | 349 + + arch/riscv/kernel/sys_hwprobe.c | 362 + arch/riscv/kernel/sys_riscv.c | 267 - arch/riscv/kernel/time.c | 3 + arch/riscv/kernel/vdso/hwprobe.c | 86 +- @@ -202,41 +228,54 @@ Signed-off-by: Mingzheng Xing arch/riscv/kernel/vendor_extensions.c | 56 + arch/riscv/kernel/vendor_extensions/Makefile | 3 + arch/riscv/kernel/vendor_extensions/andes.c | 18 + - arch/riscv/kvm/Kconfig | 1 + + arch/riscv/kvm/Kconfig | 4 + arch/riscv/kvm/Makefile | 1 + arch/riscv/kvm/aia.c | 37 +- arch/riscv/kvm/aia_aplic.c | 2 +- arch/riscv/kvm/aia_device.c | 2 +- - arch/riscv/kvm/aia_imsic.c | 2 +- + arch/riscv/kvm/aia_imsic.c | 145 +- arch/riscv/kvm/main.c | 2 +- arch/riscv/kvm/tlb.c | 2 +- - arch/riscv/kvm/vcpu.c | 13 + + arch/riscv/kvm/trace.h | 67 + + arch/riscv/kvm/vcpu.c | 104 +- + arch/riscv/kvm/vcpu_exit.c | 39 +- arch/riscv/kvm/vcpu_fp.c | 2 +- - arch/riscv/kvm/vcpu_onereg.c | 98 +- + arch/riscv/kvm/vcpu_insn.c | 28 + + arch/riscv/kvm/vcpu_onereg.c | 271 +- arch/riscv/kvm/vcpu_sbi.c | 189 +- arch/riscv/kvm/vcpu_sbi_replace.c | 32 + arch/riscv/kvm/vcpu_sbi_sta.c | 208 + arch/riscv/kvm/vcpu_vector.c | 2 +- + arch/riscv/kvm/vm.c | 31 + arch/riscv/lib/Makefile | 8 +- arch/riscv/lib/crc32.c | 294 + arch/riscv/lib/memmove.S | 54 +- arch/riscv/lib/riscv_v_helpers.c | 45 + - arch/riscv/lib/uaccess.S | 10 + + arch/riscv/lib/uaccess.S | 14 +- arch/riscv/lib/uaccess_vector.S | 53 + arch/riscv/lib/xor.S | 81 + - arch/riscv/mm/cacheflush.c | 25 +- + arch/riscv/mm/cacheflush.c | 41 +- arch/riscv/mm/dma-noncoherent.c | 9 +- - arch/riscv/mm/pgtable.c | 2 + + arch/riscv/mm/init.c | 2 + + arch/riscv/mm/pgtable.c | 17 +- arch/riscv/mm/tlbflush.c | 31 + + arch/riscv/net/bpf_jit.h | 185 + + arch/riscv/net/bpf_jit_comp32.c | 3 +- + arch/riscv/net/bpf_jit_comp64.c | 213 +- arch/sw_64/Kconfig | 1 - arch/x86/include/asm/hw_irq.h | 2 - + arch/x86/kernel/crash.c | 4 +- + arch/x86/kernel/kexec-bzimage64.c | 23 +- + arch/x86/kvm/x86.c | 8 +- arch/x86/mm/pgtable.c | 
3 + - drivers/acpi/Kconfig | 2 +- + drivers/acpi/Kconfig | 6 +- drivers/acpi/Makefile | 2 +- drivers/acpi/acpi_apd.c | 21 +- drivers/acpi/acpi_lpss.c | 15 +- - drivers/acpi/arm64/dma.c | 17 +- - drivers/acpi/arm64/iort.c | 20 +- + drivers/acpi/apei/Kconfig | 5 + + drivers/acpi/apei/ghes.c | 92 +- + drivers/acpi/arm64/dma.c | 22 +- + drivers/acpi/arm64/iort.c | 39 +- drivers/acpi/bus.c | 4 + drivers/acpi/internal.h | 8 + drivers/acpi/mipi-disco-img.c | 292 + @@ -244,18 +283,25 @@ Signed-off-by: Mingzheng Xing drivers/acpi/numa/srat.c | 34 +- drivers/acpi/pci_link.c | 2 + drivers/acpi/pci_mcfg.c | 17 + - drivers/acpi/riscv/Makefile | 4 +- + drivers/acpi/riscv/Kconfig | 7 + + drivers/acpi/riscv/Makefile | 5 +- drivers/acpi/riscv/cppc.c | 157 + drivers/acpi/riscv/cpuidle.c | 81 + - drivers/acpi/riscv/init.c | 13 + - drivers/acpi/riscv/init.h | 4 + + drivers/acpi/riscv/init.c | 15 + + drivers/acpi/riscv/init.h | 5 + drivers/acpi/riscv/irq.c | 335 + drivers/acpi/riscv/rhct.c | 93 +- - drivers/acpi/scan.c | 151 +- + drivers/acpi/riscv/rimt.c | 520 + + drivers/acpi/scan.c | 199 +- drivers/acpi/thermal.c | 56 +- drivers/acpi/utils.c | 138 +- + drivers/acpi/viot.c | 11 +- + drivers/amba/bus.c | 3 +- drivers/base/arch_numa.c | 2 +- drivers/base/platform-msi.c | 149 +- + drivers/base/platform.c | 3 +- + drivers/bus/fsl-mc/fsl-mc-bus.c | 3 +- + drivers/cdx/cdx.c | 3 +- drivers/char/ipmi/ipmi_si_hardcode.c | 26 +- drivers/char/ipmi/ipmi_si_intf.c | 3 +- drivers/char/ipmi/ipmi_si_pci.c | 6 + @@ -268,8 +314,8 @@ Signed-off-by: Mingzheng Xing drivers/clk/sophgo/clk.h | 152 + drivers/clk/spacemit/Kconfig | 9 + drivers/clk/spacemit/Makefile | 11 + - drivers/clk/spacemit/ccu-spacemit-k1x.c | 2123 ++ - drivers/clk/spacemit/ccu-spacemit-k1x.h | 81 + + drivers/clk/spacemit/ccu-spacemit-k1.c | 2123 ++ + drivers/clk/spacemit/ccu-spacemit-k1.h | 81 + drivers/clk/spacemit/ccu_ddn.c | 161 + drivers/clk/spacemit/ccu_ddn.h | 86 + drivers/clk/spacemit/ccu_ddr.c | 272 + @@ -298,6 +344,7 @@ 
Signed-off-by: Mingzheng Xing drivers/cpufreq/Kconfig | 38 + drivers/cpufreq/Kconfig.arm | 26 - drivers/cpufreq/Makefile | 1 + + drivers/cpufreq/cppc_cpufreq.c | 19 + drivers/cpufreq/th1520-cpufreq.c | 588 + drivers/cpuidle/cpuidle-riscv-sbi.c | 49 +- drivers/dma/Kconfig | 7 + @@ -307,11 +354,15 @@ Signed-off-by: Mingzheng Xing drivers/dma/mv_xor_v2.c | 8 +- drivers/dma/qcom/hidma.c | 6 +- drivers/dma/spacemit-k1-dma.c | 1515 ++ - drivers/firmware/Kconfig | 3 +- - drivers/firmware/Makefile | 1 + + drivers/firmware/Kconfig | 4 +- + drivers/firmware/Makefile | 2 + + drivers/firmware/efi/cper.c | 3 + drivers/firmware/efi/libstub/Makefile | 2 +- drivers/firmware/efi/riscv-runtime.c | 13 + drivers/firmware/qemu_fw_cfg.c | 2 +- + drivers/firmware/riscv/Kconfig | 15 + + drivers/firmware/riscv/Makefile | 3 + + drivers/firmware/riscv/riscv_sse.c | 840 + drivers/firmware/xuantie/Kconfig | 23 + drivers/firmware/xuantie/Makefile | 4 + drivers/firmware/xuantie/th1520_aon.c | 341 + @@ -320,7 +371,7 @@ Signed-off-by: Mingzheng Xing drivers/gpio/Kconfig | 9 + drivers/gpio/Makefile | 1 + drivers/gpio/gpio-dwapb.c | 15 +- - drivers/gpio/gpio-k1x.c | 407 + + drivers/gpio/gpio-k1.c | 407 + drivers/gpio/gpio-pca953x.c | 12 +- drivers/gpu/drm/Kconfig | 4 + drivers/gpu/drm/Makefile | 2 + @@ -918,12 +969,16 @@ Signed-off-by: Mingzheng Xing drivers/gpu/drm/img-rogue/vz_vmm_vm.c | 221 + drivers/gpu/drm/img-rogue/xuantie_sys.c | 521 + drivers/gpu/drm/img-rogue/xuantie_sys.h | 75 + + drivers/gpu/drm/msm/msm_iommu.c | 7 +- + .../drm/nouveau/nvkm/engine/device/tegra.c | 4 +- drivers/gpu/drm/panel/Kconfig | 9 + drivers/gpu/drm/panel/Makefile | 3 +- drivers/gpu/drm/panel/panel-himax-hx8279.c | 326 + .../gpu/drm/panel/panel-jadard-jd9365da-h3.c | 37 +- drivers/gpu/drm/panel/panel-jadard-jd9365da.c | 356 + drivers/gpu/drm/radeon/radeon_irq_kms.c | 2 + + drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 10 +- + drivers/gpu/drm/tegra/drm.c | 5 +- drivers/gpu/drm/ttm/ttm_bo_util.c | 5 +- 
drivers/gpu/drm/ttm/ttm_module.c | 3 +- drivers/gpu/drm/ttm/ttm_resource.c | 7 +- @@ -958,18 +1013,22 @@ Signed-off-by: Mingzheng Xing drivers/gpu/drm/verisilicon/vs_type.h | 70 + drivers/gpu/drm/verisilicon/vs_virtual.c | 359 + drivers/gpu/drm/verisilicon/vs_virtual.h | 37 + + drivers/gpu/host1x/dev.c | 7 +- drivers/hwmon/mr75203.c | 35 +- drivers/hwspinlock/Kconfig | 8 + drivers/hwspinlock/Makefile | 1 + drivers/hwspinlock/th1520_hwspinlock.c | 129 + - drivers/i2c/busses/Kconfig | 8 + - drivers/i2c/busses/Makefile | 2 + + drivers/i2c/busses/Kconfig | 23 + + drivers/i2c/busses/Makefile | 6 + drivers/i2c/busses/i2c-designware-common.c | 27 + drivers/i2c/busses/i2c-designware-core.h | 22 +- drivers/i2c/busses/i2c-designware-master.c | 77 +- .../i2c/busses/i2c-designware-master_dma.c | 348 + .../i2c/busses/i2c-designware-master_dma.h | 6 + drivers/i2c/busses/i2c-designware-platdrv.c | 3 + + drivers/i2c/busses/i2c-lrw-core.h | 350 + + drivers/i2c/busses/i2c-lrw-master.c | 1075 + + drivers/i2c/busses/i2c-lrw-platdrv.c | 235 + drivers/i2c/busses/i2c-spacemit-k1.c | 1299 ++ drivers/i2c/busses/i2c-spacemit-k1.h | 225 + drivers/iio/adc/Kconfig | 23 + @@ -977,14 +1036,15 @@ Signed-off-by: Mingzheng Xing drivers/iio/adc/spacemit-p1-adc.c | 278 + drivers/iio/adc/th1520-adc.c | 573 + drivers/iio/adc/th1520-adc.h | 192 + + drivers/infiniband/hw/usnic/usnic_uiom.c | 6 +- drivers/input/misc/Kconfig | 10 + drivers/input/misc/Makefile | 1 + drivers/input/misc/spacemit-p1-pwrkey.c | 211 + - drivers/iommu/Kconfig | 1 + + drivers/iommu/Kconfig | 3 +- drivers/iommu/Makefile | 2 +- drivers/iommu/apple-dart.c | 3 +- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 8 +- - drivers/iommu/arm/arm-smmu/arm-smmu.c | 3 +- + drivers/iommu/arm/arm-smmu/arm-smmu.c | 7 +- drivers/iommu/arm/arm-smmu/qcom_iommu.c | 3 +- drivers/iommu/exynos-iommu.c | 2 +- drivers/iommu/intel/dmar.c | 16 +- @@ -994,22 +1054,26 @@ Signed-off-by: Mingzheng Xing drivers/iommu/intel/pasid.c | 18 +- drivers/iommu/intel/svm.c 
| 11 +- drivers/iommu/iommu-pages.h | 154 + - drivers/iommu/iommu.c | 2 +- + drivers/iommu/iommu-priv.h | 11 + + drivers/iommu/iommu.c | 144 +- + drivers/iommu/iommufd/hw_pagetable.c | 7 +- drivers/iommu/ipmmu-vmsa.c | 4 +- drivers/iommu/msm_iommu.c | 4 +- drivers/iommu/mtk_iommu.c | 3 +- - drivers/iommu/mtk_iommu_v1.c | 3 +- + drivers/iommu/mtk_iommu_v1.c | 17 +- + drivers/iommu/of_iommu.c | 59 +- drivers/iommu/riscv/Kconfig | 20 + drivers/iommu/riscv/Makefile | 3 + - drivers/iommu/riscv/iommu-bits.h | 784 + - drivers/iommu/riscv/iommu-pci.c | 120 + - drivers/iommu/riscv/iommu-platform.c | 92 + - drivers/iommu/riscv/iommu.c | 1661 ++ - drivers/iommu/riscv/iommu.h | 88 + + drivers/iommu/riscv/iommu-bits.h | 795 + + drivers/iommu/riscv/iommu-ir.c | 697 + + drivers/iommu/riscv/iommu-pci.c | 128 + + drivers/iommu/riscv/iommu-platform.c | 179 + + drivers/iommu/riscv/iommu.c | 1697 ++ + drivers/iommu/riscv/iommu.h | 165 + drivers/iommu/rockchip-iommu.c | 2 +- drivers/iommu/sprd-iommu.c | 3 +- drivers/iommu/sun50i-iommu.c | 2 +- - drivers/iommu/tegra-smmu.c | 4 +- + drivers/iommu/tegra-smmu.c | 7 +- drivers/iommu/virtio-iommu.c | 3 +- drivers/irqchip/Kconfig | 45 + drivers/irqchip/Makefile | 5 + @@ -1018,17 +1082,19 @@ Signed-off-by: Mingzheng Xing drivers/irqchip/irq-riscv-aplic-main.h | 53 + drivers/irqchip/irq-riscv-aplic-msi.c | 285 + drivers/irqchip/irq-riscv-imsic-early.c | 263 + - drivers/irqchip/irq-riscv-imsic-platform.c | 395 + + drivers/irqchip/irq-riscv-imsic-platform.c | 397 + drivers/irqchip/irq-riscv-imsic-state.c | 891 + drivers/irqchip/irq-riscv-imsic-state.h | 108 + drivers/irqchip/irq-riscv-intc.c | 152 +- drivers/irqchip/irq-sg2044-msi.c | 403 + - drivers/irqchip/irq-sifive-plic.c | 451 +- + drivers/irqchip/irq-sifive-plic.c | 452 +- drivers/irqchip/irq-thead-c900-aclint-sswi.c | 351 + drivers/mailbox/Kconfig | 8 + drivers/mailbox/Makefile | 2 + drivers/mailbox/bcm-flexrm-mailbox.c | 8 +- drivers/mailbox/th1520-mailbox.c | 614 + + 
.../media/platform/nvidia/tegra-vde/iommu.c | 7 +- + drivers/media/platform/qcom/venus/firmware.c | 6 +- drivers/mfd/Kconfig | 12 + drivers/mfd/Makefile | 2 + drivers/mfd/spacemit-p1.c | 481 + @@ -1192,29 +1258,47 @@ Signed-off-by: Mingzheng Xing .../wireless/aic8800/aic8800_fdrv/sdio_host.h | 41 + .../wireless/aic8800/aic8800_fdrv/usb_host.c | 146 + .../wireless/aic8800/aic8800_fdrv/usb_host.h | 41 + + drivers/net/wireless/ath/ath10k/snoc.c | 6 +- + drivers/net/wireless/ath/ath11k/ahb.c | 6 +- drivers/nvmem/Kconfig | 10 + drivers/nvmem/Makefile | 2 + drivers/nvmem/th1520-efuse.c | 1197 ++ - drivers/of/device.c | 42 +- + drivers/of/device.c | 79 +- drivers/pci/controller/cadence/Kconfig | 11 + drivers/pci/controller/cadence/Makefile | 1 + .../controller/cadence/pcie-cadence-sophgo.c | 936 + .../controller/cadence/pcie-cadence-sophgo.h | 6 + drivers/pci/controller/dwc/Kconfig | 26 + drivers/pci/controller/dwc/Makefile | 2 + - .../pci/controller/dwc/pcie-designware-host.c | 81 + + drivers/pci/controller/dwc/pci-keystone.c | 7 - + .../pci/controller/dwc/pcie-designware-host.c | 58 +- drivers/pci/controller/dwc/pcie-designware.c | 4 + drivers/pci/controller/dwc/pcie-designware.h | 39 + drivers/pci/controller/dwc/pcie-dw-sophgo.c | 1687 ++ drivers/pci/controller/dwc/pcie-dw-sophgo.h | 251 + - drivers/pci/controller/dwc/pcie-ultrarisc.c | 156 + + drivers/pci/controller/dwc/pcie-ultrarisc.c | 202 + + .../controller/mobiveil/pcie-mobiveil-host.c | 11 +- + drivers/pci/controller/pci-aardvark.c | 10 +- + drivers/pci/controller/pci-tegra.c | 10 +- + drivers/pci/controller/pcie-altera-msi.c | 11 +- + drivers/pci/controller/pcie-brcmstb.c | 11 +- + drivers/pci/controller/pcie-mediatek-gen3.c | 13 +- + drivers/pci/controller/pcie-mediatek.c | 11 +- + drivers/pci/controller/pcie-rcar-host.c | 10 +- + drivers/pci/controller/pcie-xilinx-nwl.c | 11 +- + drivers/pci/controller/pcie-xilinx.c | 9 +- + drivers/pci/controller/vmd.c | 13 +- drivers/pci/msi/msi.c | 61 +- 
drivers/pci/pci-acpi.c | 248 +- + drivers/pci/pci-driver.c | 3 +- drivers/pci/pci.h | 4 +- drivers/pci/pcie/portdrv.c | 2 +- - drivers/perf/Kconfig | 14 + + drivers/perf/Kconfig | 41 + + drivers/perf/Makefile | 1 + drivers/perf/arm_smmuv3_pmu.c | 4 +- - drivers/perf/riscv_pmu_sbi.c | 44 +- + drivers/perf/lrw_ddr_pmu.c | 759 + + drivers/perf/riscv_pmu.c | 19 + + drivers/perf/riscv_pmu_sbi.c | 111 +- drivers/phy/Kconfig | 1 + drivers/phy/Makefile | 3 +- drivers/phy/synopsys/Kconfig | 13 + @@ -1222,7 +1306,7 @@ Signed-off-by: Mingzheng Xing drivers/phy/synopsys/phy-dw-mipi-dphy.c | 824 + drivers/pinctrl/Kconfig | 37 +- drivers/pinctrl/Makefile | 5 + - drivers/pinctrl/pinctrl-spacemit-k1x.c | 2101 ++ + drivers/pinctrl/pinctrl-spacemit-k1.c | 2101 ++ drivers/pinctrl/pinctrl-spacemit-p1.c | 631 + drivers/pinctrl/pinctrl-th1520.c | 1180 ++ drivers/pinctrl/sophgo/Makefile | 2 + @@ -1231,9 +1315,9 @@ Signed-off-by: Mingzheng Xing drivers/pinctrl/sophgo/pinctrl-sophgo.h | 70 + drivers/pinctrl/ultrarisc/Kconfig | 23 + drivers/pinctrl/ultrarisc/Makefile | 4 + - .../ultrarisc/pinctrl-ultrarisc-dp1000.c | 122 + - drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c | 499 + - drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h | 77 + + .../ultrarisc/pinctrl-ultrarisc-dp1000.c | 123 + + drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c | 566 + + drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h | 78 + .../platform/surface/surface_acpi_notify.c | 14 +- drivers/pwm/Kconfig | 13 +- drivers/pwm/Makefile | 2 + @@ -1244,10 +1328,11 @@ Signed-off-by: Mingzheng Xing drivers/regulator/Makefile | 2 + drivers/regulator/spacemit-p1-regulator.c | 268 + drivers/regulator/th1520-aon-regulator.c | 770 + + drivers/remoteproc/remoteproc_core.c | 6 +- drivers/reset/Kconfig | 16 + drivers/reset/Makefile | 3 + drivers/reset/reset-sophgo.c | 163 + - drivers/reset/reset-spacemit-k1x.c | 669 + + drivers/reset/reset-spacemit-k1.c | 669 + drivers/reset/reset-th1520.c | 170 + drivers/rpmsg/Kconfig | 4 + drivers/rpmsg/Makefile | 
1 + @@ -1259,6 +1344,7 @@ Signed-off-by: Mingzheng Xing drivers/rtc/rtc-xgene.c | 32 + drivers/soc/Kconfig | 2 + drivers/soc/Makefile | 3 + + drivers/soc/fsl/qbman/qman_portal.c | 5 +- drivers/soc/sophgo/Makefile | 3 + drivers/soc/sophgo/tach/sophgo-tach.c | 330 + drivers/soc/sophgo/top/top_intc.c | 412 + @@ -1269,7 +1355,7 @@ Signed-off-by: Mingzheng Xing drivers/soc/xuantie/Kconfig | 34 + drivers/soc/xuantie/Makefile | 13 + drivers/soc/xuantie/nna/GPLHEADER | 356 + - drivers/soc/xuantie/nna/Kconfig | 64 + + drivers/soc/xuantie/nna/Kconfig | 65 + drivers/soc/xuantie/nna/Makefile | 7 + drivers/soc/xuantie/nna/README | 29 + drivers/soc/xuantie/nna/build.mk | 161 + @@ -1480,32 +1566,43 @@ Signed-off-by: Mingzheng Xing drivers/tee/optee/smc_abi.c | 37 + drivers/tty/hvc/Kconfig | 2 +- drivers/tty/hvc/hvc_riscv_sbi.c | 37 +- + drivers/tty/serial/8250/8250_core.c | 74 + drivers/tty/serial/8250/8250_dma.c | 134 +- drivers/tty/serial/8250/8250_dw.c | 167 +- drivers/tty/serial/8250/8250_dwlib.c | 3 +- drivers/tty/serial/8250/8250_dwlib.h | 33 +- - drivers/tty/serial/8250/8250_port.c | 12 +- - drivers/tty/serial/Kconfig | 21 +- + drivers/tty/serial/8250/8250_of.c | 218 +- + drivers/tty/serial/8250/8250_port.c | 36 +- + drivers/tty/serial/Kconfig | 35 +- drivers/tty/serial/Makefile | 1 + drivers/tty/serial/earlycon-riscv-sbi.c | 27 +- + drivers/tty/serial/lrw_uart.c | 2839 +++ drivers/tty/serial/serial_port.c | 145 + - drivers/tty/serial/spacemit_k1x_uart.c | 1979 ++ drivers/ufs/host/ufs-qcom.c | 9 +- drivers/usb/dwc3/Kconfig | 20 + drivers/usb/dwc3/Makefile | 2 + drivers/usb/dwc3/core.c | 22 +- drivers/usb/dwc3/dwc3-xuantie.c | 275 + + drivers/vfio/Kconfig | 2 +- + drivers/vfio/vfio_iommu_type1.c | 7 +- + drivers/vhost/vdpa.c | 14 +- drivers/watchdog/Kconfig | 14 + drivers/watchdog/Makefile | 1 + drivers/watchdog/dw_wdt.c | 13 +- drivers/watchdog/th1520_wdt.c | 393 + - include/acpi/acpi_bus.h | 37 +- + include/acpi/acpi_bus.h | 40 +- + include/acpi/actbl1.h | 3 +- + 
include/acpi/actbl2.h | 83 + include/acpi/actbl3.h | 18 +- include/asm-generic/pgalloc.h | 7 +- + include/asm-generic/qspinlock.h | 2 + + include/asm-generic/spinlock.h | 85 +- + include/asm-generic/spinlock_types.h | 12 +- + include/asm-generic/ticket_spinlock.h | 105 + include/drm/bridge/dw_hdmi.h | 5 + .../dt-bindings/clock/sophgo-mango-clock.h | 165 + include/dt-bindings/clock/sophgo.h | 15 + - .../dt-bindings/clock/spacemit-k1x-clock.h | 223 + + include/dt-bindings/clock/spacemit-k1-clock.h | 223 + include/dt-bindings/clock/th1520-audiosys.h | 35 + include/dt-bindings/clock/th1520-dspsys.h | 33 + .../dt-bindings/clock/th1520-fm-ap-clock.h | 513 + @@ -1516,15 +1613,16 @@ Signed-off-by: Mingzheng Xing include/dt-bindings/dma/spacemit-k1-dma.h | 54 + include/dt-bindings/firmware/xuantie/rsrc.h | 18 + include/dt-bindings/mmc/spacemit-k1-sdhci.h | 62 + - include/dt-bindings/pinctrl/k1-x-pinctrl.h | 198 + + include/dt-bindings/pinctrl/k1-pinctrl.h | 198 + .../dt-bindings/pinctrl/ur-dp1000-pinctrl.h | 64 + .../dt-bindings/reset/sophgo-mango-resets.h | 96 + - .../dt-bindings/reset/spacemit-k1x-reset.h | 126 + + include/dt-bindings/reset/spacemit-k1-reset.h | 126 + .../dt-bindings/reset/xuantie,th1520-reset.h | 28 + .../dt-bindings/soc/th1520_system_status.h | 38 + .../dt-bindings/soc/xuantie,th1520-iopmp.h | 41 + include/linux/acpi.h | 15 + include/linux/acpi_iort.h | 4 +- + include/linux/acpi_rimt.h | 28 + include/linux/cpuhotplug.h | 3 + include/linux/cpumask.h | 17 + include/linux/crc32.h | 3 + @@ -1532,17 +1630,21 @@ Signed-off-by: Mingzheng Xing include/linux/find.h | 27 + include/linux/firmware/xuantie/ipc.h | 167 + include/linux/firmware/xuantie/th1520_event.h | 35 + - include/linux/iommu.h | 4 +- + include/linux/iommu.h | 40 +- include/linux/irqchip/riscv-aplic.h | 145 + include/linux/irqchip/riscv-imsic.h | 96 + include/linux/irqdomain.h | 17 + - include/linux/irqdomain_defs.h | 2 + + include/linux/irqdomain_defs.h | 3 + + include/linux/kexec.h | 7 +- + 
include/linux/kvm_host.h | 7 +- include/linux/mfd/spacemit_p1.h | 250 + include/linux/mlx4/device.h | 2 +- include/linux/mm.h | 16 + - include/linux/msi.h | 28 +- + include/linux/msi.h | 34 +- include/linux/pci-ecam.h | 1 + + include/linux/perf/riscv_pmu.h | 3 + .../linux/platform_data/spacemit_k1_sdhci.h | 99 + + include/linux/riscv_sse.h | 75 + include/linux/serial_core.h | 2 + include/linux/sizes.h | 9 + include/linux/string_choices.h | 11 + @@ -1553,23 +1655,28 @@ Signed-off-by: Mingzheng Xing include/soc/xuantie/th1520_system_status.h | 36 + include/uapi/drm/drm_fourcc.h | 90 + include/uapi/drm/vs_drm.h | 50 + + include/uapi/linux/kexec.h | 1 + + include/uapi/linux/serial_core.h | 3 + init/Kconfig | 3 + + kernel/crash_core.c | 8 +- kernel/irq/irqdomain.c | 28 +- kernel/irq/matrix.c | 28 +- - kernel/irq/msi.c | 184 +- + kernel/irq/msi.c | 186 +- + kernel/kexec_core.c | 2 + + kernel/kexec_file.c | 14 +- kernel/panic.c | 8 + kernel/sched/core.c | 11 +- kernel/sched/fair.c | 3 + kernel/sched/membarrier.c | 13 +- kernel/time/tick-oneshot.c | 2 +- lib/find_bit.c | 12 + - mm/memblock.c | 6 +- mm/pgtable-generic.c | 1 + net/rfkill/Makefile | 1 + net/rfkill/rfkill-bt.c | 244 + net/rfkill/rfkill-wlan.c | 283 + scripts/package/builddeb | 4 +- scripts/package/kernel.spec | 10 + + security/integrity/ima/ima_kexec.c | 4 +- sound/core/pcm_lib.c | 1 + sound/pci/hda/hda_intel.c | 5 +- sound/soc/Kconfig | 1 + @@ -1598,7 +1705,26 @@ Signed-off-by: Mingzheng Xing sound/soc/xuantie/th1520-tdm.c | 610 + sound/soc/xuantie/th1520-tdm.h | 122 + tools/lib/perf/cpumap.c | 10 +- - tools/perf/pmu-events/arch/riscv/mapfile.csv | 2 + + tools/perf/arch/riscv/Makefile | 1 + + tools/perf/arch/riscv/util/Build | 1 + + tools/perf/arch/riscv/util/kvm-stat.c | 78 + + .../arch/riscv/util/riscv_exception_types.h | 35 + + .../arch/riscv/lrw/lrw-core/branch.json | 77 + + .../arch/riscv/lrw/lrw-core/exception.json | 102 + + .../arch/riscv/lrw/lrw-core/firmware.json | 68 + + 
.../arch/riscv/lrw/lrw-core/general_cpu.json | 12 + + .../arch/riscv/lrw/lrw-core/general_inst.json | 57 + + .../arch/riscv/lrw/lrw-core/l1dcache.json | 197 + + .../arch/riscv/lrw/lrw-core/l1icache.json | 97 + + .../arch/riscv/lrw/lrw-core/l2cache.json | 112 + + .../arch/riscv/lrw/lrw-core/l3cache.json | 32 + + .../arch/riscv/lrw/lrw-core/ldst.json | 67 + + .../arch/riscv/lrw/lrw-core/mem.json | 182 + + .../arch/riscv/lrw/lrw-core/pipeline.json | 132 + + .../arch/riscv/lrw/lrw-core/spe.json | 7 + + .../arch/riscv/lrw/lrw-core/tlb.json | 82 + + .../arch/riscv/lrw/lrw-core/vec.json | 82 + + tools/perf/pmu-events/arch/riscv/mapfile.csv | 3 + .../arch/riscv/thead/c900-legacy/cache.json | 67 + .../riscv/thead/c900-legacy/firmware.json | 68 + .../riscv/thead/c900-legacy/instruction.json | 72 + @@ -1611,12 +1737,18 @@ Signed-off-by: Mingzheng Xing .../selftests/kvm/include/riscv/processor.h | 40 +- .../selftests/kvm/lib/riscv/processor.c | 4 +- .../selftests/kvm/riscv/get-reg-list.c | 4 +- + tools/testing/selftests/riscv/Makefile | 2 +- .../testing/selftests/riscv/hwprobe/Makefile | 9 +- tools/testing/selftests/riscv/hwprobe/cbo.c | 228 + .../testing/selftests/riscv/hwprobe/hwprobe.c | 64 +- .../testing/selftests/riscv/hwprobe/hwprobe.h | 15 + + tools/testing/selftests/riscv/sse/Makefile | 5 + + .../selftests/riscv/sse/module/Makefile | 16 + + .../riscv/sse/module/riscv_sse_test.c | 513 + + .../selftests/riscv/sse/run_sse_test.sh | 44 + .../selftests/riscv/vector/vstate_prctl.c | 10 +- - 1611 files changed, 608647 insertions(+), 2794 deletions(-) + virt/kvm/eventfd.c | 12 +- + 1743 files changed, 620657 insertions(+), 4201 deletions(-) rename Documentation/{ => arch}/riscv/acpi.rst (100%) rename Documentation/{ => arch}/riscv/boot-image-header.rst (100%) rename Documentation/{ => arch}/riscv/boot.rst (100%) @@ -1628,6 +1760,7 @@ Signed-off-by: Mingzheng Xing rename Documentation/{ => arch}/riscv/vector.rst (100%) rename Documentation/{ => arch}/riscv/vm-layout.rst (100%) 
create mode 100644 Documentation/devicetree/bindings/hwlock/xuantie,th1520-hwspinlock.yaml + create mode 100644 Documentation/devicetree/bindings/i2c/lrw,lrw-i2c.yaml create mode 100644 Documentation/devicetree/bindings/iio/adc/thead,th1520-adc.yaml create mode 100644 Documentation/devicetree/bindings/iio/adc/xuantie,th1520-adc.yaml create mode 100644 Documentation/devicetree/bindings/interrupt-controller/riscv,aplic.yaml @@ -1641,6 +1774,7 @@ Signed-off-by: Mingzheng Xing create mode 100644 Documentation/devicetree/bindings/pinctrl/ultrarisc,dp1000-pinctrl.yaml create mode 100644 Documentation/devicetree/bindings/pwm/xuantie,th1520-pwm.yaml create mode 100644 Documentation/devicetree/bindings/reset/xuantie,th1520-reset.yaml + create mode 100644 Documentation/devicetree/bindings/serial/lrw,lrw-uart.yaml create mode 100644 Documentation/devicetree/bindings/soc/xuantie/xuantie,th1520-event.yaml create mode 100644 Documentation/devicetree/bindings/sound/everest,es7210.txt create mode 100644 Documentation/devicetree/bindings/sound/everest,es8156.yaml @@ -1682,8 +1816,8 @@ Signed-off-by: Mingzheng Xing create mode 100644 arch/riscv/boot/dts/sophgo/mango.dtsi create mode 100644 arch/riscv/boot/dts/spacemit/Makefile create mode 100644 arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts - create mode 100644 arch/riscv/boot/dts/spacemit/k1-x.dtsi - create mode 100644 arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi + create mode 100644 arch/riscv/boot/dts/spacemit/k1.dtsi + create mode 100644 arch/riscv/boot/dts/spacemit/k1_pinctrl.dtsi create mode 100644 arch/riscv/boot/dts/thead/th1520-lichee-pi-4a-16g.dts create mode 100644 arch/riscv/boot/dts/thead/th1520-lpi4a-dsi0.dts create mode 100644 arch/riscv/boot/dts/thead/th1520-lpi4a-hx8279.dts @@ -1692,7 +1826,9 @@ Signed-off-by: Mingzheng Xing create mode 100644 arch/riscv/boot/dts/ultrarisc/dp1000-evb-v1.dts create mode 100644 arch/riscv/boot/dts/ultrarisc/dp1000-mo-pinctrl.dtsi create mode 100644 
arch/riscv/boot/dts/ultrarisc/dp1000-mo-v1.dts - create mode 100644 arch/riscv/boot/dts/ultrarisc/dp1000.dts + create mode 100644 arch/riscv/boot/dts/ultrarisc/dp1000-titan-pinctrl.dtsi + create mode 100644 arch/riscv/boot/dts/ultrarisc/dp1000-titan-v1.dts + create mode 100644 arch/riscv/boot/dts/ultrarisc/dp1000.dtsi create mode 100644 arch/riscv/configs/dp1000_defconfig create mode 100644 arch/riscv/configs/k1_defconfig create mode 100644 arch/riscv/configs/sg2042_defconfig @@ -1706,28 +1842,38 @@ Signed-off-by: Mingzheng Xing create mode 100644 arch/riscv/include/asm/paravirt.h create mode 100644 arch/riscv/include/asm/paravirt_api_clock.h create mode 100644 arch/riscv/include/asm/simd.h + create mode 100644 arch/riscv/include/asm/spinlock.h + create mode 100644 arch/riscv/include/asm/sse.h create mode 100644 arch/riscv/include/asm/sync_core.h create mode 100644 arch/riscv/include/asm/vendor_extensions.h create mode 100644 arch/riscv/include/asm/vendor_extensions/andes.h create mode 100644 arch/riscv/include/asm/xor.h create mode 100644 arch/riscv/kernel/acpi_numa.c + delete mode 100644 arch/riscv/kernel/elf_kexec.c create mode 100644 arch/riscv/kernel/kernel_mode_vector.c + create mode 100644 arch/riscv/kernel/kexec_elf.c + create mode 100644 arch/riscv/kernel/kexec_image.c create mode 100644 arch/riscv/kernel/paravirt.c + create mode 100644 arch/riscv/kernel/sse.c + create mode 100644 arch/riscv/kernel/sse_entry.S create mode 100644 arch/riscv/kernel/sys_hwprobe.c create mode 100644 arch/riscv/kernel/vendor_extensions.c create mode 100644 arch/riscv/kernel/vendor_extensions/Makefile create mode 100644 arch/riscv/kernel/vendor_extensions/andes.c + create mode 100644 arch/riscv/kvm/trace.h create mode 100644 arch/riscv/kvm/vcpu_sbi_sta.c create mode 100644 arch/riscv/lib/crc32.c create mode 100644 arch/riscv/lib/riscv_v_helpers.c create mode 100644 arch/riscv/lib/uaccess_vector.S create mode 100644 arch/riscv/lib/xor.S create mode 100644 
drivers/acpi/mipi-disco-img.c + create mode 100644 drivers/acpi/riscv/Kconfig create mode 100644 drivers/acpi/riscv/cppc.c create mode 100644 drivers/acpi/riscv/cpuidle.c create mode 100644 drivers/acpi/riscv/init.c create mode 100644 drivers/acpi/riscv/init.h create mode 100644 drivers/acpi/riscv/irq.c + create mode 100644 drivers/acpi/riscv/rimt.c create mode 100644 drivers/clk/sophgo/Makefile create mode 100644 drivers/clk/sophgo/clk-dummy.c create mode 100644 drivers/clk/sophgo/clk-mango.c @@ -1735,8 +1881,8 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/clk/sophgo/clk.h create mode 100644 drivers/clk/spacemit/Kconfig create mode 100644 drivers/clk/spacemit/Makefile - create mode 100644 drivers/clk/spacemit/ccu-spacemit-k1x.c - create mode 100644 drivers/clk/spacemit/ccu-spacemit-k1x.h + create mode 100644 drivers/clk/spacemit/ccu-spacemit-k1.c + create mode 100644 drivers/clk/spacemit/ccu-spacemit-k1.h create mode 100644 drivers/clk/spacemit/ccu_ddn.c create mode 100644 drivers/clk/spacemit/ccu_ddn.h create mode 100644 drivers/clk/spacemit/ccu_ddr.c @@ -1763,12 +1909,15 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/clk/xuantie/gate/xuantie-gate.c create mode 100644 drivers/cpufreq/th1520-cpufreq.c create mode 100644 drivers/dma/spacemit-k1-dma.c + create mode 100644 drivers/firmware/riscv/Kconfig + create mode 100644 drivers/firmware/riscv/Makefile + create mode 100644 drivers/firmware/riscv/riscv_sse.c create mode 100644 drivers/firmware/xuantie/Kconfig create mode 100644 drivers/firmware/xuantie/Makefile create mode 100644 drivers/firmware/xuantie/th1520_aon.c create mode 100644 drivers/firmware/xuantie/th1520_aon_pd.c create mode 100644 drivers/firmware/xuantie/th1520_proc_debug.c - create mode 100644 drivers/gpio/gpio-k1x.c + create mode 100644 drivers/gpio/gpio-k1.c create mode 100644 drivers/gpu/drm/img-rogue/Kconfig create mode 100644 drivers/gpu/drm/img-rogue/Makefile create mode 100644 drivers/gpu/drm/img-rogue/allocmem.c @@ 
-2384,6 +2533,9 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/hwspinlock/th1520_hwspinlock.c create mode 100644 drivers/i2c/busses/i2c-designware-master_dma.c create mode 100644 drivers/i2c/busses/i2c-designware-master_dma.h + create mode 100644 drivers/i2c/busses/i2c-lrw-core.h + create mode 100644 drivers/i2c/busses/i2c-lrw-master.c + create mode 100644 drivers/i2c/busses/i2c-lrw-platdrv.c create mode 100644 drivers/i2c/busses/i2c-spacemit-k1.c create mode 100644 drivers/i2c/busses/i2c-spacemit-k1.h create mode 100644 drivers/iio/adc/spacemit-p1-adc.c @@ -2394,6 +2546,7 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/iommu/riscv/Kconfig create mode 100644 drivers/iommu/riscv/Makefile create mode 100644 drivers/iommu/riscv/iommu-bits.h + create mode 100644 drivers/iommu/riscv/iommu-ir.c create mode 100644 drivers/iommu/riscv/iommu-pci.c create mode 100644 drivers/iommu/riscv/iommu-platform.c create mode 100644 drivers/iommu/riscv/iommu.c @@ -2559,10 +2712,11 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/pci/controller/dwc/pcie-dw-sophgo.c create mode 100644 drivers/pci/controller/dwc/pcie-dw-sophgo.h create mode 100644 drivers/pci/controller/dwc/pcie-ultrarisc.c + create mode 100644 drivers/perf/lrw_ddr_pmu.c create mode 100644 drivers/phy/synopsys/Kconfig create mode 100644 drivers/phy/synopsys/Makefile create mode 100644 drivers/phy/synopsys/phy-dw-mipi-dphy.c - create mode 100644 drivers/pinctrl/pinctrl-spacemit-k1x.c + create mode 100644 drivers/pinctrl/pinctrl-spacemit-k1.c create mode 100644 drivers/pinctrl/pinctrl-spacemit-p1.c create mode 100644 drivers/pinctrl/pinctrl-th1520.c create mode 100644 drivers/pinctrl/sophgo/Makefile @@ -2579,7 +2733,7 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/regulator/spacemit-p1-regulator.c create mode 100644 drivers/regulator/th1520-aon-regulator.c create mode 100644 drivers/reset/reset-sophgo.c - create mode 100644 drivers/reset/reset-spacemit-k1x.c + create mode 
100644 drivers/reset/reset-spacemit-k1.c create mode 100644 drivers/reset/reset-th1520.c create mode 100644 drivers/rpmsg/th1520_rpmsg.c create mode 100644 drivers/rtc/rtc-astbmc.c @@ -2795,12 +2949,13 @@ Signed-off-by: Mingzheng Xing create mode 100644 drivers/spi/spi-spacemit-k1-qspi.c create mode 100644 drivers/spi/spi-spacemit-k1.c create mode 100644 drivers/spi/spi-spacemit-k1.h - create mode 100644 drivers/tty/serial/spacemit_k1x_uart.c + create mode 100644 drivers/tty/serial/lrw_uart.c create mode 100644 drivers/usb/dwc3/dwc3-xuantie.c create mode 100644 drivers/watchdog/th1520_wdt.c + create mode 100644 include/asm-generic/ticket_spinlock.h create mode 100644 include/dt-bindings/clock/sophgo-mango-clock.h create mode 100644 include/dt-bindings/clock/sophgo.h - create mode 100644 include/dt-bindings/clock/spacemit-k1x-clock.h + create mode 100644 include/dt-bindings/clock/spacemit-k1-clock.h create mode 100644 include/dt-bindings/clock/th1520-audiosys.h create mode 100644 include/dt-bindings/clock/th1520-dspsys.h create mode 100644 include/dt-bindings/clock/th1520-fm-ap-clock.h @@ -2811,19 +2966,21 @@ Signed-off-by: Mingzheng Xing create mode 100644 include/dt-bindings/dma/spacemit-k1-dma.h create mode 100644 include/dt-bindings/firmware/xuantie/rsrc.h create mode 100644 include/dt-bindings/mmc/spacemit-k1-sdhci.h - create mode 100644 include/dt-bindings/pinctrl/k1-x-pinctrl.h + create mode 100644 include/dt-bindings/pinctrl/k1-pinctrl.h create mode 100644 include/dt-bindings/pinctrl/ur-dp1000-pinctrl.h create mode 100644 include/dt-bindings/reset/sophgo-mango-resets.h - create mode 100644 include/dt-bindings/reset/spacemit-k1x-reset.h + create mode 100644 include/dt-bindings/reset/spacemit-k1-reset.h create mode 100644 include/dt-bindings/reset/xuantie,th1520-reset.h create mode 100644 include/dt-bindings/soc/th1520_system_status.h create mode 100644 include/dt-bindings/soc/xuantie,th1520-iopmp.h + create mode 100644 include/linux/acpi_rimt.h create mode 
100644 include/linux/firmware/xuantie/ipc.h create mode 100644 include/linux/firmware/xuantie/th1520_event.h create mode 100644 include/linux/irqchip/riscv-aplic.h create mode 100644 include/linux/irqchip/riscv-imsic.h create mode 100644 include/linux/mfd/spacemit_p1.h create mode 100644 include/linux/platform_data/spacemit_k1_sdhci.h + create mode 100644 include/linux/riscv_sse.h create mode 100644 include/linux/th1520_proc_debug.h create mode 100644 include/linux/th1520_rpmsg.h create mode 100644 include/soc/xuantie/th1520_system_monitor.h @@ -2851,6 +3008,23 @@ Signed-off-by: Mingzheng Xing create mode 100644 sound/soc/xuantie/th1520-spdif.h create mode 100644 sound/soc/xuantie/th1520-tdm.c create mode 100644 sound/soc/xuantie/th1520-tdm.h + create mode 100644 tools/perf/arch/riscv/util/kvm-stat.c + create mode 100644 tools/perf/arch/riscv/util/riscv_exception_types.h + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/branch.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/exception.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/firmware.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/general_cpu.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/general_inst.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l1dcache.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l1icache.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l2cache.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l3cache.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/ldst.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/mem.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/pipeline.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/spe.json + create mode 100644 
tools/perf/pmu-events/arch/riscv/lrw/lrw-core/tlb.json + create mode 100644 tools/perf/pmu-events/arch/riscv/lrw/lrw-core/vec.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json @@ -2859,6 +3033,10 @@ Signed-off-by: Mingzheng Xing create mode 100644 tools/perf/pmu-events/arch/riscv/thead/th1520-ddr/uncore-ddr-pmu.json create mode 100644 tools/testing/selftests/riscv/hwprobe/cbo.c create mode 100644 tools/testing/selftests/riscv/hwprobe/hwprobe.h + create mode 100644 tools/testing/selftests/riscv/sse/Makefile + create mode 100644 tools/testing/selftests/riscv/sse/module/Makefile + create mode 100644 tools/testing/selftests/riscv/sse/module/riscv_sse_test.c + create mode 100644 tools/testing/selftests/riscv/sse/run_sse_test.sh diff --git a/Documentation/arch/index.rst b/Documentation/arch/index.rst index 84b80255b851..f4794117e56b 100644 @@ -2891,10 +3069,10 @@ rename from Documentation/riscv/features.rst rename to Documentation/arch/riscv/features.rst diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst new file mode 100644 -index 000000000000..971370894bfd +index 000000000000..24c69be798c1 --- /dev/null +++ b/Documentation/arch/riscv/hwprobe.rst -@@ -0,0 +1,271 @@ +@@ -0,0 +1,303 @@ +.. SPDX-License-Identifier: GPL-2.0 + +RISC-V Hardware Probing Interface @@ -3080,6 +3258,9 @@ index 000000000000..971370894bfd + defined in the Atomic Compare-and-Swap (CAS) instructions manual starting + from commit 5059e0ca641c ("update to ratified"). + ++ * :c:macro:`RISCV_HWPROBE_EXT_ZICNTR`: The Zicntr extension version 2.0 ++ is supported as defined in the RISC-V ISA manual. 
++ + * :c:macro:`RISCV_HWPROBE_EXT_ZICOND`: The Zicond extension is supported as + defined in the RISC-V Integer Conditional (Zicond) operations extension + manual starting from commit 95cf1f9 ("Add changes requested by Ved @@ -3089,6 +3270,9 @@ index 000000000000..971370894bfd + supported as defined in the RISC-V ISA manual starting from commit + d8ab5c78c207 ("Zihintpause is ratified"). + ++ * :c:macro:`RISCV_HWPROBE_EXT_ZIHPM`: The Zihpm extension version 2.0 ++ is supported as defined in the RISC-V ISA manual. ++ + * :c:macro:`RISCV_HWPROBE_EXT_ZVE32X`: The Vector sub-extension Zve32x is + supported, as defined by version 1.0 of the RISC-V Vector extension manual. + @@ -3136,9 +3320,32 @@ index 000000000000..971370894bfd + ratified in commit 98918c844281 ("Merge pull request #1217 from + riscv/zawrs") of riscv-isa-manual. + ++ * :c:macro:`RISCV_HWPROBE_EXT_ZAAMO`: The Zaamo extension is supported as ++ defined in the in the RISC-V ISA manual starting from commit e87412e621f1 ++ ("integrate Zaamo and Zalrsc text (#1304)"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZALRSC`: The Zalrsc extension is supported as ++ defined in the in the RISC-V ISA manual starting from commit e87412e621f1 ++ ("integrate Zaamo and Zalrsc text (#1304)"). ++ + * :c:macro:`RISCV_HWPROBE_EXT_SUPM`: The Supm extension is supported as + defined in version 1.0 of the RISC-V Pointer Masking extensions. + ++ * :c:macro:`RISCV_HWPROBE_EXT_ZFBFMIN`: The Zfbfmin extension is supported as ++ defined in the RISC-V ISA manual starting from commit 4dc23d6229de ++ ("Added Chapter title to BF16"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVFBFMIN`: The Zvfbfmin extension is supported as ++ defined in the RISC-V ISA manual starting from commit 4dc23d6229de ++ ("Added Chapter title to BF16"). ++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZVFBFWMA`: The Zvfbfwma extension is supported as ++ defined in the RISC-V ISA manual starting from commit 4dc23d6229de ++ ("Added Chapter title to BF16"). 
++ ++ * :c:macro:`RISCV_HWPROBE_EXT_ZICBOM`: The Zicbom extension is supported, as ++ ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs. ++ +* :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance + information about the selected set of processors. + @@ -3166,6 +3373,9 @@ index 000000000000..971370894bfd + represent the highest userspace virtual address usable. + +* :c:macro:`RISCV_HWPROBE_KEY_TIME_CSR_FREQ`: Frequency (in Hz) of `time CSR`. ++ ++* :c:macro:`RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE`: An unsigned int which ++ represents the size of the Zicbom block in bytes. diff --git a/Documentation/riscv/index.rst b/Documentation/arch/riscv/index.rst similarity index 100% rename from Documentation/riscv/index.rst @@ -3226,6 +3436,111 @@ index 000000000000..8d36beae9676 + reg = <0xff 0xefc10000 0x0 0x10000>; + status = "disabled"; + }; +diff --git a/Documentation/devicetree/bindings/i2c/lrw,lrw-i2c.yaml b/Documentation/devicetree/bindings/i2c/lrw,lrw-i2c.yaml +new file mode 100644 +index 000000000000..380864aadff8 +--- /dev/null ++++ b/Documentation/devicetree/bindings/i2c/lrw,lrw-i2c.yaml +@@ -0,0 +1,99 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause ++ ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/i2c/lrw-i2c.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: LRW I2C ++ ++maintainers: ++ - Fei Liu ++ - Xiaowei Han ++ - Qingtao Liu ++ ++description: | ++ Should be something similar to "lrw,-i2c" ++ for the I2C as integrated on a particular chip, It supports ++ multiple CPU architectures, currently including e.g. RISC-V and ARM. 
++ ++properties: ++ compatible: ++ const: lrw,sc-i2c ++ ++ reg: ++ maxItems: 1 ++ ++ interrupts: ++ maxItems: 1 ++ ++ clocks: ++ maxItems: 1 ++ ++ clock-frequency: ++ description: | ++ frequency of the bus clock in Hz defaults to 100 kHz when not specified ++ ++ fs_hcnt: ++ description: | ++ High-level duration count in Standard/Fast Mode (100kHz / 400kHz / 1MHz) ++ Represents the number of clock cycles (clk cycles) that the signal remains at high level ++ during Standard or Fast Mode communication ++ ++ fs_lcnt: ++ description: | ++ Low-level duration count in Standard/Fast Mode (100kHz / 400kHz / 1MHz) ++ Represents the number of clock cycles (clk cycles) that the signal remains at low level ++ during Standard or Fast Mode communication ++ ++ hs_hcnt: ++ description: | ++ High-level duration count in High-Speed Mode (>1MHz). ++ Represents the number of clock cycles (clk cycles) that the signal remains at high level ++ during High-Speed Mode communication (baud rate greater than 1MHz) ++ ++ hs_lcnt: ++ description: | ++ Low-level duration count in High-Speed Mode (>1MHz). ++ Represents the number of clock cycles (clk cycles) that the signal remains at low level ++ during High-Speed Mode communication (baud rate greater than 1MHz) ++ ++ sda_hold_time: ++ description: | ++ Hold time configuration for SDA signal, in clock cycles (clk cycles) ++ The value is a 32-bit integer with bitfield division: ++ - Bits [23:16]: Hold time in RX mode (SDA signal hold duration when receiving data) ++ - Bits [15:0]: Hold time in TX mode (SDA signal hold duration when transmitting data) ++ ++ sda_stuck_at_low_timeout: ++ description: | ++ Timeout threshold when SDA signal is stuck at low level, in clock cycles (clk cycles). 
++ When the SDA signal remains low for a duration exceeding this count, the I2C controller ++ will trigger the bus recovery mechanism to restore normal communication ++ ++required: ++ - compatible ++ - interrupts ++ - reg ++ - clock-frequency ++ - clocks ++ ++additionalProperties: false ++ ++examples: ++ - | ++ i2c@825e822000 { ++ compatible = "lrw,sc-i2c"; ++ interrupt-parent = <&iod1_socbar_s0_aplic>; ++ interrupts = <0x77 0x4>; ++ reg = <0x82 0x08121000 0x00 0x1000>; ++ clocks = <&sc_i2c_clk>; ++ clock-frequency = <100000>; ++ fs_hcnt = <0x1ef>; ++ fs_lcnt = <0x1f4>; ++ hs_hcnt = <0x2d>; ++ hs_lcnt = <0x33>; ++ sda_hold_time = <0x10008>; ++ sda_stuck_at_low_timeout = <0x262599>; ++ }; ++ diff --git a/Documentation/devicetree/bindings/iio/adc/thead,th1520-adc.yaml b/Documentation/devicetree/bindings/iio/adc/thead,th1520-adc.yaml new file mode 100644 index 000000000000..a4bb8f1b0e17 @@ -4769,6 +5084,61 @@ index fd195c358446..25ba8cf0cc31 100644 + prescaler = <0x8000>; + status = "okay"; +}; +diff --git a/Documentation/devicetree/bindings/serial/lrw,lrw-uart.yaml b/Documentation/devicetree/bindings/serial/lrw,lrw-uart.yaml +new file mode 100644 +index 000000000000..a2d41c278c4f +--- /dev/null ++++ b/Documentation/devicetree/bindings/serial/lrw,lrw-uart.yaml +@@ -0,0 +1,49 @@ ++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause ++ ++%YAML 1.2 ++--- ++$id: http://devicetree.org/schemas/serial/lrw-uart.yaml# ++$schema: http://devicetree.org/meta-schemas/core.yaml# ++ ++title: LRW serial UART ++ ++maintainers: ++ - Wenhong Liu ++ - Qingtao Liu ++ ++description: | ++ Should be something similar to "lrw,-uart" ++ for the UART as integrated on a particular chip, It supports ++ multiple CPU architectures, currently including e.g. RISC-V and ARM. 
++ ++properties: ++ compatible: ++ const: lrw,lrw-uart ++ ++ reg: ++ maxItems: 1 ++ ++ interrupts: ++ maxItems: 1 ++ ++ clocks: ++ maxItems: 1 ++ ++required: ++ - compatible ++ - reg ++ - current-speed ++ - clocks ++ ++additionalProperties: false ++ ++examples: ++ - | ++ uart0: serial@e0001800 { ++ compatible = "lrw,lrw-uart"; ++ interrupt-parent = <&aplic0>; ++ interrupts = <0x12 0x4>; ++ reg = <0xe0001800 0x100>; ++ clocks = <&bar_clk>; ++ current-speed = <115200>; ++ }; diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml index 17c553123f96..ba5c8cd476c7 100644 --- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml @@ -5371,10 +5741,10 @@ index 000000000000..f4a63904c3bc + }; \ No newline at end of file diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml -index 93258265c6b0..c4037bf14937 100644 +index 7ab77b7e52be..cbe4d295896b 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml -@@ -1440,6 +1440,8 @@ patternProperties: +@@ -1442,6 +1442,8 @@ patternProperties: description: Ufi Space Co., Ltd. "^ugoos,.*": description: Ugoos Industrial Co., Ltd. @@ -5383,6 +5753,15 @@ index 93258265c6b0..c4037bf14937 100644 "^uniwest,.*": description: United Western Technologies Corp (UniWest) "^upisemi,.*": +@@ -1600,6 +1602,8 @@ patternProperties: + description: ZTE Corp. + "^zyxel,.*": + description: ZyXEL Communications Corp. ++ "^lrw,.*": ++ description: LRW Corp. 
+ + # Normal property name match without a comma + # These should catch all node/property names without a prefix diff --git a/Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml b/Documentation/devicetree/bindings/watchdog/xuantie,th1520-wdt.yaml new file mode 100644 index 000000000000..23a2bc07210b @@ -5408,6 +5787,19 @@ index 000000000000..23a2bc07210b + +allOf: + - $ref: watchdog.yaml# +diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt +index 2d3961bfef5d..963ee9c75752 100644 +--- a/Documentation/features/locking/queued-spinlocks/arch-support.txt ++++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt +@@ -21,7 +21,7 @@ + | openrisc: | ok | + | parisc: | TODO | + | powerpc: | ok | +- | riscv: | TODO | ++ | riscv: | ok | + | s390: | TODO | + | sh: | TODO | + | sparc: | ok | diff --git a/Documentation/features/sched/membarrier-sync-core/arch-support.txt b/Documentation/features/sched/membarrier-sync-core/arch-support.txt index 23260ca44946..76597adfb7d5 100644 --- a/Documentation/features/sched/membarrier-sync-core/arch-support.txt @@ -5736,10 +6128,76 @@ index a1ee99c4786e..0f5acfb1012e 100644 - ../../../riscv/patch-acceptance + ../../../arch/riscv/patch-acceptance diff --git a/MAINTAINERS b/MAINTAINERS -index 33eeabab5088..c34609dd468e 100644 +index e6199cc299b1..dd554863f372 100644 --- a/MAINTAINERS +++ b/MAINTAINERS -@@ -13822,7 +13822,9 @@ M: Mathieu Desnoyers +@@ -336,6 +336,7 @@ L: linux-acpi@vger.kernel.org + L: linux-riscv@lists.infradead.org + S: Maintained + F: drivers/acpi/riscv/ ++F: include/linux/acpi_rimt.h + + ACPI PCC(Platform Communication Channel) MAILBOX DRIVER + M: Sudeep Holla +@@ -12547,6 +12548,18 @@ S: Maintained + F: Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml + F: drivers/thermal/loongson2_thermal.c + ++LRW CORE PERF PMU EVENTS (RISC-V) ++M: Lin Shen ++R: Qingtao Liu ++S: Maintained ++F: 
tools/perf/pmu-events/arch/riscv/lrw/ ++ ++LRW DDR PMU DRIVER ++M: Jie Feng ++R: Qingtao Liu ++S: Maintained ++F: drivers/perf/lrw_ddr_pmu.c ++ + LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) + M: Sathya Prakash + M: Sreekanth Reddy +@@ -12640,6 +12653,38 @@ S: Supported + F: drivers/net/pcs/pcs-lynx.c + F: include/linux/pcs-lynx.h + ++LRW SERIAL DEVICE TREE SUPPORT ++M: Wenhong Liu ++R: Qingtao Liu ++L: devicetree@vger.kernel.org ++S: Maintained ++F: Documentation/devicetree/bindings/serial/lrw,lrw-uart.yaml ++ ++LRW SERIAL DRIVER ++M: Wenhong Liu ++R: Qingtao Liu ++L: linux-serial@vger.kernel.org ++S: Maintained ++F: drivers/tty/serial/lrw_uart.c ++ ++LRW I2C DEVICE TREE SUPPORT ++M: Fei Liu ++M: Xiaowei Han ++R: Qingtao Liu ++L: devicetree@vger.kernel.org ++S: Maintained ++F: Documentation/devicetree/bindings/i2c/lrw,lrw-i2c.yaml ++ ++LRW I2C DRIVER ++M: Fei Liu ++M: Xiaowei Han ++R: Qingtao Liu ++L: linux-i2c@vger.kernel.org ++S: Maintained ++F: drivers/i2c/busses/i2c-lrw-core.h ++F: drivers/i2c/busses/i2c-lrw-master.c ++F: drivers/i2c/busses/i2c-lrw-platdrv.c ++ + M68K ARCHITECTURE + M: Geert Uytterhoeven + L: linux-m68k@lists.linux-m68k.org +@@ -13834,7 +13879,9 @@ M: Mathieu Desnoyers M: "Paul E. 
McKenney" L: linux-kernel@vger.kernel.org S: Supported @@ -5749,7 +6207,7 @@ index 33eeabab5088..c34609dd468e 100644 F: include/uapi/linux/membarrier.h F: kernel/sched/membarrier.c -@@ -18571,6 +18573,20 @@ S: Maintained +@@ -18583,6 +18630,20 @@ S: Maintained F: drivers/mtd/nand/raw/r852.c F: drivers/mtd/nand/raw/r852.h @@ -5770,7 +6228,7 @@ index 33eeabab5088..c34609dd468e 100644 RISC-V ARCHITECTURE M: Paul Walmsley M: Palmer Dabbelt -@@ -18579,12 +18595,21 @@ L: linux-riscv@lists.infradead.org +@@ -18591,12 +18652,28 @@ L: linux-riscv@lists.infradead.org S: Supported Q: https://patchwork.kernel.org/project/linux-riscv/list/ C: irc://irc.libera.chat/riscv @@ -5789,11 +6247,33 @@ index 33eeabab5088..c34609dd468e 100644 +T: git git://git.kernel.org/pub/scm/linux/kernel/git/iommu/linux.git +F: Documentation/devicetree/bindings/iommu/riscv,iommu.yaml +F: drivers/iommu/riscv/ ++ ++RISC-V FIRMWARE DRIVERS ++M: Conor Dooley ++L: linux-riscv@lists.infradead.org ++S: Maintained ++T: git git://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git ++F: drivers/firmware/riscv/* + RISC-V MICROCHIP FPGA SUPPORT M: Conor Dooley M: Daire McNamara -@@ -18642,6 +18667,8 @@ M: Fu Wei +@@ -18647,6 +18724,14 @@ F: drivers/perf/riscv_pmu.c + F: drivers/perf/riscv_pmu_legacy.c + F: drivers/perf/riscv_pmu_sbi.c + ++RISC-V SSE DRIVER ++M: Clément Léger ++R: Himanshu Chauhan ++L: linux-riscv@lists.infradead.org ++S: Maintained ++F: drivers/firmware/riscv/riscv_sse.c ++F: include/linux/riscv_sse.h ++ + RISC-V THEAD SoC SUPPORT + M: Jisheng Zhang + M: Guo Ren +@@ -18654,6 +18739,8 @@ M: Fu Wei L: linux-riscv@lists.infradead.org S: Maintained F: arch/riscv/boot/dts/thead/ @@ -5803,10 +6283,10 @@ index 33eeabab5088..c34609dd468e 100644 RNBD BLOCK DRIVERS M: Md. 
Haris Iqbal diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 95974b69e202..9193cd32a807 100644 +index c3b38c890b45..a736a98cc77b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig -@@ -1632,7 +1632,6 @@ config ARM64_BOOTPARAM_HOTPLUG_CPU0 +@@ -1633,7 +1633,6 @@ config ARM64_BOOTPARAM_HOTPLUG_CPU0 config NUMA bool "NUMA Memory Allocation and Scheduler Support" select GENERIC_ARCH_NUMA @@ -5830,6 +6310,97 @@ index 2c29239d05c3..846c563689a8 100644 } #endif +diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c +index 636be6715155..532d72ea42ee 100644 +--- a/arch/arm64/kernel/kexec_image.c ++++ b/arch/arm64/kernel/kexec_image.c +@@ -122,9 +122,9 @@ static void *image_load(struct kimage *image, + kernel_segment->memsz -= text_offset; + image->start = kernel_segment->mem; + +- pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", +- kernel_segment->mem, kbuf.bufsz, +- kernel_segment->memsz); ++ kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ kernel_segment->mem, kbuf.bufsz, ++ kernel_segment->memsz); + + return NULL; + } +diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c +index 40607a4fe3a5..314cd24133e6 100644 +--- a/arch/arm64/kernel/machine_kexec.c ++++ b/arch/arm64/kernel/machine_kexec.c +@@ -33,26 +33,12 @@ + static void _kexec_image_info(const char *func, int line, + const struct kimage *kimage) + { +- unsigned long i; +- +- pr_debug("%s:%d:\n", func, line); +- pr_debug(" kexec kimage info:\n"); +- pr_debug(" type: %d\n", kimage->type); +- pr_debug(" start: %lx\n", kimage->start); +- pr_debug(" head: %lx\n", kimage->head); +- pr_debug(" nr_segments: %lu\n", kimage->nr_segments); +- pr_debug(" dtb_mem: %pa\n", &kimage->arch.dtb_mem); +- pr_debug(" kern_reloc: %pa\n", &kimage->arch.kern_reloc); +- pr_debug(" el2_vectors: %pa\n", &kimage->arch.el2_vectors); +- +- for (i = 0; i < kimage->nr_segments; i++) { +- pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, 
%lu pages\n", +- i, +- kimage->segment[i].mem, +- kimage->segment[i].mem + kimage->segment[i].memsz, +- kimage->segment[i].memsz, +- kimage->segment[i].memsz / PAGE_SIZE); +- } ++ kexec_dprintk("%s:%d:\n", func, line); ++ kexec_dprintk(" kexec kimage info:\n"); ++ kexec_dprintk(" type: %d\n", kimage->type); ++ kexec_dprintk(" head: %lx\n", kimage->head); ++ kexec_dprintk(" kern_reloc: %pa\n", &kimage->arch.kern_reloc); ++ kexec_dprintk(" el2_vectors: %pa\n", &kimage->arch.el2_vectors); + } + + void machine_kexec_cleanup(struct kimage *kimage) +diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c +index a11a6e14ba89..0e017358f4ba 100644 +--- a/arch/arm64/kernel/machine_kexec_file.c ++++ b/arch/arm64/kernel/machine_kexec_file.c +@@ -127,8 +127,8 @@ int load_other_segments(struct kimage *image, + image->elf_load_addr = kbuf.mem; + image->elf_headers_sz = headers_sz; + +- pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", +- image->elf_load_addr, kbuf.bufsz, kbuf.memsz); ++ kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ image->elf_load_addr, kbuf.bufsz, kbuf.memsz); + } + + /* load initrd */ +@@ -148,8 +148,8 @@ int load_other_segments(struct kimage *image, + goto out_err; + initrd_load_addr = kbuf.mem; + +- pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", +- initrd_load_addr, kbuf.bufsz, kbuf.memsz); ++ kexec_dprintk("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ initrd_load_addr, kbuf.bufsz, kbuf.memsz); + } + + /* load dtb */ +@@ -179,8 +179,8 @@ int load_other_segments(struct kimage *image, + image->arch.dtb = dtb; + image->arch.dtb_mem = kbuf.mem; + +- pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n", +- kbuf.mem, kbuf.bufsz, kbuf.memsz); ++ kexec_dprintk("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ kbuf.mem, kbuf.bufsz, kbuf.memsz); + + return 0; + diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c index 
f872c57e9909..fd9a7bed83ce 100644 --- a/arch/arm64/kernel/pci.c @@ -6050,10 +6621,10 @@ index 53faa122b0f4..88182df75060 100644 select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_SUPPORTS_ACPI diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig -index 8739e15c137b..4ce4b491edcd 100644 +index 11d4cce41a4f..1bca19a32689 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig -@@ -468,7 +468,6 @@ config NR_CPUS +@@ -469,7 +469,6 @@ config NR_CPUS config NUMA bool "NUMA Support" select SMP @@ -6112,22 +6683,160 @@ index 40e40a7eb94a..f4440edcd8fe 100644 pud = ptdesc_address(ptdesc); pud_init(pud); +diff --git a/arch/parisc/kernel/kexec_file.c b/arch/parisc/kernel/kexec_file.c +index 8c534204f0fd..3fc82130b6c3 100644 +--- a/arch/parisc/kernel/kexec_file.c ++++ b/arch/parisc/kernel/kexec_file.c +@@ -38,8 +38,8 @@ static void *elf_load(struct kimage *image, char *kernel_buf, + for (i = 0; i < image->nr_segments; i++) + image->segment[i].mem = __pa(image->segment[i].mem); + +- pr_debug("Loaded the kernel at 0x%lx, entry at 0x%lx\n", +- kernel_load_addr, image->start); ++ kexec_dprintk("Loaded the kernel at 0x%lx, entry at 0x%lx\n", ++ kernel_load_addr, image->start); + + if (initrd != NULL) { + kbuf.buffer = initrd; +@@ -51,7 +51,7 @@ static void *elf_load(struct kimage *image, char *kernel_buf, + if (ret) + goto out; + +- pr_debug("Loaded initrd at 0x%lx\n", kbuf.mem); ++ kexec_dprintk("Loaded initrd at 0x%lx\n", kbuf.mem); + image->arch.initrd_start = kbuf.mem; + image->arch.initrd_end = kbuf.mem + initrd_len; + } +@@ -68,7 +68,7 @@ static void *elf_load(struct kimage *image, char *kernel_buf, + if (ret) + goto out; + +- pr_debug("Loaded cmdline at 0x%lx\n", kbuf.mem); ++ kexec_dprintk("Loaded cmdline at 0x%lx\n", kbuf.mem); + image->arch.cmdline = kbuf.mem; + } + out: +diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c +index eeb258002d1e..904016cf89ea 100644 +--- a/arch/powerpc/kexec/elf_64.c ++++ b/arch/powerpc/kexec/elf_64.c +@@ 
-59,7 +59,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, + if (ret) + goto out; + +- pr_debug("Loaded the kernel at 0x%lx\n", kernel_load_addr); ++ kexec_dprintk("Loaded the kernel at 0x%lx\n", kernel_load_addr); + + ret = kexec_load_purgatory(image, &pbuf); + if (ret) { +@@ -67,7 +67,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, + goto out; + } + +- pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem); ++ kexec_dprintk("Loaded purgatory at 0x%lx\n", pbuf.mem); + + /* Load additional segments needed for panic kernel */ + if (image->type == KEXEC_TYPE_CRASH) { +@@ -99,7 +99,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, + goto out; + initrd_load_addr = kbuf.mem; + +- pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr); ++ kexec_dprintk("Loaded initrd at 0x%lx\n", initrd_load_addr); + } + + fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr, +@@ -132,7 +132,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf, + + fdt_load_addr = kbuf.mem; + +- pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr); ++ kexec_dprintk("Loaded device tree at 0x%lx\n", fdt_load_addr); + + slave_code = elf_info.buffer + elf_info.proghdrs[0].p_offset; + ret = setup_purgatory_ppc64(image, slave_code, fdt, kernel_load_addr, +diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c +index 7b71737ae24c..26616fbd0ffe 100644 +--- a/arch/powerpc/kexec/file_load_64.c ++++ b/arch/powerpc/kexec/file_load_64.c +@@ -577,7 +577,7 @@ static int add_usable_mem_property(void *fdt, struct device_node *dn, + NODE_PATH_LEN, dn); + return -EOVERFLOW; + } +- pr_debug("Memory node path: %s\n", path); ++ kexec_dprintk("Memory node path: %s\n", path); + + /* Now that we know the path, find its offset in kdump kernel's fdt */ + node = fdt_path_offset(fdt, path); +@@ -590,8 +590,8 @@ static int add_usable_mem_property(void *fdt, struct device_node *dn, + /* Get the address & size cells */ + 
n_mem_addr_cells = of_n_addr_cells(dn); + n_mem_size_cells = of_n_size_cells(dn); +- pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells, +- n_mem_size_cells); ++ kexec_dprintk("address cells: %d, size cells: %d\n", n_mem_addr_cells, ++ n_mem_size_cells); + + um_info->idx = 0; + if (!check_realloc_usable_mem(um_info, 2)) { +@@ -664,7 +664,7 @@ static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem) + + node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory"); + if (node == -FDT_ERR_NOTFOUND) +- pr_debug("No dynamic reconfiguration memory found\n"); ++ kexec_dprintk("No dynamic reconfiguration memory found\n"); + else if (node < 0) { + pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n"); + return -EINVAL; +@@ -776,8 +776,8 @@ static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr) + for (i = 0; i < ehdr->e_phnum; i++) { + if (phdr->p_paddr == BACKUP_SRC_START) { + phdr->p_offset = image->arch.backup_start; +- pr_debug("Backup region offset updated to 0x%lx\n", +- image->arch.backup_start); ++ kexec_dprintk("Backup region offset updated to 0x%lx\n", ++ image->arch.backup_start); + return; + } + } +@@ -850,7 +850,7 @@ int load_crashdump_segments_ppc64(struct kimage *image, + pr_err("Failed to load backup segment\n"); + return ret; + } +- pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem); ++ kexec_dprintk("Loaded the backup region at 0x%lx\n", kbuf->mem); + + /* Load elfcorehdr segment - to export crashing kernel's vmcore */ + ret = load_elfcorehdr_segment(image, kbuf); +@@ -858,8 +858,8 @@ int load_crashdump_segments_ppc64(struct kimage *image, + pr_err("Failed to load elfcorehdr segment\n"); + return ret; + } +- pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n", +- image->elf_load_addr, kbuf->bufsz, kbuf->memsz); ++ kexec_dprintk("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n", ++ image->elf_load_addr, kbuf->bufsz, kbuf->memsz); + 
+ return 0; + } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig -index 3be10e723b2c..a5be297fc1b6 100644 +index 3be10e723b2c..2402e7a0a677 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig -@@ -13,7 +13,10 @@ config 32BIT +@@ -13,7 +13,11 @@ config 32BIT config RISCV def_bool y select ACPI_GENERIC_GSI if ACPI + select ACPI_PPTT if ACPI + select ACPI_MCFG if (ACPI && PCI) select ACPI_REDUCED_HARDWARE_ONLY if ACPI ++ select ACPI_RIMT if ACPI + select ACPI_SPCR_TABLE if ACPI select ARCH_DMA_DEFAULT_COHERENT select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 -@@ -26,20 +29,25 @@ config RISCV +@@ -26,20 +30,26 @@ config RISCV select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE @@ -6149,11 +6858,18 @@ index 3be10e723b2c..a5be297fc1b6 100644 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_HAS_VDSO_DATA ++ select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_KEEP_MEMBLOCK if ACPI select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_STACKWALK -@@ -64,7 +72,7 @@ config RISCV +@@ -59,12 +69,13 @@ config RISCV + select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL + select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP + select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE ++ select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS + select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU + select BUILDTIME_TABLE_SORT if MMU select CLINT_TIMER if !MMU select CLONE_BACKWARDS select COMMON_CLK @@ -6162,7 +6878,15 @@ index 3be10e723b2c..a5be297fc1b6 100644 select EDAC_SUPPORT select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE) select GENERIC_ARCH_TOPOLOGY -@@ -119,6 +127,7 @@ config RISCV +@@ -90,6 +101,7 @@ config RISCV + select GENERIC_VDSO_TIME_NS if HAVE_GENERIC_VDSO + select HARDIRQS_SW_RESEND + select HAS_IOPORT if MMU 
++ select HAVE_ALIGNED_STRUCT_PAGE + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP + select HAVE_ARCH_HUGE_VMAP if MMU && 64BIT && !XIP_KERNEL +@@ -119,6 +131,7 @@ config RISCV select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION select HAVE_EBPF_JIT if MMU @@ -6170,7 +6894,15 @@ index 3be10e723b2c..a5be297fc1b6 100644 select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_ERROR_INJECTION select HAVE_GCC_PLUGINS -@@ -147,14 +156,18 @@ config RISCV +@@ -132,6 +145,7 @@ config RISCV + select HAVE_MOVE_PMD + select HAVE_MOVE_PUD + select HAVE_PCI ++ select HAVE_ACPI_APEI if ACPI + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP +@@ -147,14 +161,18 @@ config RISCV select IRQ_FORCED_THREADING select KASAN_VMALLOC if KASAN select LOCK_MM_AND_FIND_VMA @@ -6189,7 +6921,7 @@ index 3be10e723b2c..a5be297fc1b6 100644 select RISCV_INTC select RISCV_TIMER if RISCV_SBI select SIFIVE_PLIC -@@ -223,6 +236,20 @@ config KASAN_SHADOW_OFFSET +@@ -223,6 +241,20 @@ config KASAN_SHADOW_OFFSET default 0xdfffffff00000000 if 64BIT default 0xffffffff if 32BIT @@ -6210,7 +6942,7 @@ index 3be10e723b2c..a5be297fc1b6 100644 config ARCH_FLATMEM_ENABLE def_bool !NUMA -@@ -281,6 +308,7 @@ config RISCV_DMA_NONCOHERENT +@@ -281,6 +313,7 @@ config RISCV_DMA_NONCOHERENT select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_SYNC_DMA_FOR_DEVICE select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB @@ -6218,7 +6950,7 @@ index 3be10e723b2c..a5be297fc1b6 100644 config RISCV_NONSTANDARD_CACHE_OPS bool -@@ -298,6 +326,7 @@ config AS_HAS_OPTION_ARCH +@@ -298,6 +331,7 @@ config AS_HAS_OPTION_ARCH source "arch/riscv/Kconfig.socs" source "arch/riscv/Kconfig.errata" @@ -6226,7 +6958,47 @@ index 3be10e723b2c..a5be297fc1b6 100644 menu "Platform type" -@@ -507,7 +536,7 @@ config RISCV_ISA_V +@@ -432,6 +466,39 @@ config NODES_SHIFT + Specify the maximum number of NUMA Nodes available on 
the target + system. Increases memory reserved to accommodate various tables. + ++choice ++ prompt "RISC-V spinlock type" ++ default RISCV_COMBO_SPINLOCKS ++ ++config RISCV_TICKET_SPINLOCKS ++ bool "Using ticket spinlock" ++ ++config RISCV_QUEUED_SPINLOCKS ++ bool "Using queued spinlock" ++ depends on SMP && MMU && NONPORTABLE ++ select ARCH_USE_QUEUED_SPINLOCKS ++ help ++ The queued spinlock implementation requires the forward progress ++ guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or ++ LR/SC with Ziccrse provide such guarantee. ++ ++ Select this if and only if Zabha or Ziccrse is available on your ++ platform, RISCV_QUEUED_SPINLOCKS must not be selected for platforms ++ without one of those extensions. ++ ++ If unsure, select RISCV_COMBO_SPINLOCKS, which will use qspinlocks ++ when supported and otherwise ticket spinlocks. ++ ++config RISCV_COMBO_SPINLOCKS ++ bool "Using combo spinlock" ++ depends on SMP && MMU ++ select ARCH_USE_QUEUED_SPINLOCKS ++ help ++ Embed both queued spinlock and ticket lock so that the spinlock ++ implementation can be chosen at runtime. ++ ++endchoice ++ + config RISCV_ALTERNATIVE + bool + depends on !XIP_KERNEL +@@ -507,7 +574,7 @@ config RISCV_ISA_V depends on TOOLCHAIN_HAS_V depends on FPU select DYNAMIC_SIGFRAME @@ -6235,7 +7007,7 @@ index 3be10e723b2c..a5be297fc1b6 100644 help Say N here if you want to disable all vector related procedure in the kernel. -@@ -525,6 +554,75 @@ config RISCV_ISA_V_DEFAULT_ENABLE +@@ -525,6 +592,75 @@ config RISCV_ISA_V_DEFAULT_ENABLE If you don't know what to do here, say Y. 
@@ -6311,7 +7083,26 @@ index 3be10e723b2c..a5be297fc1b6 100644 config TOOLCHAIN_HAS_ZBB bool default y -@@ -549,6 +647,29 @@ config RISCV_ISA_ZBB +@@ -533,6 +669,18 @@ config TOOLCHAIN_HAS_ZBB + depends on LLD_VERSION >= 150000 || LD_VERSION >= 23900 + depends on AS_HAS_OPTION_ARCH + ++config RISCV_ISA_ZBA ++ bool "Zba extension support for bit manipulation instructions" ++ default y ++ help ++ Add support for enabling optimisations in the kernel when the Zba ++ extension is detected at boot. ++ ++ The Zba extension provides instructions to accelerate the generation ++ of addresses that index into arrays of basic data types. ++ ++ If you don't know what to do here, say Y. ++ + config RISCV_ISA_ZBB + bool "Zbb extension support for bit manipulation instructions" + depends on TOOLCHAIN_HAS_ZBB +@@ -549,6 +697,29 @@ config RISCV_ISA_ZBB If you don't know what to do here, say Y. @@ -6341,21 +7132,33 @@ index 3be10e723b2c..a5be297fc1b6 100644 config RISCV_ISA_ZICBOM bool "Zicbom extension support for non-coherent DMA operation" depends on MMU -@@ -579,13 +700,6 @@ config RISCV_ISA_ZICBOZ +@@ -579,12 +750,20 @@ config RISCV_ISA_ZICBOZ If you don't know what to do here, say Y. -config TOOLCHAIN_HAS_ZIHINTPAUSE - bool -- default y ++config RISCV_ISA_ZICBOP ++ bool "Zicbop extension support for cache block prefetch" ++ depends on MMU ++ depends on RISCV_ALTERNATIVE + default y - depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zihintpause) - depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause) - depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600 -- ++ help ++ Adds support to dynamically detect the presence of the ZICBOP ++ extension (Cache Block Prefetch Operations) and enable its ++ usage. ++ ++ The Zicbop extension can be used to prefetch cache blocks for ++ read/write fetch. ++ ++ If you don't know what to do here, say Y. 
+ config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI def_bool y - # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc -@@ -697,6 +811,20 @@ config ARCH_SUPPORTS_KEXEC_PURGATORY +@@ -697,6 +876,20 @@ config ARCH_SUPPORTS_KEXEC_PURGATORY config ARCH_SUPPORTS_CRASH_DUMP def_bool y @@ -6376,7 +7179,7 @@ index 3be10e723b2c..a5be297fc1b6 100644 config COMPAT bool "Kernel support for 32-bit U-mode" default 64BIT -@@ -709,6 +837,25 @@ config COMPAT +@@ -709,6 +902,25 @@ config COMPAT If you want to execute 32-bit userspace applications, say Y. @@ -6402,7 +7205,7 @@ index 3be10e723b2c..a5be297fc1b6 100644 config RELOCATABLE bool "Build a relocatable kernel" depends on MMU && 64BIT && !XIP_KERNEL -@@ -811,6 +958,17 @@ config EFI +@@ -811,6 +1023,17 @@ config EFI allow the kernel to be booted as an EFI application. This is only useful on systems that have UEFI firmware. @@ -6421,7 +7224,7 @@ index 3be10e723b2c..a5be297fc1b6 100644 def_bool $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=tp -mstack-protector-guard-offset=0) diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs -index 30fd6a512828..046bbbfb0010 100644 +index 30fd6a512828..a5fb3bc6f716 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -22,6 +22,11 @@ config SOC_SIFIVE @@ -6460,7 +7263,7 @@ index 30fd6a512828..046bbbfb0010 100644 config ARCH_VIRT def_bool SOC_VIRT -@@ -111,4 +133,41 @@ config SOC_CANAAN_K210_DTB_SOURCE +@@ -111,4 +133,16 @@ config SOC_CANAAN_K210_DTB_SOURCE endif # ARCH_CANAAN @@ -6470,36 +7273,11 @@ index 30fd6a512828..046bbbfb0010 100644 + help + This enables support for Spacemit SoCs platform hardware. + -+if SOC_SPACEMIT -+ -+choice -+ prompt "Spacemit SoCs platform" -+ default SOC_SPACEMIT_K1 -+ help -+ choice Spacemit Soc platform -+ -+ config SOC_SPACEMIT_K1 -+ bool "k1" -+ help -+ select Spacemit k1 Platform SoCs. 
-+endchoice -+ -+if SOC_SPACEMIT_K1 -+ -+choice -+ prompt "Spacemit K1 serial SoCs" -+ default SOC_SPACEMIT_K1X ++config SOC_SPACEMIT_K1 ++ bool "Spacemit k1 Platform SoCs." ++ depends on SOC_SPACEMIT + help -+ choice Spacemit K1 SoC platform -+ -+ config SOC_SPACEMIT_K1X -+ bool "k1-x" -+ help -+ This enables support for Spacemit k1-x Platform Hardware. -+endchoice -+ -+endif -+endif ++ select Spacemit k1 Platform SoCs. + endmenu # "SoC selection" diff --git a/arch/riscv/Kconfig.vendor b/arch/riscv/Kconfig.vendor @@ -14694,15 +15472,15 @@ index 000000000000..57f304fc778f +}; diff --git a/arch/riscv/boot/dts/spacemit/Makefile b/arch/riscv/boot/dts/spacemit/Makefile new file mode 100644 -index 000000000000..bc18f5f5cec9 +index 000000000000..492746086409 --- /dev/null +++ b/arch/riscv/boot/dts/spacemit/Makefile @@ -0,0 +1,2 @@ -+dtb-$(CONFIG_SOC_SPACEMIT_K1X) += k1-bananapi-f3.dtb ++dtb-$(CONFIG_SOC_SPACEMIT_K1) += k1-bananapi-f3.dtb +obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y)) diff --git a/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts new file mode 100644 -index 000000000000..16f7a19f701f +index 000000000000..64f848173446 --- /dev/null +++ b/arch/riscv/boot/dts/spacemit/k1-bananapi-f3.dts @@ -0,0 +1,448 @@ @@ -14711,8 +15489,8 @@ index 000000000000..16f7a19f701f + +/dts-v1/; + -+#include "k1-x.dtsi" -+#include "k1-x_pinctrl.dtsi" ++#include "k1.dtsi" ++#include "k1_pinctrl.dtsi" + +/ { + model = "Banana Pi BPI-F3"; @@ -14728,7 +15506,7 @@ index 000000000000..16f7a19f701f + }; + + chosen { -+ bootargs = "earlycon=sbi console=ttySP0,115200n8 loglevel=8 rdinit=/init"; ++ bootargs = "earlycon=sbi console=ttyS0,115200n8 loglevel=8 rdinit=/init"; + stdout-path = "serial0:115200n8"; + }; +}; @@ -15154,24 +15932,24 @@ index 000000000000..16f7a19f701f + }; + }; +}; -diff --git a/arch/riscv/boot/dts/spacemit/k1-x.dtsi b/arch/riscv/boot/dts/spacemit/k1-x.dtsi +diff --git a/arch/riscv/boot/dts/spacemit/k1.dtsi 
b/arch/riscv/boot/dts/spacemit/k1.dtsi new file mode 100644 -index 000000000000..3c7e2ad81529 +index 000000000000..1c89ea8f0f80 --- /dev/null -+++ b/arch/riscv/boot/dts/spacemit/k1-x.dtsi ++++ b/arch/riscv/boot/dts/spacemit/k1.dtsi @@ -0,0 +1,1221 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2022 Spacemit, Inc */ + +/dts-v1/; + -+#include -+#include ++#include ++#include +#include +#include + +/ { -+ compatible = "spacemit,k1-x"; ++ compatible = "spacemit,k1"; + #address-cells = <2>; + #size-cells = <2>; + @@ -15638,7 +16416,7 @@ index 000000000000..3c7e2ad81529 + }; + + ccu: clock-controller@d4050000 { -+ compatible = "spacemit,k1x-clock"; ++ compatible = "spacemit,k1-clock"; + reg = <0x0 0xd4050000 0x0 0x209c>, + <0x0 0xd4282800 0x0 0x400>, + <0x0 0xd4015000 0x0 0x1000>, @@ -15661,7 +16439,7 @@ index 000000000000..3c7e2ad81529 + }; + + reset: reset-controller@d4050000 { -+ compatible = "spacemit,k1x-reset"; ++ compatible = "spacemit,k1-reset"; + reg = <0x0 0xd4050000 0x0 0x209c>, + <0x0 0xd4282800 0x0 0x400>, + <0x0 0xd4015000 0x0 0x1000>, @@ -15699,7 +16477,7 @@ index 000000000000..3c7e2ad81529 + }; + + pinctrl: pinctrl@d401e000 { -+ compatible = "pinctrl-spacemit-k1x"; ++ compatible = "pinctrl-spacemit-k1"; + reg = <0x0 0xd401e000 0x0 0x250>, + <0x0 0xd4019800 0x0 0x10>, + <0x0 0xd4019000 0x0 0x800>; @@ -15743,12 +16521,12 @@ index 000000000000..3c7e2ad81529 + }; + + uart0: serial@d4017000 { -+ compatible = "spacemit,k1x-uart"; ++ compatible = "spacemit,k1-uart", "intel,xscale-uart"; + reg = <0x0 0xd4017000 0x0 0x100>; + interrupt-parent = <&intc>; + interrupts = <42>; + clocks = <&ccu CLK_UART1>, <&ccu CLK_SLOW_UART>; -+ clock-names = "func", "gate"; ++ clock-names = "core", "bus"; + resets = <&reset RESET_UART1>; + reg-shift = <2>; + reg-io-width = <4>; @@ -15756,89 +16534,89 @@ index 000000000000..3c7e2ad81529 + }; + + uart2: uart@d4017100 { -+ compatible = "spacemit,k1x-uart"; ++ compatible = "spacemit,k1-uart", "intel,xscale-uart"; + 
reg = <0x0 0xd4017100 0x0 0x100>; + interrupt-parent = <&intc>; + interrupts = <44>; + clocks = <&ccu CLK_UART2>, <&ccu CLK_SLOW_UART>; -+ clock-names = "func", "gate"; ++ clock-names = "core", "bus"; + resets = <&reset RESET_UART2>; + status = "disabled"; + }; + + uart3: uart@d4017200 { -+ compatible = "spacemit,k1x-uart"; ++ compatible = "spacemit,k1-uart", "intel,xscale-uart"; + reg = <0x0 0xd4017200 0x0 0x100>; + interrupt-parent = <&intc>; + interrupts = <45>; + clocks = <&ccu CLK_UART3>, <&ccu CLK_SLOW_UART>; -+ clock-names = "func", "gate"; ++ clock-names = "core", "bus"; + resets = <&reset RESET_UART3>; + status = "disabled"; + }; + + uart4: uart@d4017300 { -+ compatible = "spacemit,k1x-uart"; ++ compatible = "spacemit,k1-uart", "intel,xscale-uart"; + interrupt-parent = <&intc>; + reg = <0x0 0xd4017300 0x0 0x100>; + interrupts = <46>; + clocks = <&ccu CLK_UART4>, <&ccu CLK_SLOW_UART>; -+ clock-names = "func", "gate"; ++ clock-names = "core", "bus"; + resets = <&reset RESET_UART4>; + status = "disabled"; + }; + + uart5: uart@d4017400 { -+ compatible = "spacemit,k1x-uart"; ++ compatible = "spacemit,k1-uart", "intel,xscale-uart"; + interrupt-parent = <&intc>; + reg = <0x0 0xd4017400 0x0 0x100>; + interrupts = <47>; + clocks = <&ccu CLK_UART5>, <&ccu CLK_SLOW_UART>; -+ clock-names = "func", "gate"; ++ clock-names = "core", "bus"; + resets = <&reset RESET_UART5>; + status = "disabled"; + }; + + uart6: uart@d4017500 { -+ compatible = "spacemit,k1x-uart"; ++ compatible = "spacemit,k1-uart", "intel,xscale-uart"; + interrupt-parent = <&intc>; + reg = <0x0 0xd4017500 0x0 0x100>; + interrupts = <48>; + clocks = <&ccu CLK_UART6>, <&ccu CLK_SLOW_UART>; -+ clock-names = "func", "gate"; ++ clock-names = "core", "bus"; + resets = <&reset RESET_UART6>; + status = "disabled"; + }; + + uart7: uart@d4017600 { -+ compatible = "spacemit,k1x-uart"; ++ compatible = "spacemit,k1-uart", "intel,xscale-uart"; + interrupt-parent = <&intc>; + reg = <0x0 0xd4017600 0x0 0x100>; + 
interrupts = <49>; + clocks = <&ccu CLK_UART7>, <&ccu CLK_SLOW_UART>; -+ clock-names = "func", "gate"; ++ clock-names = "core", "bus"; + resets = <&reset RESET_UART7>; + status = "disabled"; + }; + + uart8: uart@d4017700 { -+ compatible = "spacemit,k1x-uart"; ++ compatible = "spacemit,k1-uart", "intel,xscale-uart"; + interrupt-parent = <&intc>; + reg = <0x0 0xd4017700 0x0 0x100>; + interrupts = <50>; + clocks = <&ccu CLK_UART8>, <&ccu CLK_SLOW_UART>; -+ clock-names = "func", "gate"; ++ clock-names = "core", "bus"; + resets = <&reset RESET_UART8>; + status = "disabled"; + }; + + uart9: uart@d4017800 { -+ compatible = "spacemit,k1x-uart"; ++ compatible = "spacemit,k1-uart", "intel,xscale-uart"; + interrupt-parent = <&intc>; + reg = <0x0 0xd4017800 0x0 0x100>; + interrupts = <51>; + clocks = <&ccu CLK_UART9>, <&ccu CLK_SLOW_UART>; -+ clock-names = "func", "gate"; ++ clock-names = "core", "bus"; + resets = <&reset RESET_UART9>; + status = "disabled"; + }; @@ -16265,7 +17043,7 @@ index 000000000000..3c7e2ad81529 + }; + + gpio: gpio@d4019000 { -+ compatible = "spacemit,k1x-gpio"; ++ compatible = "spacemit,k1-gpio"; + reg = <0x0 0xd4019000 0x0 0x800>; + gpio-controller; + #gpio-cells = <2>; @@ -16381,1200 +17159,1200 @@ index 000000000000..3c7e2ad81529 + }; + }; +}; -diff --git a/arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi b/arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi +diff --git a/arch/riscv/boot/dts/spacemit/k1_pinctrl.dtsi b/arch/riscv/boot/dts/spacemit/k1_pinctrl.dtsi new file mode 100644 -index 000000000000..46b826f6b681 +index 000000000000..42a6e499deee --- /dev/null -+++ b/arch/riscv/boot/dts/spacemit/k1-x_pinctrl.dtsi ++++ b/arch/riscv/boot/dts/spacemit/k1_pinctrl.dtsi @@ -0,0 +1,1192 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Copyright (c) 2023 Spacemit, Inc */ + -+#include ++#include +/* Pin Configuration Node: */ +/* Format: */ +&pinctrl { + pinctrl_uart0_0: uart0_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_DAT3, MUX_MODE3, (EDGE_NONE 
| PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(MMC1_DAT2, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(MMC1_DAT3) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(MMC1_DAT2) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_uart0_1: uart0_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_CMD, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_80, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(MMC1_CMD) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_80) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_uart0_2: uart0_2_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_68, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_69, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_68) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_69) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart2: uart2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_21, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_22, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_23, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_24, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_21) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_22) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_23) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_24) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart3_0: uart3_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_81, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_82, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_83, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_84, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_81) MUX_MODE2 (EDGE_NONE | PULL_UP | 
PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_82) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_83) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_84) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart3_1: uart3_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_18, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_19, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_20, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_21, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_18) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_19) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_20) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_21) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart3_2: uart3_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_53, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_54, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_55, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_56, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_53) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_54) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_55) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_56) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart4_0: uart4_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(QSPI_DAT1, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(QSPI_DAT0, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(QSPI_DAT1) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(QSPI_DAT0) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_uart4_1: uart4_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_81, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) 
-+ K1X_PADCONF(GPIO_82, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_83, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_84, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_81) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_82) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_83) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_84) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart4_2: uart4_2_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_23, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_24, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_23) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_24) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart4_3: uart4_3_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_33, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_34, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_35, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_36, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_33) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_34) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_35) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_36) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart4_4: uart4_4_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_111, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_112, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_113, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_114, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_111) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_112) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ 
K1_PINCFGID(GPIO_113) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_114) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart5_0: uart5_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(QSPI_CLK, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(QSPI_CSI, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(QSPI_CLK) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(QSPI_CSI) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_uart5_1: uart5_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_25, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_26, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_27, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_28, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_25) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_26) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_27) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_28) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart5_2: uart5_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_42, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_43, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_44, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_45, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_42) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_43) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_44) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_45) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart5_3: uart5_3_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(PRI_TDI, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TMS, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ 
K1X_PADCONF(PRI_TCK, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TDO, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(PRI_TDI) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TMS) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TCK) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TDO) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart6_0: uart6_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_85, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_86, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_87, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_90, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_85) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_86) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_87) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_90) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart6_1: uart6_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_00, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_01, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_02, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_03, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_00) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_01) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_02) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_03) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart6_2: uart6_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_56, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_57, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_56) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ 
K1_PINCFGID(GPIO_57) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart7_0: uart7_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_88, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_89, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_88) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_89) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart7_1: uart7_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_04, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_05, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_06, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_07, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_04) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_05) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_06) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_07) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart8_0: uart8_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_82, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_83, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_82) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_83) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart8_1: uart8_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_08, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_09, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_10, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_11, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_08) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_09) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_10) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ 
K1_PINCFGID(GPIO_11) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart8_2: uart8_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_75, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_76, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_77, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_78, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_75) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_76) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_77) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_78) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_uart9_0: uart9_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_12, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_13, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_12) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_13) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart9_1: uart9_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_110, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_115, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_116, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_117, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_110) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_115) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_116) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_117) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_uart9_2: uart9_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(PRI_TCK, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TDO, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(PRI_TCK) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ 
K1_PINCFGID(PRI_TDO) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c0: i2c0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_54, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_55, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_54) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_55) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_r_uart1: r_uart1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_49, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_50, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_51, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_52, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_49) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_50) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_51) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_52) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_i2c1: i2c1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_56, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_57, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_56) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_57) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c2_0: i2c2_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_84, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_85, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_84) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_85) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c2_1: i2c2_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(PRI_TDI, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TMS, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(PRI_TDI) MUX_MODE2 
(EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TMS) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c2_2: i2c2_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_68, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_69, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_68) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_69) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c3_0: i2c3_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_38, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_39, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_38) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_39) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c3_1: i2c3_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_47, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_48, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_47) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_48) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c3_2: i2c3_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_77, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_78, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_77) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_78) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c4_0: i2c4_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_40, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_41, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_40) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_41) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c4_1: i2c4_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_75, MUX_MODE5, (EDGE_NONE | PULL_UP | 
PAD_3V_DS2)) -+ K1X_PADCONF(GPIO_76, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ K1_PINCFGID(GPIO_75) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_3V_DS2) ++ K1_PINCFGID(GPIO_76) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_3V_DS2) + >; + }; + + pinctrl_i2c4_2: i2c4_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_51, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) -+ K1X_PADCONF(GPIO_52, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_3V_DS2)) ++ K1_PINCFGID(GPIO_51) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_3V_DS2) ++ K1_PINCFGID(GPIO_52) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_3V_DS2) + >; + }; + + pinctrl_i2c5_0: i2c5_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_81, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_82, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_81) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_82) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c5_1: i2c5_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_54, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_55, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_54) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_55) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c6_0: i2c6_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_83, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_90, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_83) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_90) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c6_1: i2c6_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_118, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_119, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_118) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_119) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + 
pinctrl_i2c6_2: i2c6_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_56, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_57, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_56) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_57) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c7: i2c7_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_118, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_119, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_118) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_119) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_i2c8: i2c8_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(PWR_SCL, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(PWR_SDA, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(PWR_SCL) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(PWR_SDA) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_one_wire_0: one_wire_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_110, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_110) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_one_wire_1: one_wire_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_47, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_47) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_ir_rx_0: ir_rx_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(DVL1, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(DVL1) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_ir_rx_1: ir_rx_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_79, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_79) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_ir_rx_2: ir_rx_2_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_58, MUX_MODE4, 
(EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_58) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_r_ir_rx_0: r_ir_rx_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_48, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_48) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_r_ir_rx_1: r_ir_rx_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_44, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_44) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm0_0: pwm0_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_DAT3, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(MMC1_DAT3) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_pwm0_1: pwm0_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_14, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_14) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm0_2: pwm0_2_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_22, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_22) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm1_0: pwm1_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_DAT2, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(MMC1_DAT2) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_pwm1_1: pwm1_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_29, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_29) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm1_2: pwm1_2_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_23, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_23) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm2_0: pwm2_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_DAT1, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(MMC1_DAT1) MUX_MODE5 (EDGE_NONE | 
PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_pwm2_1: pwm2_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_22, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_22) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm2_2: pwm2_2_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_30, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_30) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm2_3: pwm2_3_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_24, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_24) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm3_0: pwm3_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_DAT0, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(MMC1_DAT0) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_pwm3_1: pwm3_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_33, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_33) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm3_2: pwm3_2_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_25, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_25) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm4_0: pwm4_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_CMD, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(MMC1_CMD) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_pwm4_1: pwm4_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_34, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_34) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm5_0: pwm5_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_CLK, MUX_MODE5, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(MMC1_CLK) MUX_MODE5 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_pwm5_1: pwm5_1_grp { + pinctrl-single,pins = < -+ 
K1X_PADCONF(GPIO_35, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_35) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm6_0: pwm6_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_88, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_88) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm6_1: pwm6_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_36, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_36) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm7_0: pwm7_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_92, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_92) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm7_1: pwm7_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_37, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_37) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm8_0: pwm8_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_00, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_00) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm8_1: pwm8_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_38, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_38) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm9_0: pwm9_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_01, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_01) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm9_1: pwm9_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_39, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_39) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm10_0: pwm10_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_02, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_02) MUX_MODE3 
(EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm10_1: pwm10_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_40, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_40) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm11_0: pwm11_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_03, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_03) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm11_1: pwm11_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_41, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_41) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm12_0: pwm12_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_04, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_04) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm12_1: pwm12_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_42, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_42) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm13_0: pwm13_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_05, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_05) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm13_1: pwm13_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_43, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_43) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm14_0: pwm14_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_06, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_06) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm14_1: pwm14_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_44, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_44) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm15_0: pwm15_0_grp { + 
pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_07, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_07) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm15_1: pwm15_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_45, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_45) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm16_0: pwm16_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_09, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_09) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm16_1: pwm16_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_46, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_46) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm17_0: pwm17_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_10, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_10) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm17_1: pwm17_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_53, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_53) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm18_0: pwm18_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_11, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_11) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm18_1: pwm18_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_57, MUX_MODE4, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_57) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm19_0: pwm19_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_13, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_13) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_pwm19_1: pwm19_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_63, MUX_MODE4, (EDGE_NONE | PULL_UP | 
PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_63) MUX_MODE4 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_rpwm2_0: rpwm2_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_79, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_79) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_rpwm9_0: rpwm9_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_74, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_74) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_sspa0_0: sspa0_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_118, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_119, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_120, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_121, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_122, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1_PINCFGID(GPIO_118) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_119) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_120) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_121) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_122) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) + >; + }; + + pinctrl_sspa0_1: sspa0_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_58, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_111, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_112, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_113, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_114, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1_PINCFGID(GPIO_58) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_111) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_112) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_113) MUX_MODE2 (EDGE_NONE | PULL_UP 
| PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_114) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) + >; + }; + + pinctrl_sspa1: sspa1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_24, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_25, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_26, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_27, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) -+ K1X_PADCONF(GPIO_28, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS0)) ++ K1_PINCFGID(GPIO_24) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_25) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_26) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_27) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) ++ K1_PINCFGID(GPIO_28) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS0) + >; + }; + + pinctrl_ssp2_0: ssp2_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_75, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_76, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_77, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_78, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_75) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_76) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_77) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_78) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) + >; + }; + + pinctrl_ssp2_1: ssp2_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_64, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_65, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_66, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_67, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_64) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_65) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ 
K1_PINCFGID(GPIO_66) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_67) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) + >; + }; + + pinctrl_ssp3_0: ssp3_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_75, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_76, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_77, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_78, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_75) MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_76) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_77) MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_78) MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) + >; + }; + + pinctrl_ssp3_1: ssp3_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_59, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_60, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_61, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_62, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_59) MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_60) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_61) MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_62) MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_qspi: qspi_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(QSPI_DAT3, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(QSPI_DAT2, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(QSPI_DAT1, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(QSPI_DAT0, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(QSPI_CLK, MUX_MODE0, (EDGE_NONE | PULL_DIS | PAD_3V_DS4)) -+ K1X_PADCONF(QSPI_CSI, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(QSPI_DAT3) MUX_MODE0 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ 
K1_PINCFGID(QSPI_DAT2) MUX_MODE0 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(QSPI_DAT1) MUX_MODE0 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(QSPI_DAT0) MUX_MODE0 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(QSPI_CLK) MUX_MODE0 (EDGE_NONE | PULL_DIS | PAD_3V_DS4) ++ K1_PINCFGID(QSPI_CSI) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_mmc1: mmc1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_DAT3, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(MMC1_DAT2, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(MMC1_DAT1, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(MMC1_DAT0, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(MMC1_CMD, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(MMC1_CLK, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_3V_DS4)) ++ K1_PINCFGID(MMC1_DAT3) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(MMC1_DAT2) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(MMC1_DAT1) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(MMC1_DAT0) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(MMC1_CMD) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(MMC1_CLK) MUX_MODE0 (EDGE_NONE | PULL_DOWN | PAD_3V_DS4) + >; + }; + + pinctrl_mmc1_fast: mmc1_fast_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(MMC1_DAT3, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) -+ K1X_PADCONF(MMC1_DAT2, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) -+ K1X_PADCONF(MMC1_DAT1, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) -+ K1X_PADCONF(MMC1_DAT0, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) -+ K1X_PADCONF(MMC1_CMD, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS3)) -+ K1X_PADCONF(MMC1_CLK, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS3)) ++ K1_PINCFGID(MMC1_DAT3) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS3) ++ K1_PINCFGID(MMC1_DAT2) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS3) ++ K1_PINCFGID(MMC1_DAT1) MUX_MODE0 (EDGE_NONE | 
PULL_UP | PAD_1V8_DS3) ++ K1_PINCFGID(MMC1_DAT0) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS3) ++ K1_PINCFGID(MMC1_CMD) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS3) ++ K1_PINCFGID(MMC1_CLK) MUX_MODE0 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS3) + >; + }; + + pinctrl_mmc2: mmc2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_15, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_16, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_17, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_18, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_19, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_20, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_15) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_16) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_17) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_18) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_19) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_20) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_usb0_0: usb0_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_125, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_126, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_127, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_125) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_126) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_127) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) + >; + }; + + pinctrl_usb0_1: usb0_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_64, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_65, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_63, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_64) MUX_MODE1 (EDGE_NONE 
| PULL_DOWN | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_65) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_63) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) + >; + }; + + pinctrl_usb1_0: usb1_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_124, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_124) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) + >; + }; + + pinctrl_usb1_1: usb1_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_66, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_66) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) + >; + }; + + pinctrl_usb2_0: usb2_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_121, MUX_MODE2, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_122, MUX_MODE2, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_123, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_121) MUX_MODE2 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_122) MUX_MODE2 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_123) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) + >; + }; + + pinctrl_usb2_1: usb2_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_68, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_69, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_67, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_68) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_69) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_67) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie0_0: pcie0_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_15, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_16, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_17, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_15) MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_16) 
MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_17) MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie0_1: pcie0_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_29, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_30, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_31, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_29) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_30) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_31) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie0_2: pcie0_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_110, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_115, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_116, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_110) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_115) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_116) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie0_3: pcie0_3_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_53, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_54, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_55, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_53) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_54) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_55) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie1_0: pcie1_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_15, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_16, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_17, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_15) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ 
K1_PINCFGID(GPIO_16) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_17) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie1_1: pcie1_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_32, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_33, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_34, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_32) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_33) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_34) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie1_2: pcie1_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_56, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_57, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_58, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_56) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_57) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_58) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie1_3: pcie1_3_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_59, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_60, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_61, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_59) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_60) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_61) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie2_0: pcie2_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_18, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_19, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_20, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_18) MUX_MODE4 (EDGE_NONE | PULL_DIS | 
PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_19) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_20) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie2_1: pcie2_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_35, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_36, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_37, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_35) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_36) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_37) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie2_2: pcie2_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_62, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_74, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_117, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_62) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_74) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_117) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie2_3: pcie2_3_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_111, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_112, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_113, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_111) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_112) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_113) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pcie2_4: pcie2_4_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_62, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_112, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_117, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_62) MUX_MODE4 
(EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_112) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_117) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_gmac0: gmac0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_00, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_01, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_02, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_03, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_04, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_05, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_06, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_07, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_08, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_09, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_10, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_11, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_12, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_13, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_14, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_45, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_00) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_01) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_02) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_03) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_04) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_05) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_06) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_07) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ 
K1_PINCFGID(GPIO_08) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_09) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_10) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_11) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_12) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_13) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_14) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_45) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_gmac1: gmac1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_29, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_30, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_31, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_32, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_33, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_34, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_35, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_36, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_37, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_38, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_39, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_40, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_41, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_42, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_43, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_46, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_29) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_30) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_31) MUX_MODE1 (EDGE_NONE | PULL_DIS | 
PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_32) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_33) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_34) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_35) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_36) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_37) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_38) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_39) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_40) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_41) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_42) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_43) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_46) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_can_0: can_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_75, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) -+ K1X_PADCONF(GPIO_76, MUX_MODE3, (EDGE_NONE | PULL_UP | PAD_3V_DS4)) ++ K1_PINCFGID(GPIO_75) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_3V_DS4) ++ K1_PINCFGID(GPIO_76) MUX_MODE3 (EDGE_NONE | PULL_UP | PAD_3V_DS4) + >; + }; + + pinctrl_can_1: can_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_54, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_55, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_54) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_55) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_r_can_0: r_can_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_47, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_48, MUX_MODE2, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_47) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_48) MUX_MODE2 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; 
+ + pinctrl_r_can_1: r_can_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_110, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_115, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_110) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_115) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; + + pinctrl_hdmi_0: hdmi_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_86, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_87, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_88, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_89, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_86) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_87) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_88) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_89) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) + >; + }; + + pinctrl_hdmi_1: hdmi_1_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_59, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_60, MUX_MODE1, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_61, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_62, MUX_MODE1, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_59) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_60) MUX_MODE1 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_61) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_62) MUX_MODE1 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) + >; + }; + + pinctrl_spi_lcd_0: spi_lcd_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_86, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_87, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_88, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_89, MUX_MODE3, (EDGE_NONE | PULL_DIS | 
PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_90, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_91, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_92, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_86) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_87) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_88) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_89) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_90) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_91) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_92) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_spi_lcd_1: spi_lcd_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(PRI_TDI, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TMS, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TCK, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TDO, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_74, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_114, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_63, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(PRI_TDI) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TMS) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TCK) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TDO) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_74) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_114) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_63) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_camera0: camera0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_53, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_53) MUX_MODE1 
(EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_camera1: camera1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_58, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_58) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_camera2: camera2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_120, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_120) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pmic: pmic_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(VCXO_EN, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(DVL0, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) -+ K1X_PADCONF(DVL1, MUX_MODE0, (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2)) ++ K1_PINCFGID(VCXO_EN) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(DVL0) MUX_MODE0 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) ++ K1_PINCFGID(DVL1) MUX_MODE0 (EDGE_NONE | PULL_DOWN | PAD_1V8_DS2) + >; + }; + + pinctrl_mn_clk_0: mn_clk_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_92, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_92) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_mn_clk_1: mn_clk_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_81, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_81) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_mn_clk_2: mn_clk_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_44, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_44) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_mn_clk_3: mn_clk_3_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_20, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_20) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_mn_clk_4: mn_clk_4_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_23, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_23) MUX_MODE3 
(EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_mn_clk_5: mn_clk_5_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_32, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_32) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_mn_clk2_0: mn_clk2_0_grp { + pinctrl-single,pins = < -+ K1X_PADCONF(GPIO_91, MUX_MODE1, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_91) MUX_MODE1 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_mn_clk2_1: mn_clk2_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_85, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_85) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_vcxo_0: vcxo_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(DVL0, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(DVL1, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(DVL0) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(DVL1) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_vcxo_1: vcxo_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_16, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_17, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_16) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_17) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_vcxo_2: vcxo_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_89, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) -+ K1X_PADCONF(GPIO_90, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_89) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) ++ K1_PINCFGID(GPIO_90) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_vcxo_out_0: vcxo_out_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_91, MUX_MODE2, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_91) MUX_MODE2 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_vcxo_out_1: 
vcxo_out_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_12, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_12) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_32k_out_0: 32k_out_0_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_21, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_21) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_32k_out_1: 32k_out_1_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_31, MUX_MODE3, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_31) MUX_MODE3 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_32k_out_2: 32k_out_2_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(GPIO_28, MUX_MODE4, (EDGE_NONE | PULL_DIS | PAD_1V8_DS2)) ++ K1_PINCFGID(GPIO_28) MUX_MODE4 (EDGE_NONE | PULL_DIS | PAD_1V8_DS2) + >; + }; + + pinctrl_pri: pri_grp { + pinctrl-single,pins =< -+ K1X_PADCONF(PRI_TDI, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TMS, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TCK, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) -+ K1X_PADCONF(PRI_TDO, MUX_MODE0, (EDGE_NONE | PULL_UP | PAD_1V8_DS2)) ++ K1_PINCFGID(PRI_TDI) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TMS) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TCK) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) ++ K1_PINCFGID(PRI_TDO) MUX_MODE0 (EDGE_NONE | PULL_UP | PAD_1V8_DS2) + >; + }; +}; @@ -22137,20 +22915,20 @@ index ff364709a6df..a47bf9f15d9a 100644 }; diff --git a/arch/riscv/boot/dts/ultrarisc/Makefile b/arch/riscv/boot/dts/ultrarisc/Makefile new file mode 100644 -index 000000000000..9eac56549340 +index 000000000000..df8efe1a3ed7 --- /dev/null +++ b/arch/riscv/boot/dts/ultrarisc/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 -+dtb-$(CONFIG_ARCH_ULTRARISC) += dp1000.dtb +dtb-$(CONFIG_ARCH_ULTRARISC) += dp1000-evb-v1.dtb +dtb-$(CONFIG_ARCH_ULTRARISC) += dp1000-mo-v1.dtb 
++dtb-$(CONFIG_ARCH_ULTRARISC) += dp1000-titan-v1.dtb diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000-evb-pinctrl.dtsi b/arch/riscv/boot/dts/ultrarisc/dp1000-evb-pinctrl.dtsi new file mode 100644 -index 000000000000..1d6442c2bff7 +index 000000000000..93c8f0f31e64 --- /dev/null +++ b/arch/riscv/boot/dts/ultrarisc/dp1000-evb-pinctrl.dtsi -@@ -0,0 +1,149 @@ +@@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * UltraRISC DP1000 pinctrl device Tree Source @@ -22160,152 +22938,139 @@ index 000000000000..1d6442c2bff7 + * + */ +#include ++#include "dp1000.dtsi" + -+/ { -+ -+ soc { -+ pmx0: pinmux@11081000 { -+ compatible = "ultrarisc,dp1000-pinctrl"; -+ reg = <0x0 0x11081000 0x0 0x1000>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ #pinctrl-cells = <2>; -+ pinctrl-single,register-width = <32>; -+ pinctrl-single,function-mask = <0x3ff>; -+ pinctrl-use-default; -+ -+ i2c0_pins: i2c0_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_A 12 UR_FUNC0 -+ UR_DP1000_IOMUX_A 13 UR_FUNC0 -+ >; ++&pmx0 { ++ i2c0_pins: i2c0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 12 UR_FUNC0 ++ UR_DP1000_IOMUX_A 13 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_A 12 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 13 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 12 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 13 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ i2c1_pins: i2c1_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_B 6 UR_FUNC0 -+ UR_DP1000_IOMUX_B 7 UR_FUNC0 -+ >; ++ i2c1_pins: i2c1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_B 6 UR_FUNC0 ++ UR_DP1000_IOMUX_B 7 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_B 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_B 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_B 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) 
++ >; ++ }; + -+ i2c2_pins: i2c2_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_C 0 UR_FUNC0 -+ UR_DP1000_IOMUX_C 1 UR_FUNC0 -+ >; ++ i2c2_pins: i2c2_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 0 UR_FUNC0 ++ UR_DP1000_IOMUX_C 1 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_C 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_C 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ i2c3_pins: i2c3_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_C 2 UR_FUNC0 -+ UR_DP1000_IOMUX_C 3 UR_FUNC0 -+ >; ++ i2c3_pins: i2c3_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 2 UR_FUNC0 ++ UR_DP1000_IOMUX_C 3 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_C 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_C 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ uart0_pins: uart0_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_A 8 UR_FUNC1 -+ UR_DP1000_IOMUX_A 9 UR_FUNC1 -+ >; ++ uart0_pins: uart0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 8 UR_FUNC1 ++ UR_DP1000_IOMUX_A 9 UR_FUNC1 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_A 8 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 9 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 8 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 9 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ uart1_pins: uart1_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_B 4 UR_FUNC0 -+ UR_DP1000_IOMUX_B 5 UR_FUNC0 -+ >; ++ uart1_pins: uart1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_B 4 UR_FUNC0 ++ UR_DP1000_IOMUX_B 5 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_B 4 UR_DP1000_BIAS(UR_PULL_UP, 
UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_B 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_B 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ uart2_pins: uart2_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_C 4 UR_FUNC0 -+ UR_DP1000_IOMUX_C 5 UR_FUNC0 -+ >; ++ uart2_pins: uart2_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 4 UR_FUNC0 ++ UR_DP1000_IOMUX_C 5 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_C 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_C 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ spi0_pins: spi0_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_D 0 UR_FUNC1 -+ UR_DP1000_IOMUX_D 1 UR_FUNC1 -+ UR_DP1000_IOMUX_D 2 UR_FUNC1 -+ UR_DP1000_IOMUX_D 3 UR_FUNC1 -+ UR_DP1000_IOMUX_D 4 UR_FUNC1 -+ UR_DP1000_IOMUX_D 5 UR_FUNC1 -+ UR_DP1000_IOMUX_D 6 UR_FUNC1 -+ UR_DP1000_IOMUX_D 7 UR_FUNC1 -+ >; ++ spi0_pins: spi0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_D 0 UR_FUNC1 ++ UR_DP1000_IOMUX_D 1 UR_FUNC1 ++ UR_DP1000_IOMUX_D 2 UR_FUNC1 ++ UR_DP1000_IOMUX_D 3 UR_FUNC1 ++ UR_DP1000_IOMUX_D 4 UR_FUNC1 ++ UR_DP1000_IOMUX_D 5 UR_FUNC1 ++ UR_DP1000_IOMUX_D 6 UR_FUNC1 ++ UR_DP1000_IOMUX_D 7 UR_FUNC1 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_D 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ 
UR_DP1000_IOMUX_D 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ spi1_pins: spi1_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_A 0 UR_FUNC0 -+ UR_DP1000_IOMUX_A 1 UR_FUNC0 -+ UR_DP1000_IOMUX_A 2 UR_FUNC0 -+ UR_DP1000_IOMUX_A 3 UR_FUNC0 -+ >; ++ spi1_pins: spi1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 0 UR_FUNC0 ++ UR_DP1000_IOMUX_A 1 UR_FUNC0 ++ UR_DP1000_IOMUX_A 2 UR_FUNC0 ++ UR_DP1000_IOMUX_A 3 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_A 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; + }; +}; diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000-evb-v1.dts b/arch/riscv/boot/dts/ultrarisc/dp1000-evb-v1.dts new file mode 100644 -index 000000000000..4080b26957ff +index 000000000000..7af0c8c9e285 --- /dev/null +++ b/arch/riscv/boot/dts/ultrarisc/dp1000-evb-v1.dts -@@ -0,0 +1,61 @@ +@@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * UltraRISC DP1000 device Tree Source @@ -22314,10 +23079,16 @@ index 000000000000..4080b26957ff + * Copyright (C) 2025 UltraRISC Technology (Shanghai) Co., Ltd. 
+ */ + -+#include "dp1000.dts" +#include "dp1000-evb-pinctrl.dtsi" +#include + ++/ { ++ chosen { ++ bootargs = "earlycon=sbi console=ttyS1,115200"; ++ stdout-path = &uart1; ++ }; ++}; ++ +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins>; @@ -22369,171 +23140,164 @@ index 000000000000..4080b26957ff +}; diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000-mo-pinctrl.dtsi b/arch/riscv/boot/dts/ultrarisc/dp1000-mo-pinctrl.dtsi new file mode 100644 -index 000000000000..6af61ea8a6da +index 000000000000..85b013f66bbd --- /dev/null +++ b/arch/riscv/boot/dts/ultrarisc/dp1000-mo-pinctrl.dtsi -@@ -0,0 +1,146 @@ +@@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(C) 2025 UltraRISC Technology (Shanghai) Co., Ltd. + */ + +#include ++#include "dp1000.dtsi" + -+/ { -+ -+ soc { -+ pmx0: pinmux@11081000 { -+ compatible = "ultrarisc,dp1000-pinctrl"; -+ reg = <0x0 0x11081000 0x0 0x1000>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ #pinctrl-cells = <2>; -+ pinctrl-single,register-width = <32>; -+ pinctrl-single,function-mask = <0x3ff>; -+ pinctrl-use-default; -+ -+ i2c0_pins: i2c0_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_A 12 UR_FUNC0 -+ UR_DP1000_IOMUX_A 13 UR_FUNC0 -+ >; ++&pmx0 { ++ i2c0_pins: i2c0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 12 UR_FUNC0 ++ UR_DP1000_IOMUX_A 13 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_A 12 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 13 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 12 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 13 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ i2c1_pins: i2c1_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_B 6 UR_FUNC0 -+ UR_DP1000_IOMUX_B 7 UR_FUNC0 -+ >; ++ i2c1_pins: i2c1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_B 6 UR_FUNC0 ++ UR_DP1000_IOMUX_B 7 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_B 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ 
UR_DP1000_IOMUX_B 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_B 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ i2c2_pins: i2c2_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_C 0 UR_FUNC0 -+ UR_DP1000_IOMUX_C 1 UR_FUNC0 -+ >; ++ i2c2_pins: i2c2_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 0 UR_FUNC0 ++ UR_DP1000_IOMUX_C 1 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_C 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_C 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ i2c3_pins: i2c3_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_C 2 UR_FUNC0 -+ UR_DP1000_IOMUX_C 3 UR_FUNC0 -+ >; ++ i2c3_pins: i2c3_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 2 UR_FUNC0 ++ UR_DP1000_IOMUX_C 3 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_C 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_C 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ uart0_pins: uart0_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_A 8 UR_FUNC1 -+ UR_DP1000_IOMUX_A 9 UR_FUNC1 -+ >; ++ uart0_pins: uart0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 8 UR_FUNC1 ++ UR_DP1000_IOMUX_A 9 UR_FUNC1 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_A 8 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 9 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 8 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 9 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ uart1_pins: uart1_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_B 4 UR_FUNC0 -+ 
UR_DP1000_IOMUX_B 5 UR_FUNC0 -+ >; ++ uart1_pins: uart1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_B 4 UR_FUNC0 ++ UR_DP1000_IOMUX_B 5 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_B 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_B 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_B 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ uart2_pins: uart2_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_C 4 UR_FUNC0 -+ UR_DP1000_IOMUX_C 5 UR_FUNC0 -+ >; ++ uart2_pins: uart2_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 4 UR_FUNC0 ++ UR_DP1000_IOMUX_C 5 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_C 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_C 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ spi0_pins: spi0_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_D 0 UR_FUNC1 -+ UR_DP1000_IOMUX_D 1 UR_FUNC1 -+ UR_DP1000_IOMUX_D 2 UR_FUNC1 -+ UR_DP1000_IOMUX_D 3 UR_FUNC1 -+ UR_DP1000_IOMUX_D 4 UR_FUNC1 -+ UR_DP1000_IOMUX_D 5 UR_FUNC1 -+ UR_DP1000_IOMUX_D 6 UR_FUNC1 -+ UR_DP1000_IOMUX_D 7 UR_FUNC1 -+ >; ++ spi0_pins: spi0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_D 0 UR_FUNC1 ++ UR_DP1000_IOMUX_D 1 UR_FUNC1 ++ UR_DP1000_IOMUX_D 2 UR_FUNC1 ++ UR_DP1000_IOMUX_D 3 UR_FUNC1 ++ UR_DP1000_IOMUX_D 4 UR_FUNC1 ++ UR_DP1000_IOMUX_D 5 UR_FUNC1 ++ UR_DP1000_IOMUX_D 6 UR_FUNC1 ++ UR_DP1000_IOMUX_D 7 UR_FUNC1 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_D 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ 
UR_DP1000_IOMUX_D 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_D 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_D 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; + -+ spi1_pins: spi1_pins { -+ pinctrl-pins = < -+ UR_DP1000_IOMUX_A 0 UR_FUNC0 -+ UR_DP1000_IOMUX_A 1 UR_FUNC0 -+ UR_DP1000_IOMUX_A 2 UR_FUNC0 -+ UR_DP1000_IOMUX_A 3 UR_FUNC0 -+ >; ++ spi1_pins: spi1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 0 UR_FUNC0 ++ UR_DP1000_IOMUX_A 1 UR_FUNC0 ++ UR_DP1000_IOMUX_A 2 UR_FUNC0 ++ UR_DP1000_IOMUX_A 3 UR_FUNC0 ++ >; + -+ pinconf-pins = < -+ UR_DP1000_IOMUX_A 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ UR_DP1000_IOMUX_A 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) -+ >; -+ }; -+ }; ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; + }; +}; diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000-mo-v1.dts b/arch/riscv/boot/dts/ultrarisc/dp1000-mo-v1.dts new file mode 100644 -index 000000000000..a74714629566 +index 000000000000..dc057cbaf59b --- /dev/null +++ b/arch/riscv/boot/dts/ultrarisc/dp1000-mo-v1.dts -@@ -0,0 +1,60 @@ +@@ -0,0 +1,66 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * Copyright(C) 2025 UltraRISC Technology (Shanghai) Co., Ltd. + */ + -+#include "dp1000.dts" +#include "dp1000-mo-pinctrl.dtsi" +#include + ++/ { ++ chosen { ++ bootargs = "earlycon=sbi console=ttyS0,115200"; ++ stdout-path = &uart0; ++ }; ++}; ++ +&i2c0 { + pinctrl-names = "default"; + pinctrl-0 = <&i2c0_pins>; @@ -22585,12 +23349,336 @@ index 000000000000..a74714629566 + pinctrl-names = "default"; + pinctrl-0 = <&uart2_pins>; +}; -diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000.dts b/arch/riscv/boot/dts/ultrarisc/dp1000.dts +diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000-titan-pinctrl.dtsi b/arch/riscv/boot/dts/ultrarisc/dp1000-titan-pinctrl.dtsi new file mode 100644 -index 000000000000..bce0a46c5425 +index 000000000000..35429e539832 --- /dev/null -+++ b/arch/riscv/boot/dts/ultrarisc/dp1000.dts -@@ -0,0 +1,526 @@ ++++ b/arch/riscv/boot/dts/ultrarisc/dp1000-titan-pinctrl.dtsi +@@ -0,0 +1,173 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(C) 2025 UltraRISC Technology (Shanghai) Co., Ltd. 
++ */ ++ ++#include ++#include "dp1000.dtsi" ++ ++&pmx0 { ++ i2c0_pins: i2c0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 12 UR_FUNC0 ++ UR_DP1000_IOMUX_A 13 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 12 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 13 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ i2c1_pins: i2c1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_B 6 UR_FUNC0 ++ UR_DP1000_IOMUX_B 7 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_B 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ i2c2_pins: i2c2_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 0 UR_FUNC0 ++ UR_DP1000_IOMUX_C 1 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ i2c3_pins: i2c3_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 2 UR_FUNC0 ++ UR_DP1000_IOMUX_C 3 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ uart0_pins: uart0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 8 UR_FUNC1 ++ UR_DP1000_IOMUX_A 9 UR_FUNC1 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 8 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 9 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ uart1_pins: uart1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_B 4 UR_FUNC0 ++ UR_DP1000_IOMUX_B 5 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_B 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ uart2_pins: uart2_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 4 UR_FUNC0 ++ UR_DP1000_IOMUX_C 5 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 5 
UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ uart3_pins: uart3_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_C 6 UR_FUNC0 ++ UR_DP1000_IOMUX_C 7 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_C 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_C 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ spi0_pins: spi0_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_D 0 UR_FUNC1 ++ UR_DP1000_IOMUX_D 1 UR_FUNC1 ++ UR_DP1000_IOMUX_D 2 UR_FUNC1 ++ UR_DP1000_IOMUX_D 3 UR_FUNC1 ++ UR_DP1000_IOMUX_D 4 UR_FUNC1 ++ UR_DP1000_IOMUX_D 5 UR_FUNC1 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_D 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 5 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ spi1_pins: spi1_pins { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 0 UR_FUNC0 ++ UR_DP1000_IOMUX_A 1 UR_FUNC0 ++ UR_DP1000_IOMUX_A 2 UR_FUNC0 ++ UR_DP1000_IOMUX_A 3 UR_FUNC0 ++ UR_DP1000_IOMUX_A 4 UR_FUNC0 ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 3 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 4 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++ ++ gpios_pin: gpios_pin { ++ pinctrl-pins = < ++ UR_DP1000_IOMUX_A 10 UR_FUNC_DEF ++ UR_DP1000_IOMUX_A 11 UR_FUNC_DEF ++ UR_DP1000_IOMUX_A 14 UR_FUNC_DEF ++ UR_DP1000_IOMUX_A 15 UR_FUNC_DEF ++ ++ UR_DP1000_IOMUX_B 0 UR_FUNC_DEF ++ UR_DP1000_IOMUX_B 1 UR_FUNC_DEF ++ UR_DP1000_IOMUX_B 2 UR_FUNC_DEF ++ ++ UR_DP1000_IOMUX_D 6 UR_FUNC_DEF ++ UR_DP1000_IOMUX_D 7 UR_FUNC_DEF ++ >; ++ ++ pinconf-pins = < ++ UR_DP1000_IOMUX_A 10 
UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 11 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 14 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_A 15 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ ++ UR_DP1000_IOMUX_B 0 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 1 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_B 2 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ ++ UR_DP1000_IOMUX_D 6 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ UR_DP1000_IOMUX_D 7 UR_DP1000_BIAS(UR_PULL_UP, UR_DRIVE_DEF) ++ >; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000-titan-v1.dts b/arch/riscv/boot/dts/ultrarisc/dp1000-titan-v1.dts +new file mode 100644 +index 000000000000..2cbdfa2ad813 +--- /dev/null ++++ b/arch/riscv/boot/dts/ultrarisc/dp1000-titan-v1.dts +@@ -0,0 +1,139 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright(C) 2025 UltraRISC Technology (Shanghai) Co., Ltd. ++ */ ++ ++#include "dp1000-titan-pinctrl.dtsi" ++#include ++#include ++#include ++#include ++ ++/ { ++ chosen { ++ bootargs = "earlycon=sbi console=ttyS0,115200"; ++ stdout-path = &uart0; ++ }; ++ ++ gpio-poweroff { ++ compatible = "gpio-poweroff"; ++ gpios = <&portb 0 GPIO_ACTIVE_LOW>; ++ active-delay-ms = <100>; ++ line-name = "power-off"; ++ status = "okay"; ++ }; ++ ++ gpio-restart { ++ compatible = "gpio-restart"; ++ gpios = <&portb 1 GPIO_ACTIVE_LOW>; ++ active-delay-ms = <100>; ++ line-name = "reset-system"; ++ status = "okay"; ++ }; ++ ++ gpio-keys { ++ compatible = "gpio-keys"; ++ ++ key-wakeup { ++ label = "Wake-Up"; ++ gpios = <&porta 14 GPIO_ACTIVE_LOW>; ++ linux,code = ; ++ linux,input-type = ; ++ debounce-interval = <10>; ++ wakeup-source; ++ wakeup-event-action = ; ++ }; ++ }; ++}; ++ ++&i2c0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c0_pins>; ++}; ++ ++&i2c1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c1_pins>; ++}; ++ ++&i2c2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c2_pins>; ++ ++ rtc@68 { 
++ compatible = "st,m41t11"; ++ reg = <0x68>; ++ }; ++}; ++ ++&i2c3 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&i2c3_pins>; ++}; ++ ++&spi0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&spi0_pins>; ++}; ++ ++&spi1 { ++ num-cs = <1>; ++ ++ pinctrl-names = "default"; ++ pinctrl-0 = <&spi1_pins>; ++}; ++ ++&uart0 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart0_pins>; ++}; ++ ++&uart1 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart1_pins>; ++}; ++ ++&uart2 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart2_pins>; ++}; ++ ++&uart3 { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&uart3_pins>; ++}; ++ ++&porta { ++ pinctrl-names = "default"; ++ pinctrl-0 = <&gpios_pin>; ++ ++ i2c1-mux-hog { ++ gpio-hog; ++ gpios = <5 GPIO_ACTIVE_HIGH>; ++ /* LOW: DCDC(U6) connect MCU(EC) ++ * HIGH: DCDC(U6) connect CPU ++ */ ++ output-low; ++ line-name = "gpio-mux-dcdc"; ++ }; ++ ++ i2c3-mux-hog { ++ gpio-hog; ++ gpios = <6 GPIO_ACTIVE_LOW>; ++ /* LOW: CPU i2c3 connect nvme ++ * HIGH: CPU i2c3 connect pciex16 ++ */ ++ output-low; ++ line-name = "gpio-mux-i2c3"; ++ }; ++ ++ uart0-mux-hog { ++ gpio-hog; ++ gpios = <7 GPIO_ACTIVE_HIGH>; ++ /* LOW: uart_debug connect BMC ++ * HIGH: uart_debug connect CPU ++ */ ++ output-high; ++ line-name = "gpio-mux-debug"; ++ }; ++}; +diff --git a/arch/riscv/boot/dts/ultrarisc/dp1000.dtsi b/arch/riscv/boot/dts/ultrarisc/dp1000.dtsi +new file mode 100644 +index 000000000000..93b99d622a78 +--- /dev/null ++++ b/arch/riscv/boot/dts/ultrarisc/dp1000.dtsi +@@ -0,0 +1,515 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (c) 2019-2022 UltraRISC Technology (Shanghai) Co., Ltd. 
@@ -22605,11 +23693,6 @@ index 000000000000..bce0a46c5425 + compatible = "ultrarisc,dp1000"; + model = "ultrarisc,dp1000"; + -+ chosen { -+ bootargs = "earlycon=sbi console=ttyS1,115200"; -+ stdout-path = &uart1; -+ }; -+ + cpus { + #address-cells = <0x01>; + #size-cells = <0x00>; @@ -22756,6 +23839,7 @@ index 000000000000..bce0a46c5425 + clock-frequency = <62500000>; + #clock-cells = <0>; + }; ++ + csr_clk: csr_clk { + compatible = "fixed-clock"; + clock-frequency = <250000000>; @@ -22926,26 +24010,15 @@ index 000000000000..bce0a46c5425 + clocks = <&device_clk>; + }; + -+ timer0: timer@20220000 { -+ compatible = "snps,dw-apb-timer"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x20220000 0x0 0x100>; -+ clocks = <&device_clk>; -+ interrupt-parent = <0x01>; -+ interrupts = <35>; -+ status = "okay"; -+ }; -+ -+ timer1: timer@20230000 { -+ compatible = "snps,dw-apb-timer"; ++ pmx0: pinmux@11081000 { ++ compatible = "ultrarisc,dp1000-pinctrl"; ++ reg = <0x0 0x11081000 0x0 0x1000>; + #address-cells = <1>; + #size-cells = <0>; -+ reg = <0x0 0x20230000 0x0 0x100>; -+ clocks = <&device_clk>; -+ interrupt-parent = <0x01>; -+ interrupts = <36>; -+ status = "okay"; ++ #pinctrl-cells = <2>; ++ pinctrl-single,register-width = <32>; ++ pinctrl-single,function-mask = <0x3ff>; ++ pinctrl-use-default; + }; + + gpio: gpio@20200000 { @@ -22967,6 +24040,7 @@ index 000000000000..bce0a46c5425 + #interrupt-cells = <2>; + interrupt-parent = <0x01>; + interrupts = <34>; ++ gpio-ranges = <&pmx0 0 0 16>; + }; + + portb: gpio-port@1 { @@ -22975,6 +24049,7 @@ index 000000000000..bce0a46c5425 + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; ++ gpio-ranges = <&pmx0 16 0 8>; + }; + + portc: gpio-port@2 { @@ -22983,6 +24058,7 @@ index 000000000000..bce0a46c5425 + gpio-controller; + #gpio-cells = <2>; + snps,nr-gpios = <8>; ++ gpio-ranges = <&pmx0 24 0 8>; + }; + + portd: gpio-port@3 { @@ -22991,6 +24067,7 @@ index 000000000000..bce0a46c5425 + gpio-controller; + 
#gpio-cells = <2>; + snps,nr-gpios = <8>; ++ gpio-ranges = <&pmx0 32 0 8>; + }; + }; + @@ -23118,10 +24195,10 @@ index 000000000000..bce0a46c5425 + }; +}; diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig -index ab86ec3b9eab..8f88aed3d5d8 100644 +index ab86ec3b9eab..2bf8ed8dae61 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig -@@ -28,6 +28,7 @@ CONFIG_PROFILING=y +@@ -28,17 +28,24 @@ CONFIG_PROFILING=y CONFIG_SOC_MICROCHIP_POLARFIRE=y CONFIG_ARCH_RENESAS=y CONFIG_ARCH_THEAD=y @@ -23129,7 +24206,10 @@ index ab86ec3b9eab..8f88aed3d5d8 100644 CONFIG_SOC_SIFIVE=y CONFIG_SOC_STARFIVE=y CONFIG_ARCH_SUNXI=y -@@ -36,6 +37,7 @@ CONFIG_SMP=y + CONFIG_SOC_VIRT=y ++CONFIG_SOC_SPACEMIT=y ++CONFIG_SOC_SPACEMIT_K1=y + CONFIG_SMP=y CONFIG_HOTPLUG_CPU=y CONFIG_PM=y CONFIG_CPU_IDLE=y @@ -23137,17 +24217,37 @@ index ab86ec3b9eab..8f88aed3d5d8 100644 CONFIG_VIRTUALIZATION=y CONFIG_KVM=m CONFIG_ACPI=y -@@ -133,6 +135,7 @@ CONFIG_SERIAL_8250_DW=y ++CONFIG_ACPI_APEI=y ++CONFIG_ACPI_APEI_GHES=y ++CONFIG_KPROBES=y + CONFIG_JUMP_LABEL=y + CONFIG_MODULES=y + CONFIG_MODULE_UNLOAD=y +@@ -123,6 +130,7 @@ CONFIG_VIRTIO_NET=y + CONFIG_MACB=y + CONFIG_E1000E=y + CONFIG_R8169=y ++CONFIG_LRW_DDR_PMU=m + CONFIG_STMMAC_ETH=m + CONFIG_MICROSEMI_PHY=y + CONFIG_INPUT_MOUSEDEV=y +@@ -133,15 +141,27 @@ CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_SH_SCI=y CONFIG_VIRTIO_CONSOLE=y +CONFIG_SERIAL_EARLYCON_RISCV_SBI=y ++CONFIG_SERIAL_LRW_UART=y ++CONFIG_SERIAL_LRW_UART_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=y ++CONFIG_I2C_LRW_PLATFORM=m CONFIG_I2C_MV64XXX=m -@@ -142,6 +145,13 @@ CONFIG_SPI_SUN6I=y + CONFIG_SPI=y + CONFIG_SPI_SIFIVE=y + CONFIG_SPI_SUN6I=y # CONFIG_PTP_1588_CLOCK is not set CONFIG_GPIO_SIFIVE=y ++CONFIG_GPIO_K1=y CONFIG_WATCHDOG=y +CONFIG_DW_WATCHDOG=y +CONFIG_WATCHDOG_PRETIMEOUT_GOV=y @@ -23159,7 +24259,7 @@ index ab86ec3b9eab..8f88aed3d5d8 100644 CONFIG_SUNXI_WATCHDOG=y CONFIG_REGULATOR=y 
CONFIG_REGULATOR_FIXED_VOLTAGE=y -@@ -168,21 +178,25 @@ CONFIG_MMC=y +@@ -168,22 +188,30 @@ CONFIG_MMC=y CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_CADENCE=y @@ -23171,11 +24271,14 @@ index ab86ec3b9eab..8f88aed3d5d8 100644 CONFIG_DMADEVICES=y CONFIG_DMA_SUN6I=m +CONFIG_DW_AXI_DMAC=y ++CONFIG_VFIO=m ++CONFIG_VFIO_PCI=m CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_INPUT=y CONFIG_VIRTIO_MMIO=y CONFIG_SUN8I_DE2_CCU=m ++CONFIG_SPACEMIT_K1_CCU=y CONFIG_SUN50I_IOMMU=y +CONFIG_MAILBOX=y CONFIG_RPMSG_CHAR=y @@ -23183,9 +24286,11 @@ index ab86ec3b9eab..8f88aed3d5d8 100644 CONFIG_RPMSG_VIRTIO=y +CONFIG_RPMSG_TH1520=y CONFIG_ARCH_R9A07G043=y ++CONFIG_RESET_K1_SPACEMIT=y CONFIG_PHY_SUN4I_USB=m CONFIG_LIBNVDIMM=y -@@ -238,5 +252,13 @@ CONFIG_DEBUG_SG=y + CONFIG_NVMEM_SUNXI_SID=y +@@ -238,5 +266,13 @@ CONFIG_DEBUG_SG=y # CONFIG_RCU_TRACE is not set CONFIG_RCU_EQS_DEBUG=y # CONFIG_FTRACE is not set @@ -28738,24 +29843,20 @@ index 000000000000..a1ea4921ed39 +CONFIG_MEMTEST=y diff --git a/arch/riscv/configs/k1_defconfig b/arch/riscv/configs/k1_defconfig new file mode 100644 -index 000000000000..72df9883c25c +index 000000000000..a027c9272a4b --- /dev/null +++ b/arch/riscv/configs/k1_defconfig -@@ -0,0 +1,31 @@ +@@ -0,0 +1,27 @@ +# +# Spacemit k1 SoC support +# +CONFIG_SOC_SPACEMIT=y +CONFIG_SOC_SPACEMIT_K1=y -+CONFIG_SOC_SPACEMIT_K1X=y +CONFIG_RISCV_ISA_ZICBOM=y -+CONFIG_SPACEMIT_K1X_CCU=y -+CONFIG_RESET_K1X_SPACEMIT=y -+CONFIG_PINCTRL_SPACEMIT_K1X=y -+CONFIG_GPIO_K1X=y -+CONFIG_SERIAL_SPACEMIT_K1X=y -+CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE=y -+CONFIG_SERIAL_DEV_BUS=y ++CONFIG_SPACEMIT_K1_CCU=y ++CONFIG_RESET_K1_SPACEMIT=y ++CONFIG_PINCTRL_SPACEMIT_K1=y ++CONFIG_GPIO_K1=y +CONFIG_SPACEMIT_MEM_RANGE=y +CONFIG_SPACEMIT_K1_DMA=y +CONFIG_I2C_SPACEMIT_K1=y @@ -28774,26 +29875,19 @@ index 000000000000..72df9883c25c +CONFIG_K1_EMAC=m + diff --git a/arch/riscv/configs/openeuler_defconfig b/arch/riscv/configs/openeuler_defconfig -index 61f2b2f12589..4ef5c9933ef9 
100644 +index 61f2b2f12589..0f2b22d817e1 100644 --- a/arch/riscv/configs/openeuler_defconfig +++ b/arch/riscv/configs/openeuler_defconfig -@@ -2,6 +2,7 @@ - # Automatically generated file; DO NOT EDIT. - # Linux/riscv 6.6.0 Kernel Configuration - # -+CONFIG_GCC_ASM_GOTO_OUTPUT_BROKEN=y - CONFIG_IRQ_WORK=y - CONFIG_BUILDTIME_TABLE_SORT=y - CONFIG_THREAD_INFO_IN_TASK=y -@@ -44,6 +45,7 @@ CONFIG_IRQ_DOMAIN_HIERARCHY=y +@@ -44,6 +44,8 @@ CONFIG_IRQ_DOMAIN_HIERARCHY=y CONFIG_GENERIC_IRQ_IPI=y CONFIG_GENERIC_IRQ_IPI_MUX=y CONFIG_GENERIC_MSI_IRQ=y ++CONFIG_IRQ_MSI_IOMMU=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y CONFIG_IRQ_FORCED_THREADING=y CONFIG_SPARSE_IRQ=y # CONFIG_GENERIC_IRQ_DEBUGFS is not set -@@ -90,9 +92,10 @@ CONFIG_BPF_JIT_DEFAULT_ON=y +@@ -90,12 +92,14 @@ CONFIG_BPF_JIT_DEFAULT_ON=y # CONFIG_BPF_SCHED is not set # end of BPF subsystem @@ -28807,7 +29901,11 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_PREEMPT is not set CONFIG_PREEMPT_COUNT=y # CONFIG_PREEMPT_DYNAMIC is not set -@@ -148,7 +151,7 @@ CONFIG_GENERIC_SCHED_CLOCK=y ++# CONFIG_XCU_SCHEDULER is not set + + # + # CPU/Task time and stats accounting +@@ -148,7 +152,7 @@ CONFIG_GENERIC_SCHED_CLOCK=y CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" @@ -28816,7 +29914,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CC_NO_ARRAY_BOUNDS=y CONFIG_ARCH_SUPPORTS_INT128=y CONFIG_NUMA_BALANCING=y -@@ -160,6 +163,7 @@ CONFIG_MEMCG=y +@@ -160,6 +164,7 @@ CONFIG_MEMCG=y # CONFIG_MEMCG_V1_RECLAIM is not set # CONFIG_MEMCG_MEMFS_INFO is not set CONFIG_MEMCG_KMEM=y @@ -28824,7 +29922,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_BLK_CGROUP=y CONFIG_CGROUP_WRITEBACK=y # CONFIG_CGROUP_V1_WRITEBACK is not set -@@ -169,7 +173,6 @@ CONFIG_FAIR_GROUP_SCHED=y +@@ -169,7 +174,6 @@ CONFIG_FAIR_GROUP_SCHED=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y # CONFIG_QOS_SCHED_DYNAMIC_AFFINITY is not set @@ -28832,7 +29930,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 
CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y -@@ -186,6 +189,8 @@ CONFIG_SOCK_CGROUP_DATA=y +@@ -186,6 +190,8 @@ CONFIG_SOCK_CGROUP_DATA=y # CONFIG_CGROUP_V1_KILL is not set # CONFIG_CGROUP_V1_STAT is not set # CONFIG_CGROUP_FILES is not set @@ -28841,7 +29939,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_NAMESPACES=y CONFIG_UTS_NS=y CONFIG_TIME_NS=y -@@ -244,6 +249,8 @@ CONFIG_KALLSYMS=y +@@ -244,6 +250,8 @@ CONFIG_KALLSYMS=y # CONFIG_KALLSYMS_SELFTEST is not set CONFIG_KALLSYMS_ALL=y CONFIG_KALLSYMS_BASE_RELATIVE=y @@ -28850,7 +29948,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_KCMP=y CONFIG_RSEQ=y CONFIG_CACHESTAT_SYSCALL=y -@@ -262,8 +269,6 @@ CONFIG_DEBUG_PERF_USE_VMALLOC=y +@@ -262,8 +270,6 @@ CONFIG_DEBUG_PERF_USE_VMALLOC=y CONFIG_SYSTEM_DATA_VERIFICATION=y CONFIG_PROFILING=y CONFIG_TRACEPOINTS=y @@ -28859,7 +29957,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Kexec and crash features -@@ -288,6 +293,7 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=17 +@@ -288,6 +294,7 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=17 CONFIG_RISCV_SBI=y CONFIG_MMU=y CONFIG_PAGE_OFFSET=0xff60000000000000 @@ -28867,7 +29965,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_ARCH_SPARSEMEM_ENABLE=y CONFIG_ARCH_SELECT_MEMORY_MODEL=y CONFIG_ARCH_SUPPORTS_UPROBES=y -@@ -298,6 +304,7 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y +@@ -298,6 +305,7 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_CSUM=y CONFIG_GENERIC_HWEIGHT=y CONFIG_FIX_EARLYCON_MEM=y @@ -28875,7 +29973,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_PGTABLE_LEVELS=5 CONFIG_LOCKDEP_SUPPORT=y CONFIG_RISCV_DMA_NONCOHERENT=y -@@ -306,15 +313,21 @@ CONFIG_RISCV_DMA_NONCOHERENT=y +@@ -306,15 +314,20 @@ CONFIG_RISCV_DMA_NONCOHERENT=y # SoC selection # # CONFIG_SOC_MICROCHIP_POLARFIRE is not set @@ -28890,12 +29988,11 @@ index 61f2b2f12589..4ef5c9933ef9 100644 +CONFIG_ARCH_SUNXI=y CONFIG_ARCH_THEAD=y +CONFIG_ARCH_XUANTIE=y -+# CONFIG_ARCH_ULTRARISC is not set ++CONFIG_ARCH_ULTRARISC=y 
CONFIG_ARCH_VIRT=y CONFIG_SOC_VIRT=y +CONFIG_SOC_SPACEMIT=y +CONFIG_SOC_SPACEMIT_K1=y -+CONFIG_SOC_SPACEMIT_K1X=y # end of SoC selection # @@ -28918,7 +30015,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Platform type # -@@ -339,7 +364,7 @@ CONFIG_ARCH_RV64I=y +@@ -339,12 +364,14 @@ CONFIG_ARCH_RV64I=y CONFIG_CMODEL_MEDANY=y CONFIG_MODULE_SECTIONS=y CONFIG_SMP=y @@ -28927,19 +30024,32 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_NR_CPUS=512 CONFIG_HOTPLUG_CPU=y CONFIG_TUNE_GENERIC=y -@@ -353,9 +378,10 @@ CONFIG_RISCV_ISA_SVPBMT=y + CONFIG_NUMA=y + CONFIG_NODES_SHIFT=7 ++# CONFIG_RISCV_TICKET_SPINLOCKS is not set ++CONFIG_RISCV_COMBO_SPINLOCKS=y + CONFIG_RISCV_ALTERNATIVE=y + CONFIG_RISCV_ALTERNATIVE_EARLY=y + CONFIG_RISCV_ISA_C=y +@@ -353,9 +380,16 @@ CONFIG_RISCV_ISA_SVPBMT=y CONFIG_TOOLCHAIN_HAS_V=y CONFIG_RISCV_ISA_V=y CONFIG_RISCV_ISA_V_DEFAULT_ENABLE=y +CONFIG_RISCV_ISA_ZAWRS=y ++CONFIG_RISCV_ISA_ZABHA=y ++CONFIG_RISCV_ISA_ZACAS=y +CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD=768 ++CONFIG_RISCV_ISA_ZBA=y ++CONFIG_RISCV_ISA_ZBB=y ++CONFIG_RISCV_ISA_ZBC=y CONFIG_RISCV_ISA_ZICBOM=y CONFIG_RISCV_ISA_ZICBOZ=y -CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE=y ++CONFIG_RISCV_ISA_ZICBOP=y CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI=y CONFIG_FPU=y CONFIG_IRQ_STACKS=y -@@ -380,6 +406,8 @@ CONFIG_ARCH_SELECTS_KEXEC_FILE=y +@@ -380,6 +414,8 @@ CONFIG_ARCH_SELECTS_KEXEC_FILE=y CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y CONFIG_COMPAT=y @@ -28948,7 +30058,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_RELOCATABLE is not set # CONFIG_RANDOMIZE_BASE is not set # end of Kernel features -@@ -390,6 +418,7 @@ CONFIG_COMPAT=y +@@ -390,6 +426,7 @@ CONFIG_COMPAT=y CONFIG_CMDLINE="" CONFIG_EFI_STUB=y CONFIG_EFI=y @@ -28956,7 +30066,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CC_HAVE_STACKPROTECTOR_TLS=y CONFIG_STACKPROTECTOR_PER_TASK=y CONFIG_RISCV_ISA_FALLBACK=y -@@ -420,7 +449,7 @@ CONFIG_PM_GENERIC_DOMAINS=y +@@ -420,7 +457,7 @@ 
CONFIG_PM_GENERIC_DOMAINS=y CONFIG_PM_GENERIC_DOMAINS_SLEEP=y CONFIG_PM_GENERIC_DOMAINS_OF=y CONFIG_CPU_PM=y @@ -28965,7 +30075,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_ARCH_SUSPEND_POSSIBLE=y # end of Power management options -@@ -436,6 +465,7 @@ CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y +@@ -436,6 +473,7 @@ CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y # CONFIG_CPU_IDLE_GOV_LADDER is not set CONFIG_CPU_IDLE_GOV_MENU=y CONFIG_CPU_IDLE_GOV_TEO=y @@ -28973,21 +30083,28 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_DT_IDLE_STATES=y CONFIG_DT_IDLE_GENPD=y -@@ -471,6 +501,8 @@ CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y +@@ -471,6 +509,9 @@ CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y # CONFIG_CPUFREQ_DT=y CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_RISCV_XUANTIE_TH1520_CPUFREQ=y -+# CONFIG_ACPI_CPPC_CPUFREQ is not set ++CONFIG_ACPI_CPPC_CPUFREQ=m ++CONFIG_ACPI_CPPC_CPUFREQ_FIE=y # end of CPU Frequency scaling # end of CPU Power Management -@@ -485,9 +517,53 @@ CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y +@@ -480,14 +521,65 @@ CONFIG_HAVE_KVM_IRQ_ROUTING=y + CONFIG_HAVE_KVM_EVENTFD=y + CONFIG_KVM_MMIO=y + CONFIG_HAVE_KVM_MSI=y ++CONFIG_KVM_VFIO=y + CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y ++CONFIG_HAVE_KVM_IRQ_BYPASS=y + CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y CONFIG_KVM_XFER_TO_GUEST_WORK=y CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y CONFIG_VIRTUALIZATION=y --CONFIG_KVM=m -+CONFIG_KVM=y + CONFIG_KVM=m CONFIG_ARCH_SUPPORTS_ACPI=y -# CONFIG_ACPI is not set +CONFIG_ACPI=y @@ -29004,6 +30121,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y ++CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +# CONFIG_ACPI_IPMI is not set +CONFIG_ACPI_THERMAL=y @@ -29016,9 +30134,13 @@ index 61f2b2f12589..4ef5c9933ef9 100644 +# CONFIG_ACPI_NFIT is not set +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_HMAT is not set ++CONFIG_HAVE_ACPI_APEI=y ++# CONFIG_ACPI_APEI is not set +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set ++CONFIG_ACPI_RIMT=y +CONFIG_ACPI_PPTT=y 
++CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_HAVE_LIVEPATCH_WO_FTRACE=y @@ -29038,16 +30160,20 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # General architecture-dependent options -@@ -524,6 +600,8 @@ CONFIG_HAVE_PERF_REGS=y +@@ -524,7 +616,12 @@ CONFIG_HAVE_PERF_REGS=y CONFIG_HAVE_PERF_USER_STACK_DUMP=y CONFIG_HAVE_ARCH_JUMP_LABEL=y CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y CONFIG_MMU_LAZY_TLB_REFCOUNT=y ++CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y ++CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y ++CONFIG_ARCH_WEAK_RELEASE_ACQUIRE=y CONFIG_HAVE_ARCH_SECCOMP=y CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -@@ -565,7 +643,7 @@ CONFIG_VMAP_STACK=y + CONFIG_SECCOMP=y +@@ -565,7 +662,7 @@ CONFIG_VMAP_STACK=y CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y @@ -29056,7 +30182,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y CONFIG_STRICT_MODULE_RWX=y CONFIG_ARCH_USE_MEMREMAP_PROT=y -@@ -585,6 +663,11 @@ CONFIG_DYNAMIC_SIGFRAME=y +@@ -585,7 +682,14 @@ CONFIG_DYNAMIC_SIGFRAME=y CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y # end of GCOV-based kernel profiling @@ -29066,9 +30192,20 @@ index 61f2b2f12589..4ef5c9933ef9 100644 +# end of Profile Guided Optimization (PGO) + CONFIG_HAVE_GCC_PLUGINS=y ++CONFIG_GCC_PLUGINS=y ++# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set CONFIG_FUNCTION_ALIGNMENT=0 # end of General architecture-dependent options -@@ -646,6 +729,7 @@ CONFIG_BLK_INLINE_ENCRYPTION=y + +@@ -638,6 +742,7 @@ CONFIG_BLK_WBT_MQ=y + # CONFIG_BLK_CGROUP_IOLATENCY is not set + # CONFIG_BLK_CGROUP_FC_APPID is not set + # CONFIG_BLK_CGROUP_IOCOST is not set ++# CONFIG_BLK_CGROUP_IOINFLIGHT is not set + # CONFIG_BLK_CGROUP_IOPRIO is not set + CONFIG_BLK_DEBUG_FS=y + CONFIG_BLK_DEBUG_FS_ZONED=y +@@ -646,6 +751,7 @@ CONFIG_BLK_INLINE_ENCRYPTION=y CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y # 
CONFIG_BLK_DEV_DETECT_WRITING_PART0 is not set # CONFIG_BLK_DEV_WRITE_MOUNTED_DUMP is not set @@ -29076,7 +30213,14 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_BLK_IO_HIERARCHY_STATS is not set # -@@ -706,6 +790,8 @@ CONFIG_QUEUED_RWLOCKS=y +@@ -701,11 +807,15 @@ CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y + CONFIG_MUTEX_SPIN_ON_OWNER=y + CONFIG_RWSEM_SPIN_ON_OWNER=y + CONFIG_LOCK_SPIN_ON_OWNER=y ++CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y ++CONFIG_QUEUED_SPINLOCKS=y + CONFIG_ARCH_USE_QUEUED_RWLOCKS=y + CONFIG_QUEUED_RWLOCKS=y CONFIG_ARCH_HAS_MMIOWB=y CONFIG_MMIOWB=y CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y @@ -29085,7 +30229,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y # CONFIG_PID_MAX_PER_NAMESPACE is not set CONFIG_FREEZER=y -@@ -771,6 +857,8 @@ CONFIG_SPARSEMEM_EXTREME=y +@@ -771,6 +881,8 @@ CONFIG_SPARSEMEM_EXTREME=y CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y CONFIG_SPARSEMEM_VMEMMAP=y CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y @@ -29094,7 +30238,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_MEMORY_ISOLATION=y CONFIG_EXCLUSIVE_SYSTEM_RAM=y CONFIG_SPLIT_PTLOCK_CPUS=4 -@@ -795,13 +883,14 @@ CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +@@ -795,13 +907,14 @@ CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y # CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set CONFIG_THP_SWAP=y # CONFIG_READ_ONLY_THP_FOR_FS is not set @@ -29112,7 +30256,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_GENERIC_EARLY_IOREMAP=y # CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set CONFIG_PAGE_IDLE_FLAG=y -@@ -833,6 +922,8 @@ CONFIG_LOCK_MM_AND_FIND_VMA=y +@@ -833,6 +946,8 @@ CONFIG_LOCK_MM_AND_FIND_VMA=y # # CONFIG_DAMON is not set # end of Data Access Monitoring @@ -29121,15 +30265,16 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # end of Memory Management options CONFIG_NET=y -@@ -870,6 +961,7 @@ CONFIG_NET_KEY=m +@@ -870,6 +985,8 @@ CONFIG_NET_KEY=m CONFIG_NET_KEY_MIGRATE=y # CONFIG_SMC is not set # CONFIG_XDP_SOCKETS is not set +# CONFIG_OENETCLS is not set ++CONFIG_UB_UMS=m 
CONFIG_NET_HANDSHAKE=y CONFIG_INET=y CONFIG_IP_MULTICAST=y -@@ -1346,10 +1438,10 @@ CONFIG_L2TP_DEBUGFS=m +@@ -1346,10 +1463,10 @@ CONFIG_L2TP_DEBUGFS=m CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m @@ -29142,7 +30287,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_BRIDGE_IGMP_SNOOPING=y CONFIG_BRIDGE_VLAN_FILTERING=y # CONFIG_BRIDGE_MRP is not set -@@ -1358,7 +1450,7 @@ CONFIG_BRIDGE_VLAN_FILTERING=y +@@ -1358,7 +1475,7 @@ CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y @@ -29151,7 +30296,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_LLC2 is not set # CONFIG_ATALK is not set # CONFIG_X25 is not set -@@ -1496,6 +1588,7 @@ CONFIG_CGROUP_NET_CLASSID=y +@@ -1496,6 +1613,7 @@ CONFIG_CGROUP_NET_CLASSID=y CONFIG_NET_RX_BUSY_POLL=y CONFIG_BQL=y # CONFIG_BPF_STREAM_PARSER is not set @@ -29159,7 +30304,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_NET_FLOW_LIMIT=y # -@@ -1513,7 +1606,54 @@ CONFIG_CAN_BCM=m +@@ -1513,7 +1631,54 @@ CONFIG_CAN_BCM=m CONFIG_CAN_GW=m # CONFIG_CAN_J1939 is not set # CONFIG_CAN_ISOTP is not set @@ -29215,7 +30360,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_AF_RXRPC is not set # CONFIG_AF_KCM is not set CONFIG_STREAM_PARSER=y -@@ -1522,7 +1662,7 @@ CONFIG_FIB_RULES=y +@@ -1522,7 +1687,7 @@ CONFIG_FIB_RULES=y CONFIG_WIRELESS=y CONFIG_WEXT_CORE=y CONFIG_WEXT_PROC=y @@ -29224,7 +30369,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_NL80211_TESTMODE is not set # CONFIG_CFG80211_DEVELOPER_WARNINGS is not set # CONFIG_CFG80211_CERTIFICATION_ONUS is not set -@@ -1532,7 +1672,7 @@ CONFIG_CFG80211_DEFAULT_PS=y +@@ -1532,7 +1697,7 @@ CONFIG_CFG80211_DEFAULT_PS=y # CONFIG_CFG80211_DEBUGFS is not set CONFIG_CFG80211_CRDA_SUPPORT=y CONFIG_CFG80211_WEXT=y @@ -29233,7 +30378,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_MAC80211_HAS_RC=y CONFIG_MAC80211_RC_MINSTREL=y CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -@@ -1543,7 +1683,7 @@ CONFIG_MAC80211_DEBUGFS=y +@@ -1543,7 
+1708,7 @@ CONFIG_MAC80211_DEBUGFS=y # CONFIG_MAC80211_MESSAGE_TRACING is not set # CONFIG_MAC80211_DEBUG_MENU is not set CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 @@ -29242,7 +30387,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_RFKILL_LEDS=y CONFIG_RFKILL_INPUT=y CONFIG_RFKILL_GPIO=m -@@ -1573,6 +1713,7 @@ CONFIG_FAILOVER=y +@@ -1573,6 +1738,7 @@ CONFIG_FAILOVER=y CONFIG_ETHTOOL_NETLINK=y CONFIG_NETACC_BPF=y CONFIG_NETACC_TERRACE=y @@ -29250,7 +30395,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Device Drivers -@@ -1595,6 +1736,7 @@ CONFIG_PCIEASPM_DEFAULT=y +@@ -1595,6 +1761,7 @@ CONFIG_PCIEASPM_DEFAULT=y CONFIG_PCIE_PME=y CONFIG_PCIE_DPC=y # CONFIG_PCIE_PTM is not set @@ -29258,7 +30403,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_PCI_MSI=y CONFIG_PCI_QUIRKS=y # CONFIG_PCI_DEBUG is not set -@@ -1606,6 +1748,7 @@ CONFIG_PCI_ECAM=y +@@ -1606,6 +1773,7 @@ CONFIG_PCI_ECAM=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y CONFIG_PCI_PASID=y @@ -29266,7 +30411,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_PCI_DYNAMIC_OF_NODES is not set # CONFIG_PCIE_BUS_TUNE_OFF is not set CONFIG_PCIE_BUS_DEFAULT=y -@@ -1615,6 +1758,7 @@ CONFIG_PCIE_BUS_DEFAULT=y +@@ -1615,6 +1783,7 @@ CONFIG_PCIE_BUS_DEFAULT=y CONFIG_VGA_ARB=y CONFIG_VGA_ARB_MAX_GPUS=64 CONFIG_HOTPLUG_PCI=y @@ -29274,7 +30419,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_HOTPLUG_PCI_CPCI is not set CONFIG_HOTPLUG_PCI_SHPC=y -@@ -1625,6 +1769,8 @@ CONFIG_HOTPLUG_PCI_SHPC=y +@@ -1625,6 +1794,8 @@ CONFIG_HOTPLUG_PCI_SHPC=y CONFIG_PCI_HOST_COMMON=y CONFIG_PCI_HOST_GENERIC=y CONFIG_PCIE_MICROCHIP_HOST=y @@ -29283,7 +30428,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_PCIE_XILINX=y # -@@ -1636,6 +1782,7 @@ CONFIG_PCIE_CADENCE_EP=y +@@ -1636,6 +1807,7 @@ CONFIG_PCIE_CADENCE_EP=y CONFIG_PCIE_CADENCE_PLAT=y CONFIG_PCIE_CADENCE_PLAT_HOST=y CONFIG_PCIE_CADENCE_PLAT_EP=y @@ -29291,7 +30436,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_PCI_J721E=y CONFIG_PCI_J721E_HOST=y # CONFIG_PCI_J721E_EP is not set 
-@@ -1647,6 +1794,7 @@ CONFIG_PCI_J721E_HOST=y +@@ -1647,11 +1819,13 @@ CONFIG_PCI_J721E_HOST=y CONFIG_PCIE_DW=y CONFIG_PCIE_DW_HOST=y CONFIG_PCIE_DW_EP=y @@ -29299,7 +30444,13 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_PCI_MESON is not set CONFIG_PCIE_DW_PLAT=y CONFIG_PCIE_DW_PLAT_HOST=y -@@ -1697,7 +1845,9 @@ CONFIG_FW_LOADER=y + CONFIG_PCIE_DW_PLAT_EP=y + CONFIG_PCIE_FU740=y ++CONFIG_PCIE_ULTRARISC=y + # end of DesignWare-based PCIe controllers + + # +@@ -1697,7 +1871,9 @@ CONFIG_FW_LOADER=y CONFIG_FW_LOADER_DEBUG=y CONFIG_EXTRA_FIRMWARE="" # CONFIG_FW_LOADER_USER_HELPER is not set @@ -29310,7 +30461,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_FW_CACHE=y # CONFIG_FW_UPLOAD is not set # end of Firmware loader -@@ -1709,10 +1859,12 @@ CONFIG_WANT_DEV_COREDUMP=y +@@ -1709,10 +1885,12 @@ CONFIG_WANT_DEV_COREDUMP=y # CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set # CONFIG_TEST_ASYNC_DRIVER_PROBE is not set CONFIG_GENERIC_CPU_DEVICES=y @@ -29325,7 +30476,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_DMA_SHARED_BUFFER=y # CONFIG_DMA_FENCE_TRACE is not set CONFIG_GENERIC_ARCH_TOPOLOGY=y -@@ -1724,6 +1876,8 @@ CONFIG_GENERIC_ARCH_NUMA=y +@@ -1724,6 +1902,8 @@ CONFIG_GENERIC_ARCH_NUMA=y # Bus devices # # CONFIG_MOXTET is not set @@ -29334,7 +30485,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_MHI_BUS is not set # CONFIG_MHI_BUS_EP is not set # end of Bus devices -@@ -1747,6 +1901,10 @@ CONFIG_PROC_EVENTS=y +@@ -1747,6 +1927,10 @@ CONFIG_PROC_EVENTS=y # end of ARM System Control and Management Interface Protocol # CONFIG_FIRMWARE_MEMMAP is not set @@ -29345,7 +30496,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SYSFB=y CONFIG_SYSFB_SIMPLEFB=y # CONFIG_GOOGLE_FIRMWARE is not set -@@ -1767,6 +1925,7 @@ CONFIG_EFI_GENERIC_STUB=y +@@ -1767,19 +1951,30 @@ CONFIG_EFI_GENERIC_STUB=y # CONFIG_RESET_ATTACK_MITIGATION is not set # CONFIG_EFI_DISABLE_PCI_DMA is not set CONFIG_EFI_EARLYCON=y @@ -29353,7 +30504,14 @@ index 61f2b2f12589..4ef5c9933ef9 
100644 # CONFIG_EFI_DISABLE_RUNTIME is not set # CONFIG_EFI_COCO_SECRET is not set # end of EFI (Extensible Firmware Interface) Support -@@ -1775,11 +1934,15 @@ CONFIG_EFI_EARLYCON=y + ++# ++# Risc-V Specific firmware drivers ++# ++CONFIG_RISCV_SSE=y ++# end of Risc-V Specific firmware drivers ++ + # # Tegra firmware driver # # end of Tegra firmware driver @@ -29371,7 +30529,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Partition parsers -@@ -1793,9 +1956,8 @@ CONFIG_MTD_OF_PARTS=m +@@ -1793,9 +1988,8 @@ CONFIG_MTD_OF_PARTS=m # # User Modules And Translation Layers # @@ -29383,7 +30541,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. -@@ -1862,8 +2024,8 @@ CONFIG_MTD_PHYSMAP_OF=y +@@ -1862,8 +2056,8 @@ CONFIG_MTD_PHYSMAP_OF=y # CONFIG_MTD_MCHP23K256 is not set # CONFIG_MTD_MCHP48L640 is not set # CONFIG_MTD_SST25L is not set @@ -29394,7 +30552,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_MTD_MTDRAM is not set CONFIG_MTD_BLOCK2MTD=m -@@ -1876,13 +2038,15 @@ CONFIG_MTD_BLOCK2MTD=m +@@ -1876,13 +2070,15 @@ CONFIG_MTD_BLOCK2MTD=m # # NAND # @@ -29411,7 +30569,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_MTD_NAND_ECC_SW_HAMMING is not set # CONFIG_MTD_NAND_ECC_SW_BCH is not set # CONFIG_MTD_NAND_ECC_MXIC is not set -@@ -1895,12 +2059,13 @@ CONFIG_MTD_BLOCK2MTD=m +@@ -1895,12 +2091,13 @@ CONFIG_MTD_BLOCK2MTD=m # CONFIG_MTD_LPDDR is not set # end of LPDDR & LPDDR2 PCM memory drivers @@ -29427,7 +30585,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_MTD_UBI_WL_THRESHOLD=4096 CONFIG_MTD_UBI_BEB_LIMIT=20 # CONFIG_MTD_UBI_FASTMAP is not set -@@ -1921,6 +2086,13 @@ CONFIG_OF_RESOLVE=y +@@ -1921,6 +2118,13 @@ CONFIG_OF_RESOLVE=y CONFIG_OF_OVERLAY=y CONFIG_OF_NUMA=y # CONFIG_PARPORT is not set @@ -29441,7 +30599,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_BLK_DEV=y CONFIG_BLK_DEV_NULL_BLK=m CONFIG_CDROM=y -@@ -1939,7 +2111,7 @@ CONFIG_BLK_DEV_LOOP=y +@@ -1939,7 +2143,7 @@ 
CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 CONFIG_BLK_DEV_DRBD=m # CONFIG_DRBD_FAULT_INJECTION is not set @@ -29450,7 +30608,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_BLK_DEV_RAM=m CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=16384 -@@ -2008,7 +2180,7 @@ CONFIG_MISC_RTSX=m +@@ -2008,7 +2212,7 @@ CONFIG_MISC_RTSX=m # # EEPROM support # @@ -29459,7 +30617,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_EEPROM_AT25 is not set CONFIG_EEPROM_LEGACY=m CONFIG_EEPROM_MAX6875=m -@@ -2028,7 +2200,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y +@@ -2028,7 +2232,6 @@ CONFIG_CB710_DEBUG_ASSUMPTIONS=y # CONFIG_TI_ST is not set # end of Texas Instruments shared transport line discipline @@ -29467,7 +30625,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SENSORS_LIS3_I2C=m CONFIG_ALTERA_STAPL=m # CONFIG_GENWQE is not set -@@ -2109,7 +2280,9 @@ CONFIG_SCSI_MPT3SAS=m +@@ -2109,7 +2312,9 @@ CONFIG_SCSI_MPT3SAS=m CONFIG_SCSI_MPT2SAS_MAX_SGE=128 CONFIG_SCSI_MPT3SAS_MAX_SGE=128 CONFIG_SCSI_MPT2SAS=m @@ -29477,7 +30635,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SCSI_SMARTPQI=m # CONFIG_SCSI_HPTIOP is not set # CONFIG_SCSI_BUSLOGIC is not set -@@ -2156,8 +2329,11 @@ CONFIG_SCSI_DH_ALUA=y +@@ -2156,8 +2361,11 @@ CONFIG_SCSI_DH_ALUA=y CONFIG_ATA=y CONFIG_SATA_HOST=y @@ -29489,7 +30647,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SATA_PMP=y # -@@ -2168,6 +2344,7 @@ CONFIG_SATA_MOBILE_LPM_POLICY=0 +@@ -2168,6 +2376,7 @@ CONFIG_SATA_MOBILE_LPM_POLICY=0 CONFIG_SATA_AHCI_PLATFORM=y # CONFIG_AHCI_DWC is not set # CONFIG_AHCI_CEVA is not set @@ -29497,7 +30655,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SATA_INIC162X is not set # CONFIG_SATA_ACARD_AHCI is not set # CONFIG_SATA_SIL24 is not set -@@ -2189,6 +2366,7 @@ CONFIG_ATA_PIIX=m +@@ -2189,6 +2398,7 @@ CONFIG_ATA_PIIX=m # CONFIG_SATA_MV is not set # CONFIG_SATA_NV is not set # CONFIG_SATA_PROMISE is not set @@ -29505,7 +30663,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SATA_SIL is 
not set # CONFIG_SATA_SIS is not set # CONFIG_SATA_SVW is not set -@@ -2247,6 +2425,7 @@ CONFIG_ATA_PIIX=m +@@ -2247,6 +2457,7 @@ CONFIG_ATA_PIIX=m # # Generic fallback / legacy drivers # @@ -29513,7 +30671,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_ATA_GENERIC=m # CONFIG_PATA_LEGACY is not set CONFIG_MD=y -@@ -2265,14 +2444,14 @@ CONFIG_BCACHE=m +@@ -2265,14 +2476,14 @@ CONFIG_BCACHE=m # CONFIG_BCACHE_CLOSURES_DEBUG is not set # CONFIG_BCACHE_ASYNC_REGISTRATION is not set CONFIG_BLK_DEV_DM_BUILTIN=y @@ -29530,7 +30688,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m -@@ -2292,6 +2471,7 @@ CONFIG_DM_MULTIPATH_ST=m +@@ -2292,6 +2503,7 @@ CONFIG_DM_MULTIPATH_ST=m # CONFIG_DM_MULTIPATH_IOA is not set CONFIG_DM_DELAY=m # CONFIG_DM_DUST is not set @@ -29538,7 +30696,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_DM_UEVENT=y CONFIG_DM_FLAKEY=m CONFIG_DM_VERITY=m -@@ -2322,7 +2502,7 @@ CONFIG_ISCSI_TARGET_CXGB4=m +@@ -2322,7 +2534,7 @@ CONFIG_ISCSI_TARGET_CXGB4=m # end of IEEE 1394 (FireWire) support CONFIG_NETDEVICES=y @@ -29547,7 +30705,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_NET_CORE=y CONFIG_BONDING=m CONFIG_DUMMY=m -@@ -2366,10 +2546,13 @@ CONFIG_VSOCKMON=m +@@ -2366,10 +2578,13 @@ CONFIG_VSOCKMON=m CONFIG_ETHERNET=y CONFIG_MDIO=m # CONFIG_NET_VENDOR_3COM is not set @@ -29561,7 +30719,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_NET_VENDOR_ALTEON is not set # CONFIG_ALTERA_TSE is not set CONFIG_NET_VENDOR_AMAZON=y -@@ -2406,14 +2589,13 @@ CONFIG_BNXT_DCB=y +@@ -2406,14 +2621,13 @@ CONFIG_BNXT_DCB=y # CONFIG_BNXT_HWMON is not set CONFIG_NET_VENDOR_CADENCE=y CONFIG_MACB=y @@ -29577,7 +30735,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_LIQUIDIO_CORE=m CONFIG_LIQUIDIO=m CONFIG_LIQUIDIO_VF=m -@@ -2441,7 +2623,10 @@ CONFIG_NET_VENDOR_ENGLEDER=y +@@ -2441,7 +2655,10 @@ CONFIG_NET_VENDOR_ENGLEDER=y CONFIG_NET_VENDOR_FUNGIBLE=y # CONFIG_FUN_ETH is not set 
CONFIG_NET_VENDOR_GOOGLE=y @@ -29588,7 +30746,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_NET_VENDOR_I825XX is not set CONFIG_NET_VENDOR_INTEL=y # CONFIG_E100 is not set -@@ -2464,8 +2649,13 @@ CONFIG_ICE=m +@@ -2464,8 +2681,13 @@ CONFIG_ICE=m CONFIG_ICE_SWITCHDEV=y CONFIG_FM10K=m # CONFIG_IGC is not set @@ -29602,7 +30760,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_JME is not set CONFIG_NET_VENDOR_ADI=y # CONFIG_ADIN1110 is not set -@@ -2506,6 +2696,8 @@ CONFIG_MLXFW=m +@@ -2506,6 +2728,8 @@ CONFIG_MLXFW=m CONFIG_NET_VENDOR_MICROSEMI=y # CONFIG_MSCC_OCELOT_SWITCH is not set CONFIG_NET_VENDOR_MICROSOFT=y @@ -29611,7 +30769,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_NET_VENDOR_MYRI=y # CONFIG_MYRI10GE is not set # CONFIG_FEALNX is not set -@@ -2539,6 +2731,7 @@ CONFIG_QED_OOO=y +@@ -2539,6 +2763,7 @@ CONFIG_QED_OOO=y # CONFIG_NET_VENDOR_BROCADE is not set CONFIG_NET_VENDOR_QUALCOMM=y # CONFIG_QCA7000_SPI is not set @@ -29619,7 +30777,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_QCOM_EMAC=m # CONFIG_RMNET is not set # CONFIG_NET_VENDOR_RDC is not set -@@ -2564,13 +2757,25 @@ CONFIG_SFC_MCDI_MON=y +@@ -2564,13 +2789,25 @@ CONFIG_SFC_MCDI_MON=y CONFIG_SFC_SRIOV=y CONFIG_SFC_MCDI_LOGGING=y # CONFIG_SFC_FALCON is not set @@ -29642,12 +30800,17 @@ index 61f2b2f12589..4ef5c9933ef9 100644 +CONFIG_DWMAC_XUANTIE=y +CONFIG_DWMAC_SOPHGO=y +# CONFIG_DWMAC_INTEL_PLAT is not set -+# CONFIG_DWMAC_ULTRARISC is not set ++CONFIG_DWMAC_ULTRARISC=m +# CONFIG_STMMAC_PCI is not set # CONFIG_NET_VENDOR_SUN is not set # CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_TEHUTI is not set -@@ -2584,8 +2789,14 @@ CONFIG_NGBE=m +@@ -2579,13 +2816,18 @@ CONFIG_NET_VENDOR_VERTEXCOM=y + # CONFIG_MSE102X is not set + # CONFIG_NET_VENDOR_VIA is not set + CONFIG_NET_VENDOR_WANGXUN=y +-CONFIG_LIBWX=m + CONFIG_NGBE=m CONFIG_TXGBE=m # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set @@ -29662,7 +30825,7 @@ index 
61f2b2f12589..4ef5c9933ef9 100644 CONFIG_PHYLINK=y CONFIG_PHYLIB=y CONFIG_SWPHY=y -@@ -2661,6 +2872,7 @@ CONFIG_CAN_CALC_BITTIMING=y +@@ -2661,6 +2903,7 @@ CONFIG_CAN_CALC_BITTIMING=y # CONFIG_CAN_GRCAN is not set # CONFIG_CAN_KVASER_PCIEFD is not set CONFIG_CAN_SLCAN=m @@ -29670,7 +30833,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CAN_C_CAN=m CONFIG_CAN_C_CAN_PLATFORM=m CONFIG_CAN_C_CAN_PCI=m -@@ -2672,6 +2884,8 @@ CONFIG_CAN_CC770_PLATFORM=m +@@ -2672,6 +2915,8 @@ CONFIG_CAN_CC770_PLATFORM=m # CONFIG_CAN_IFI_CANFD is not set # CONFIG_CAN_M_CAN is not set # CONFIG_CAN_PEAK_PCIEFD is not set @@ -29679,7 +30842,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CAN_SJA1000=m CONFIG_CAN_EMS_PCI=m # CONFIG_CAN_F81601 is not set -@@ -2711,7 +2925,9 @@ CONFIG_MDIO_DEVICE=y +@@ -2711,7 +2956,9 @@ CONFIG_MDIO_DEVICE=y CONFIG_MDIO_BUS=y CONFIG_FWNODE_MDIO=y CONFIG_OF_MDIO=y @@ -29689,7 +30852,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_MDIO_BITBANG=m CONFIG_MDIO_BCM_UNIMAC=m CONFIG_MDIO_CAVIUM=m -@@ -2728,6 +2944,7 @@ CONFIG_MDIO_THUNDER=m +@@ -2728,6 +2975,7 @@ CONFIG_MDIO_THUNDER=m # # MDIO Multiplexers # @@ -29697,7 +30860,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_MDIO_BUS_MUX_GPIO is not set # CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set # CONFIG_MDIO_BUS_MUX_MMIOREG is not set -@@ -2735,7 +2952,7 @@ CONFIG_MDIO_THUNDER=m +@@ -2735,7 +2983,7 @@ CONFIG_MDIO_THUNDER=m # # PCS device drivers # @@ -29706,7 +30869,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # end of PCS device drivers CONFIG_PPP=m -@@ -2768,8 +2985,8 @@ CONFIG_USB_RTL8150=m +@@ -2768,8 +3016,8 @@ CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m CONFIG_USB_LAN78XX=m CONFIG_USB_USBNET=m @@ -29717,7 +30880,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_USB_NET_CDCETHER=m CONFIG_USB_NET_CDC_EEM=m CONFIG_USB_NET_CDC_NCM=m -@@ -2781,7 +2998,7 @@ CONFIG_USB_NET_SR9700=m +@@ -2781,7 +3029,7 @@ CONFIG_USB_NET_SR9700=m CONFIG_USB_NET_SMSC75XX=m CONFIG_USB_NET_SMSC95XX=m 
CONFIG_USB_NET_GL620A=m @@ -29726,7 +30889,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_USB_NET_PLUSB=m CONFIG_USB_NET_MCS7830=m CONFIG_USB_NET_RNDIS_HOST=m -@@ -2865,7 +3082,39 @@ CONFIG_RT2X00_LIB_CRYPTO=y +@@ -2865,7 +3113,39 @@ CONFIG_RT2X00_LIB_CRYPTO=y CONFIG_RT2X00_LIB_LEDS=y # CONFIG_RT2X00_LIB_DEBUGFS is not set # CONFIG_RT2X00_DEBUG is not set @@ -29767,7 +30930,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_WLAN_VENDOR_RSI is not set CONFIG_WLAN_VENDOR_SILABS=y # CONFIG_WFX is not set -@@ -2876,6 +3125,10 @@ CONFIG_WLAN_VENDOR_SILABS=y +@@ -2876,6 +3156,10 @@ CONFIG_WLAN_VENDOR_SILABS=y # CONFIG_USB_NET_RNDIS_WLAN is not set # CONFIG_MAC80211_HWSIM is not set # CONFIG_VIRT_WIFI is not set @@ -29778,7 +30941,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_WAN=y CONFIG_HDLC=m CONFIG_HDLC_RAW=m -@@ -2900,6 +3153,7 @@ CONFIG_HDLC_PPP=m +@@ -2900,6 +3184,7 @@ CONFIG_HDLC_PPP=m # end of Wireless WAN # CONFIG_VMXNET3 is not set @@ -29786,7 +30949,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_USB4_NET=m # CONFIG_NETDEVSIM is not set CONFIG_NET_FAILOVER=y -@@ -2930,6 +3184,7 @@ CONFIG_INPUT_EVDEV=y +@@ -2930,6 +3215,7 @@ CONFIG_INPUT_EVDEV=y # Input Device Drivers # CONFIG_INPUT_KEYBOARD=y @@ -29794,7 +30957,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_KEYBOARD_ADP5588 is not set # CONFIG_KEYBOARD_ADP5589 is not set CONFIG_KEYBOARD_ATKBD=y -@@ -2955,6 +3210,7 @@ CONFIG_KEYBOARD_GPIO=y +@@ -2955,6 +3241,7 @@ CONFIG_KEYBOARD_GPIO=y # CONFIG_KEYBOARD_GOLDFISH_EVENTS is not set # CONFIG_KEYBOARD_STOWAWAY is not set # CONFIG_KEYBOARD_SUNKBD is not set @@ -29802,7 +30965,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_KEYBOARD_OMAP4 is not set # CONFIG_KEYBOARD_TM2_TOUCHKEY is not set # CONFIG_KEYBOARD_XTKBD is not set -@@ -2987,7 +3243,83 @@ CONFIG_MOUSE_SYNAPTICS_I2C=m +@@ -2987,7 +3274,83 @@ CONFIG_MOUSE_SYNAPTICS_I2C=m CONFIG_MOUSE_SYNAPTICS_USB=m # CONFIG_INPUT_JOYSTICK is not set # CONFIG_INPUT_TABLET is not set @@ -29887,7 
+31050,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_INPUT_MISC=y # CONFIG_INPUT_AD714X is not set # CONFIG_INPUT_ATMEL_CAPTOUCH is not set -@@ -3004,7 +3336,7 @@ CONFIG_INPUT_MISC=y +@@ -3004,7 +3367,7 @@ CONFIG_INPUT_MISC=y # CONFIG_INPUT_YEALINK is not set # CONFIG_INPUT_CM109 is not set # CONFIG_INPUT_REGULATOR_HAPTIC is not set @@ -29896,7 +31059,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_INPUT_PCF8574 is not set # CONFIG_INPUT_PWM_BEEPER is not set # CONFIG_INPUT_PWM_VIBRA is not set -@@ -3017,9 +3349,11 @@ CONFIG_INPUT_UINPUT=m +@@ -3017,9 +3380,11 @@ CONFIG_INPUT_UINPUT=m # CONFIG_INPUT_IQS626A is not set # CONFIG_INPUT_IQS7222 is not set # CONFIG_INPUT_CMA3000 is not set @@ -29908,7 +31071,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_RMI4_CORE=m CONFIG_RMI4_I2C=m CONFIG_RMI4_SPI=m -@@ -3048,6 +3382,7 @@ CONFIG_SERIO_ALTERA_PS2=m +@@ -3048,6 +3413,7 @@ CONFIG_SERIO_ALTERA_PS2=m # CONFIG_SERIO_PS2MULT is not set CONFIG_SERIO_ARC_PS2=m # CONFIG_SERIO_APBPS2 is not set @@ -29916,7 +31079,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SERIO_GPIO_PS2 is not set # CONFIG_USERIO is not set # CONFIG_GAMEPORT is not set -@@ -3075,6 +3410,7 @@ CONFIG_LDISC_AUTOLOAD=y +@@ -3075,6 +3441,7 @@ CONFIG_LDISC_AUTOLOAD=y CONFIG_SERIAL_EARLYCON=y CONFIG_SERIAL_8250=y # CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set @@ -29924,7 +31087,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SERIAL_8250_16550A_VARIANTS=y # CONFIG_SERIAL_8250_FINTEK is not set CONFIG_SERIAL_8250_CONSOLE=y -@@ -3082,8 +3418,8 @@ CONFIG_SERIAL_8250_DMA=y +@@ -3082,8 +3449,8 @@ CONFIG_SERIAL_8250_DMA=y CONFIG_SERIAL_8250_PCILIB=y CONFIG_SERIAL_8250_PCI=y CONFIG_SERIAL_8250_EXAR=y @@ -29935,7 +31098,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y # CONFIG_SERIAL_8250_PCI1XXXX is not set -@@ -3092,6 +3428,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y +@@ -3092,6 +3459,7 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y 
CONFIG_SERIAL_8250_RSA=y CONFIG_SERIAL_8250_DWLIB=y CONFIG_SERIAL_8250_DW=y @@ -29943,7 +31106,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SERIAL_8250_RT288X=y CONFIG_SERIAL_8250_PERICOM=y CONFIG_SERIAL_OF_PLATFORM=y -@@ -3102,10 +3439,18 @@ CONFIG_SERIAL_OF_PLATFORM=y +@@ -3102,10 +3470,16 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_SERIAL_AMBA_PL010 is not set # CONFIG_SERIAL_AMBA_PL011 is not set # CONFIG_SERIAL_EARLYCON_SEMIHOST is not set @@ -29951,8 +31114,6 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SERIAL_KGDB_NMI is not set # CONFIG_SERIAL_MAX3100 is not set # CONFIG_SERIAL_MAX310X is not set -+CONFIG_SERIAL_SPACEMIT_K1X=y -+CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE=y # CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=18 @@ -29962,7 +31123,16 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y CONFIG_CONSOLE_POLL=y -@@ -3135,11 +3480,14 @@ CONFIG_N_GSM=m +@@ -3123,6 +3497,8 @@ CONFIG_SERIAL_SIFIVE_CONSOLE=y + # CONFIG_SERIAL_FSL_LINFLEXUART is not set + # CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set + # CONFIG_SERIAL_SPRD is not set ++CONFIG_SERIAL_LRW_UART=y ++CONFIG_SERIAL_LRW_UART_CONSOLE=y + # end of Serial drivers + + CONFIG_SERIAL_MCTRL_GPIO=y +@@ -3135,11 +3511,14 @@ CONFIG_N_GSM=m # CONFIG_NOZOMI is not set # CONFIG_NULL_TTY is not set CONFIG_HVC_DRIVER=y @@ -29978,7 +31148,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_IPMI_PLAT_DATA=y # CONFIG_IPMI_PANIC_EVENT is not set CONFIG_IPMI_DEVICE_INTERFACE=m -@@ -3154,6 +3502,7 @@ CONFIG_HW_RANDOM=y +@@ -3154,6 +3533,7 @@ CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_TIMERIOMEM=m # CONFIG_HW_RANDOM_BA431 is not set CONFIG_HW_RANDOM_VIRTIO=y @@ -29986,7 +31156,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_HW_RANDOM_CCTRNG is not set # CONFIG_HW_RANDOM_XIPHERA is not set # CONFIG_HW_RANDOM_JH7110 is not set -@@ -3172,7 +3521,10 @@ CONFIG_TCG_TIS_I2C_ATMEL=m +@@ -3172,7 +3552,10 @@ CONFIG_TCG_TIS_I2C_ATMEL=m 
CONFIG_TCG_TIS_I2C_INFINEON=m CONFIG_TCG_TIS_I2C_NUVOTON=m CONFIG_TCG_ATMEL=m @@ -29997,7 +31167,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_TCG_TIS_ST33ZP24=m CONFIG_TCG_TIS_ST33ZP24_I2C=m CONFIG_TCG_TIS_ST33ZP24_SPI=m -@@ -3184,6 +3536,7 @@ CONFIG_TCG_TIS_ST33ZP24_SPI=m +@@ -3184,6 +3567,7 @@ CONFIG_TCG_TIS_ST33ZP24_SPI=m # I2C support # CONFIG_I2C=y @@ -30005,7 +31175,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_I2C_BOARDINFO=y CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y -@@ -3221,6 +3574,7 @@ CONFIG_I2C_CCGX_UCSI=m +@@ -3221,6 +3605,7 @@ CONFIG_I2C_CCGX_UCSI=m # CONFIG_I2C_ALI15X3 is not set # CONFIG_I2C_AMD756 is not set # CONFIG_I2C_AMD8111 is not set @@ -30013,7 +31183,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_I2C_I801 is not set # CONFIG_I2C_ISCH is not set # CONFIG_I2C_PIIX4 is not set -@@ -3229,9 +3583,15 @@ CONFIG_I2C_NFORCE2=m +@@ -3229,9 +3614,15 @@ CONFIG_I2C_NFORCE2=m # CONFIG_I2C_SIS5595 is not set # CONFIG_I2C_SIS630 is not set # CONFIG_I2C_SIS96X is not set @@ -30029,7 +31199,12 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # I2C system bus drivers (mostly embedded / system-on-chip) # -@@ -3243,12 +3603,17 @@ CONFIG_I2C_DESIGNWARE_PCI=m +@@ -3240,15 +3631,22 @@ CONFIG_I2C_DESIGNWARE_CORE=y + CONFIG_I2C_DESIGNWARE_SLAVE=y + CONFIG_I2C_DESIGNWARE_PLATFORM=y + CONFIG_I2C_DESIGNWARE_PCI=m ++CONFIG_I2C_LRW_CORE=m ++CONFIG_I2C_LRW_PLATFORM=m # CONFIG_I2C_EMEV2 is not set CONFIG_I2C_GPIO=m # CONFIG_I2C_GPIO_FAULT_INJECTOR is not set @@ -30047,7 +31222,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # External I2C/SMBus adapter drivers -@@ -3290,6 +3655,7 @@ CONFIG_SPI_MEM=y +@@ -3290,6 +3688,7 @@ CONFIG_SPI_MEM=y CONFIG_SPI_CADENCE=m # CONFIG_SPI_CADENCE_QUADSPI is not set # CONFIG_SPI_CADENCE_XSPI is not set @@ -30055,7 +31230,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SPI_DESIGNWARE=y # CONFIG_SPI_DW_DMA is not set CONFIG_SPI_DW_PCI=m -@@ -3302,9 +3668,17 @@ CONFIG_SPI_DW_MMIO=y +@@ -3302,9 +3701,17 @@ 
CONFIG_SPI_DW_MMIO=y # CONFIG_SPI_PCI1XXXX is not set # CONFIG_SPI_PL022 is not set # CONFIG_SPI_PXA2XX is not set @@ -30073,7 +31248,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SPI_MXIC is not set # CONFIG_SPI_XCOMM is not set # CONFIG_SPI_XILINX is not set -@@ -3319,7 +3693,7 @@ CONFIG_SPI_SIFIVE=y +@@ -3319,7 +3726,7 @@ CONFIG_SPI_SIFIVE=y # # SPI Protocol Masters # @@ -30082,7 +31257,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SPI_LOOPBACK_TEST is not set # CONFIG_SPI_TLE62X0 is not set # CONFIG_SPI_SLAVE is not set -@@ -3343,14 +3717,8 @@ CONFIG_PPS_CLIENT_GPIO=m +@@ -3343,14 +3750,8 @@ CONFIG_PPS_CLIENT_GPIO=m # # PTP clock support # @@ -30098,7 +31273,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # end of PTP clock support CONFIG_PINCTRL=y -@@ -3360,26 +3728,58 @@ CONFIG_GENERIC_PINMUX_FUNCTIONS=y +@@ -3360,26 +3761,59 @@ CONFIG_GENERIC_PINMUX_FUNCTIONS=y CONFIG_PINCONF=y CONFIG_GENERIC_PINCONF=y # CONFIG_DEBUG_PINCTRL is not set @@ -30112,7 +31287,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_PINCTRL_STMFX is not set # CONFIG_PINCTRL_SX150X is not set +CONFIG_PINCTRL_TH1520=y -+CONFIG_PINCTRL_SPACEMIT_K1X=y ++CONFIG_PINCTRL_SPACEMIT_K1=y # # Renesas pinctrl drivers @@ -30149,7 +31324,8 @@ index 61f2b2f12589..4ef5c9933ef9 100644 +# CONFIG_PINCTRL_SUN50I_H6_R is not set +# CONFIG_PINCTRL_SUN50I_H616 is not set +# CONFIG_PINCTRL_SUN50I_H616_R is not set -+# CONFIG_PINCTRL_ULTRARISC_DP1000 is not set ++CONFIG_PINCTRL_ULTRARISC=y ++CONFIG_PINCTRL_ULTRARISC_DP1000=y CONFIG_GPIOLIB=y CONFIG_GPIOLIB_FASTPATH_LIMIT=512 CONFIG_OF_GPIO=y @@ -30157,7 +31333,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_GPIOLIB_IRQCHIP=y # CONFIG_DEBUG_GPIO is not set CONFIG_GPIO_SYSFS=y -@@ -3392,6 +3792,7 @@ CONFIG_GPIO_GENERIC=y +@@ -3392,6 +3826,7 @@ CONFIG_GPIO_GENERIC=y # # CONFIG_GPIO_74XX_MMIO is not set # CONFIG_GPIO_ALTERA is not set @@ -30165,7 +31341,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_GPIO_CADENCE=m CONFIG_GPIO_DWAPB=y # 
CONFIG_GPIO_EXAR is not set -@@ -3402,6 +3803,7 @@ CONFIG_GPIO_GENERIC_PLATFORM=m +@@ -3402,6 +3837,7 @@ CONFIG_GPIO_GENERIC_PLATFORM=m # CONFIG_GPIO_LOGICVC is not set # CONFIG_GPIO_MB86S7X is not set # CONFIG_GPIO_PL061 is not set @@ -30173,7 +31349,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_GPIO_SIFIVE=y # CONFIG_GPIO_SYSCON is not set # CONFIG_GPIO_XILINX is not set -@@ -3417,7 +3819,8 @@ CONFIG_GPIO_SIFIVE=y +@@ -3417,7 +3853,8 @@ CONFIG_GPIO_SIFIVE=y # CONFIG_GPIO_GW_PLD is not set # CONFIG_GPIO_MAX7300 is not set # CONFIG_GPIO_MAX732X is not set @@ -30183,15 +31359,15 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_GPIO_PCA9570 is not set # CONFIG_GPIO_PCF857X is not set # CONFIG_GPIO_TPIC2810 is not set -@@ -3461,6 +3864,7 @@ CONFIG_GPIO_SIFIVE=y +@@ -3461,6 +3898,7 @@ CONFIG_GPIO_SIFIVE=y # CONFIG_GPIO_MOCKUP is not set # CONFIG_GPIO_VIRTIO is not set # CONFIG_GPIO_SIM is not set -+CONFIG_GPIO_K1X=y ++CONFIG_GPIO_K1=y # end of Virtual GPIO drivers # CONFIG_W1 is not set -@@ -3477,6 +3881,7 @@ CONFIG_POWER_RESET_SYSCON_POWEROFF=y +@@ -3477,6 +3915,7 @@ CONFIG_POWER_RESET_SYSCON_POWEROFF=y CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set CONFIG_POWER_SUPPLY_HWMON=y @@ -30199,7 +31375,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_IP5XXX_POWER is not set # CONFIG_TEST_POWER is not set # CONFIG_CHARGER_ADP5061 is not set -@@ -3565,6 +3970,7 @@ CONFIG_SENSORS_G762=m +@@ -3565,6 +4004,7 @@ CONFIG_SENSORS_G762=m # CONFIG_SENSORS_HS3001 is not set CONFIG_SENSORS_IBMAEM=m CONFIG_SENSORS_IBMPEX=m @@ -30207,7 +31383,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SENSORS_IT87=m CONFIG_SENSORS_JC42=m CONFIG_SENSORS_POWR1220=m -@@ -3600,7 +4006,7 @@ CONFIG_SENSORS_MAX31790=m +@@ -3600,7 +4040,7 @@ CONFIG_SENSORS_MAX31790=m CONFIG_SENSORS_MCP3021=m # CONFIG_SENSORS_TC654 is not set # CONFIG_SENSORS_TPS23861 is not set @@ -30216,7 +31392,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SENSORS_ADCXX=m CONFIG_SENSORS_LM63=m 
CONFIG_SENSORS_LM70=m -@@ -3620,6 +4026,7 @@ CONFIG_SENSORS_LM95241=m +@@ -3620,6 +4060,7 @@ CONFIG_SENSORS_LM95241=m CONFIG_SENSORS_LM95245=m CONFIG_SENSORS_PC87360=m CONFIG_SENSORS_PC87427=m @@ -30224,7 +31400,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SENSORS_NCT6683=m # CONFIG_SENSORS_NCT6775 is not set # CONFIG_SENSORS_NCT6775_I2C is not set -@@ -3680,7 +4087,7 @@ CONFIG_SENSORS_UCD9200=m +@@ -3680,7 +4121,7 @@ CONFIG_SENSORS_UCD9200=m # CONFIG_SENSORS_XDPE152 is not set # CONFIG_SENSORS_XDPE122 is not set CONFIG_SENSORS_ZL6100=m @@ -30233,10 +31409,11 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SENSORS_SBTSI is not set # CONFIG_SENSORS_SBRMI is not set CONFIG_SENSORS_SHT15=m -@@ -3733,9 +4140,14 @@ CONFIG_SENSORS_W83L785TS=m +@@ -3733,9 +4174,15 @@ CONFIG_SENSORS_W83L785TS=m CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m CONFIG_SENSORS_W83627EHF=m ++# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers @@ -30249,7 +31426,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 CONFIG_THERMAL_HWMON=y CONFIG_THERMAL_OF=y -@@ -3743,41 +4155,62 @@ CONFIG_THERMAL_OF=y +@@ -3743,41 +4190,62 @@ CONFIG_THERMAL_OF=y CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y # CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set # CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set @@ -30315,7 +31492,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # PCI-based Watchdog Cards -@@ -3804,8 +4237,9 @@ CONFIG_BCMA_DRIVER_GPIO=y +@@ -3804,8 +4272,9 @@ CONFIG_BCMA_DRIVER_GPIO=y # # Multifunction device drivers # @@ -30326,7 +31503,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_MFD_AS3711 is not set # CONFIG_MFD_SMPRO is not set # CONFIG_MFD_AS3722 is not set -@@ -3877,8 +4311,8 @@ CONFIG_MFD_CORE=m +@@ -3877,8 +4346,8 @@ CONFIG_MFD_CORE=m # CONFIG_MFD_SM501 is not set # CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_STMPE is not set @@ -30336,7 +31513,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_MFD_LP3943 is not set # 
CONFIG_MFD_LP8788 is not set # CONFIG_MFD_TI_LMU is not set -@@ -3921,6 +4355,8 @@ CONFIG_MFD_SYSCON=y +@@ -3921,6 +4390,8 @@ CONFIG_MFD_SYSCON=y # CONFIG_MFD_STMFX is not set # CONFIG_MFD_ATC260X_I2C is not set # CONFIG_MFD_QCOM_PM8008 is not set @@ -30345,7 +31522,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_MFD_INTEL_M10_BMC_SPI is not set # CONFIG_MFD_RSMU_I2C is not set # CONFIG_MFD_RSMU_SPI is not set -@@ -3987,6 +4423,7 @@ CONFIG_REGULATOR_PWM=y +@@ -3987,6 +4458,7 @@ CONFIG_REGULATOR_PWM=y # CONFIG_REGULATOR_RTQ6752 is not set # CONFIG_REGULATOR_RTQ2208 is not set # CONFIG_REGULATOR_SLG51000 is not set @@ -30353,7 +31530,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_REGULATOR_SY8106A is not set # CONFIG_REGULATOR_SY8824X is not set # CONFIG_REGULATOR_SY8827N is not set -@@ -3999,6 +4436,7 @@ CONFIG_REGULATOR_PWM=y +@@ -3999,6 +4471,7 @@ CONFIG_REGULATOR_PWM=y # CONFIG_REGULATOR_TPS65132 is not set # CONFIG_REGULATOR_TPS6524X is not set # CONFIG_REGULATOR_VCTRL is not set @@ -30361,7 +31538,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_RC_CORE is not set # -@@ -4007,7 +4445,7 @@ CONFIG_REGULATOR_PWM=y +@@ -4007,7 +4480,7 @@ CONFIG_REGULATOR_PWM=y # CONFIG_MEDIA_CEC_SUPPORT is not set # end of CEC support @@ -30370,7 +31547,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_MEDIA_SUPPORT_FILTER is not set # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set -@@ -4131,10 +4569,12 @@ CONFIG_RADIO_ADAPTERS=m +@@ -4131,10 +4604,12 @@ CONFIG_RADIO_ADAPTERS=m # CONFIG_USB_RAREMONO is not set # CONFIG_RADIO_SI470X is not set CONFIG_MEDIA_PLATFORM_DRIVERS=y @@ -30385,7 +31562,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Allegro DVT media platform drivers -@@ -4173,6 +4613,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4173,6 +4648,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Marvell media platform drivers # @@ -30393,7 +31570,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Mediatek media platform drivers -@@ -4197,6 +4638,15 @@ 
CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4197,6 +4673,15 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Renesas media platform drivers # @@ -30409,7 +31586,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Rockchip media platform drivers -@@ -4213,6 +4663,11 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4213,6 +4698,11 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Sunxi media platform drivers # @@ -30421,7 +31598,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Texas Instruments drivers -@@ -4221,6 +4676,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4221,6 +4711,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Verisilicon media platform drivers # @@ -30429,7 +31606,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # VIA media platform drivers -@@ -4229,6 +4685,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y +@@ -4229,6 +4720,7 @@ CONFIG_MEDIA_PLATFORM_DRIVERS=y # # Xilinx media platform drivers # @@ -30437,7 +31614,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # MMC/SDIO DVB adapters -@@ -4283,6 +4740,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y +@@ -4283,6 +4775,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_OV2659 is not set # CONFIG_VIDEO_OV2680 is not set # CONFIG_VIDEO_OV2685 is not set @@ -30445,7 +31622,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_VIDEO_OV4689 is not set # CONFIG_VIDEO_OV5640 is not set # CONFIG_VIDEO_OV5645 is not set -@@ -4304,6 +4762,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y +@@ -4304,6 +4797,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_OV9282 is not set # CONFIG_VIDEO_OV9640 is not set # CONFIG_VIDEO_OV9650 is not set @@ -30453,7 +31630,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_VIDEO_RDACM20 is not set # CONFIG_VIDEO_RDACM21 is not set # CONFIG_VIDEO_RJ54N1 is not set -@@ -4341,6 +4800,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y +@@ -4341,6 +4835,7 @@ CONFIG_VIDEO_CAMERA_SENSOR=y # CONFIG_VIDEO_CS53L32A is not set # CONFIG_VIDEO_MSP3400 is not set # CONFIG_VIDEO_SONY_BTF_MPX is not set @@ -30461,7 +31638,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_VIDEO_TDA7432 is not set # 
CONFIG_VIDEO_TDA9840 is not set # CONFIG_VIDEO_TEA6415C is not set -@@ -4451,7 +4911,7 @@ CONFIG_CXD2880_SPI_DRV=m +@@ -4451,7 +4946,7 @@ CONFIG_CXD2880_SPI_DRV=m # CONFIG_VIDEO_GS1662 is not set # end of Media SPI Adapters @@ -30470,7 +31647,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Customize TV tuners -@@ -4668,6 +5128,7 @@ CONFIG_DVB_SP2=m +@@ -4668,6 +5163,7 @@ CONFIG_DVB_SP2=m # Graphics support # CONFIG_APERTURE_HELPERS=y @@ -30478,7 +31655,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_VIDEO_CMDLINE=y CONFIG_VIDEO_NOMODESET=y CONFIG_AUXDISPLAY=y -@@ -4679,6 +5140,7 @@ CONFIG_AUXDISPLAY=y +@@ -4679,6 +5175,7 @@ CONFIG_AUXDISPLAY=y # CONFIG_CHARLCD_BL_ON is not set CONFIG_CHARLCD_BL_FLASH=y CONFIG_DRM=y @@ -30486,7 +31663,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_DRM_DEBUG_MM is not set CONFIG_DRM_KMS_HELPER=y # CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set -@@ -4687,7 +5149,7 @@ CONFIG_DRM_FBDEV_EMULATION=y +@@ -4687,7 +5184,7 @@ CONFIG_DRM_FBDEV_EMULATION=y CONFIG_DRM_FBDEV_OVERALLOC=100 # CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set CONFIG_DRM_LOAD_EDID_FIRMWARE=y @@ -30495,7 +31672,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_DRM_DISPLAY_DP_HELPER=y CONFIG_DRM_DISPLAY_HDCP_HELPER=y CONFIG_DRM_DISPLAY_HDMI_HELPER=y -@@ -4720,7 +5182,7 @@ CONFIG_DRM_I2C_NXP_TDA998X=m +@@ -4720,7 +5217,7 @@ CONFIG_DRM_I2C_NXP_TDA998X=m CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m @@ -30504,7 +31681,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y # CONFIG_DRM_AMDGPU_WERROR is not set -@@ -4735,9 +5197,13 @@ CONFIG_DRM_AMDGPU_USERPTR=y +@@ -4735,9 +5232,13 @@ CONFIG_DRM_AMDGPU_USERPTR=y # Display Engine Configuration # CONFIG_DRM_AMD_DC=y @@ -30518,7 +31695,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_DRM_NOUVEAU=m CONFIG_NOUVEAU_DEBUG=5 CONFIG_NOUVEAU_DEBUG_DEFAULT=3 -@@ -4749,6 +5215,9 @@ CONFIG_DRM_NOUVEAU_BACKLIGHT=y +@@ -4749,6 +5250,9 @@ 
CONFIG_DRM_NOUVEAU_BACKLIGHT=y CONFIG_DRM_UDL=m CONFIG_DRM_AST=m CONFIG_DRM_MGAG200=m @@ -30528,7 +31705,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m CONFIG_DRM_VIRTIO_GPU_KMS=y -@@ -4759,36 +5228,89 @@ CONFIG_DRM_PANEL=y +@@ -4759,36 +5263,89 @@ CONFIG_DRM_PANEL=y # # CONFIG_DRM_PANEL_ABT_Y030XX067A is not set # CONFIG_DRM_PANEL_ARM_VERSATILE is not set @@ -30619,7 +31796,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # end of Display Panels CONFIG_DRM_BRIDGE=y -@@ -4834,10 +5356,16 @@ CONFIG_DRM_PANEL_BRIDGE=y +@@ -4834,10 +5391,16 @@ CONFIG_DRM_PANEL_BRIDGE=y # CONFIG_DRM_I2C_ADV7511 is not set # CONFIG_DRM_CDNS_DSI is not set # CONFIG_DRM_CDNS_MHDP8546 is not set @@ -30637,7 +31814,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_DRM_LOGICVC is not set # CONFIG_DRM_ARCPGU is not set CONFIG_DRM_BOCHS=m -@@ -4856,6 +5384,14 @@ CONFIG_DRM_CIRRUS_QEMU=m +@@ -4856,6 +5419,14 @@ CONFIG_DRM_CIRRUS_QEMU=m # CONFIG_TINYDRM_ST7735R is not set # CONFIG_DRM_GUD is not set # CONFIG_DRM_SSD130X is not set @@ -30652,7 +31829,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_DRM_LEGACY is not set CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y -@@ -4894,6 +5430,7 @@ CONFIG_FB_RADEON_BACKLIGHT=y +@@ -4894,6 +5465,7 @@ CONFIG_FB_RADEON_BACKLIGHT=y # CONFIG_FB_ARK is not set # CONFIG_FB_PM3 is not set # CONFIG_FB_CARMINE is not set @@ -30660,7 +31837,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_FB_SMSCUFX is not set # CONFIG_FB_UDL is not set # CONFIG_FB_IBM_GXT4500 is not set -@@ -4919,6 +5456,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y +@@ -4919,6 +5491,7 @@ CONFIG_FB_SYS_IMAGEBLIT=y # CONFIG_FB_FOREIGN_ENDIAN is not set CONFIG_FB_SYS_FOPS=y CONFIG_FB_DEFERRED_IO=y @@ -30668,7 +31845,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_FB_IOMEM_HELPERS=y CONFIG_FB_SYSMEM_HELPERS=y CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y -@@ -4946,7 +5484,7 @@ CONFIG_LCD_PLATFORM=m +@@ -4946,7 +5519,7 @@ CONFIG_LCD_PLATFORM=m CONFIG_BACKLIGHT_CLASS_DEVICE=y # 
CONFIG_BACKLIGHT_KTD253 is not set # CONFIG_BACKLIGHT_KTZ8866 is not set @@ -30677,7 +31854,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_BACKLIGHT_QCOM_WLED is not set # CONFIG_BACKLIGHT_ADP8860 is not set # CONFIG_BACKLIGHT_ADP8870 is not set -@@ -4960,6 +5498,7 @@ CONFIG_BACKLIGHT_GPIO=m +@@ -4960,6 +5533,7 @@ CONFIG_BACKLIGHT_GPIO=m # CONFIG_BACKLIGHT_LED is not set # end of Backlight & LCD device support @@ -30685,7 +31862,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_HDMI=y # -@@ -4983,10 +5522,13 @@ CONFIG_LOGO_LINUX_CLUT224=y +@@ -4983,10 +5557,13 @@ CONFIG_LOGO_LINUX_CLUT224=y # end of Graphics support # CONFIG_DRM_ACCEL is not set @@ -30703,7 +31880,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SND_HWDEP=m CONFIG_SND_RAWMIDI=m CONFIG_SND_JACK=y -@@ -5010,6 +5552,7 @@ CONFIG_SND_ALOOP=m +@@ -5010,6 +5587,7 @@ CONFIG_SND_ALOOP=m # CONFIG_SND_PCMTEST is not set # CONFIG_SND_MTPAV is not set # CONFIG_SND_SERIAL_U16550 is not set @@ -30711,7 +31888,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SND_MPU401 is not set CONFIG_SND_PCI=y # CONFIG_SND_AD1889 is not set -@@ -5074,6 +5617,11 @@ CONFIG_SND_HDA_INTEL=m +@@ -5074,6 +5652,11 @@ CONFIG_SND_HDA_INTEL=m # CONFIG_SND_HDA_RECONFIG is not set # CONFIG_SND_HDA_INPUT_BEEP is not set # CONFIG_SND_HDA_PATCH_LOADER is not set @@ -30723,7 +31900,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SND_HDA_CODEC_REALTEK is not set # CONFIG_SND_HDA_CODEC_ANALOG is not set # CONFIG_SND_HDA_CODEC_SIGMATEL is not set -@@ -5095,7 +5643,9 @@ CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +@@ -5095,7 +5678,9 @@ CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 CONFIG_SND_HDA_CORE=m CONFIG_SND_HDA_COMPONENT=y CONFIG_SND_HDA_PREALLOC_SIZE=64 @@ -30733,7 +31910,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_SND_SPI=y CONFIG_SND_USB=y CONFIG_SND_USB_AUDIO=m -@@ -5110,7 +5660,273 @@ CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y +@@ -5110,7 +5695,273 @@ CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y # CONFIG_SND_USB_PODHD is 
not set # CONFIG_SND_USB_TONEPORT is not set # CONFIG_SND_USB_VARIAX is not set @@ -31008,7 +32185,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SND_VIRTIO is not set CONFIG_HID_SUPPORT=y CONFIG_HID=y -@@ -5195,6 +6011,7 @@ CONFIG_HID_MULTITOUCH=m +@@ -5195,6 +6046,7 @@ CONFIG_HID_MULTITOUCH=m # CONFIG_HID_NINTENDO is not set # CONFIG_HID_NTI is not set CONFIG_HID_NTRIG=y @@ -31016,7 +32193,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_HID_ORTEK=m CONFIG_HID_PANTHERLORD=m # CONFIG_PANTHERLORD_FF is not set -@@ -5261,6 +6078,7 @@ CONFIG_USB_HIDDEV=y +@@ -5261,6 +6113,7 @@ CONFIG_USB_HIDDEV=y # end of USB HID support CONFIG_I2C_HID=y @@ -31024,7 +32201,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_I2C_HID_OF is not set # CONFIG_I2C_HID_OF_ELAN is not set # CONFIG_I2C_HID_OF_GOODIX is not set -@@ -5297,6 +6115,7 @@ CONFIG_USB_XHCI_HCD=y +@@ -5297,6 +6150,7 @@ CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_PCI=y # CONFIG_USB_XHCI_PCI_RENESAS is not set CONFIG_USB_XHCI_PLATFORM=y @@ -31032,7 +32209,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y -@@ -5314,6 +6133,7 @@ CONFIG_USB_UHCI_HCD=y +@@ -5314,6 +6168,7 @@ CONFIG_USB_UHCI_HCD=y # CONFIG_USB_R8A66597_HCD is not set # CONFIG_USB_HCD_BCMA is not set # CONFIG_USB_HCD_TEST_MODE is not set @@ -31040,7 +32217,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # USB Device Class drivers -@@ -5338,8 +6158,8 @@ CONFIG_USB_STORAGE_DATAFAB=m +@@ -5338,8 +6193,8 @@ CONFIG_USB_STORAGE_DATAFAB=m CONFIG_USB_STORAGE_FREECOM=m CONFIG_USB_STORAGE_ISD200=m CONFIG_USB_STORAGE_USBAT=m @@ -31051,7 +32228,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_USB_STORAGE_JUMPSHOT=m CONFIG_USB_STORAGE_ALAUDA=m CONFIG_USB_STORAGE_ONETOUCH=m -@@ -5360,7 +6180,19 @@ CONFIG_USB_MICROTEK=m +@@ -5360,7 +6215,19 @@ CONFIG_USB_MICROTEK=m # # CONFIG_USB_CDNS_SUPPORT is not set # CONFIG_USB_MUSB_HDRC is not set @@ -31072,7 +32249,7 @@ index 
61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_USB_DWC2 is not set # CONFIG_USB_CHIPIDEA is not set # CONFIG_USB_ISP1760 is not set -@@ -5452,7 +6284,7 @@ CONFIG_USB_HSIC_USB3503=m +@@ -5452,7 +6319,7 @@ CONFIG_USB_HSIC_USB3503=m # CONFIG_USB_HSIC_USB4604 is not set # CONFIG_USB_LINK_LAYER_TEST is not set CONFIG_USB_CHAOSKEY=m @@ -31081,7 +32258,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_USB_ATM=m # CONFIG_USB_SPEEDTOUCH is not set CONFIG_USB_CXACRU=m -@@ -5467,7 +6299,101 @@ CONFIG_USB_XUSBATM=m +@@ -5467,7 +6334,101 @@ CONFIG_USB_XUSBATM=m # CONFIG_USB_ISP1301 is not set # end of USB Physical Layer drivers @@ -31184,7 +32361,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m -@@ -5476,6 +6402,7 @@ CONFIG_TYPEC_TCPCI=m +@@ -5476,6 +6437,7 @@ CONFIG_TYPEC_TCPCI=m # CONFIG_TYPEC_FUSB302 is not set CONFIG_TYPEC_UCSI=m # CONFIG_UCSI_CCG is not set @@ -31192,7 +32369,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_UCSI_STM32G0 is not set # CONFIG_TYPEC_TPS6598X is not set # CONFIG_TYPEC_ANX7411 is not set -@@ -5500,7 +6427,7 @@ CONFIG_TYPEC_DP_ALTMODE=m +@@ -5500,7 +6462,7 @@ CONFIG_TYPEC_DP_ALTMODE=m # CONFIG_TYPEC_NVIDIA_ALTMODE is not set # end of USB Type-C Alternate Mode drivers @@ -31201,7 +32378,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_MMC=y CONFIG_PWRSEQ_EMMC=m CONFIG_PWRSEQ_SIMPLE=m -@@ -5519,15 +6446,19 @@ CONFIG_MMC_SDHCI=y +@@ -5519,15 +6481,19 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_IO_ACCESSORS=y CONFIG_MMC_SDHCI_PCI=m CONFIG_MMC_RICOH_MMC=y @@ -31222,7 +32399,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_MMC_CB710=m CONFIG_MMC_VIA_SDMMC=m CONFIG_MMC_DW=m -@@ -5538,18 +6469,18 @@ CONFIG_MMC_DW_BLUEFIELD=m +@@ -5538,18 +6504,18 @@ CONFIG_MMC_DW_BLUEFIELD=m # CONFIG_MMC_DW_K3 is not set CONFIG_MMC_DW_PCI=m # CONFIG_MMC_DW_STARFIVE is not set @@ -31243,7 +32420,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_SCSI_UFSHCD is not set CONFIG_MEMSTICK=m # CONFIG_MEMSTICK_DEBUG is 
not set -@@ -5590,7 +6521,7 @@ CONFIG_LEDS_LM3530=m +@@ -5590,7 +6556,7 @@ CONFIG_LEDS_LM3530=m # CONFIG_LEDS_LM3642 is not set # CONFIG_LEDS_LM3692X is not set # CONFIG_LEDS_PCA9532 is not set @@ -31252,7 +32429,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_LEDS_LP3944=m # CONFIG_LEDS_LP3952 is not set # CONFIG_LEDS_LP50XX is not set -@@ -5672,6 +6603,7 @@ CONFIG_INFINIBAND_USER_MEM=y +@@ -5672,6 +6638,7 @@ CONFIG_INFINIBAND_USER_MEM=y CONFIG_INFINIBAND_ON_DEMAND_PAGING=y CONFIG_INFINIBAND_ADDR_TRANS=y CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y @@ -31260,7 +32437,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_INFINIBAND_VIRT_DMA=y CONFIG_INFINIBAND_BNXT_RE=m CONFIG_INFINIBAND_CXGB4=m -@@ -5753,6 +6685,7 @@ CONFIG_RTC_DRV_EM3027=m +@@ -5753,6 +6720,7 @@ CONFIG_RTC_DRV_EM3027=m # CONFIG_RTC_DRV_RV3032 is not set CONFIG_RTC_DRV_RV8803=m # CONFIG_RTC_DRV_SD3078 is not set @@ -31268,7 +32445,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # SPI RTC drivers -@@ -5804,21 +6737,28 @@ CONFIG_RTC_DRV_M48T35=m +@@ -5804,21 +6772,28 @@ CONFIG_RTC_DRV_M48T35=m CONFIG_RTC_DRV_M48T59=m CONFIG_RTC_DRV_MSM6242=m CONFIG_RTC_DRV_RP5C01=m @@ -31297,7 +32474,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_DMADEVICES=y # CONFIG_DMADEVICES_DEBUG is not set -@@ -5826,12 +6766,16 @@ CONFIG_DMADEVICES=y +@@ -5826,12 +6801,16 @@ CONFIG_DMADEVICES=y # DMA Devices # CONFIG_DMA_ENGINE=y @@ -31315,7 +32492,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_PL330_DMA is not set # CONFIG_PLX_DMA is not set # CONFIG_XILINX_DMA is not set -@@ -5844,6 +6788,8 @@ CONFIG_DW_DMAC=m +@@ -5844,6 +6823,8 @@ CONFIG_DW_DMAC=m CONFIG_DW_DMAC_PCI=m # CONFIG_DW_EDMA is not set # CONFIG_SF_PDMA is not set @@ -31324,7 +32501,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # DMA Clients -@@ -5855,11 +6801,11 @@ CONFIG_ASYNC_TX_DMA=y +@@ -5855,11 +6836,11 @@ CONFIG_ASYNC_TX_DMA=y # DMABUF options # CONFIG_SYNC_FILE=y @@ -31339,15 +32516,18 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # 
CONFIG_DMABUF_HEAPS is not set # CONFIG_DMABUF_SYSFS_STATS is not set # end of DMABUF options -@@ -5879,6 +6825,7 @@ CONFIG_VFIO_GROUP=y +@@ -5877,8 +6858,10 @@ CONFIG_UIO_PCI_GENERIC=m + CONFIG_VFIO=m + CONFIG_VFIO_GROUP=y CONFIG_VFIO_CONTAINER=y ++CONFIG_VFIO_IOMMU_TYPE1=m CONFIG_VFIO_NOIOMMU=y CONFIG_VFIO_VIRQFD=y +# CONFIG_VFIO_DEBUGFS is not set # # VFIO support for PCI devices -@@ -5948,8 +6895,11 @@ CONFIG_COMMON_CLK=y +@@ -5948,8 +6931,11 @@ CONFIG_COMMON_CLK=y # CONFIG_COMMON_CLK_VC7 is not set # CONFIG_COMMON_CLK_FIXED_MMIO is not set CONFIG_CLK_ANALOGBITS_WRPLL_CLN28HPC=y @@ -31355,11 +32535,11 @@ index 61f2b2f12589..4ef5c9933ef9 100644 +# CONFIG_CLK_RCAR_USB2_CLOCK_SEL is not set CONFIG_CLK_SIFIVE=y CONFIG_CLK_SIFIVE_PRCI=y -+CONFIG_SPACEMIT_K1X_CCU=y ++CONFIG_SPACEMIT_K1_CCU=y CONFIG_CLK_STARFIVE_JH71X0=y CONFIG_CLK_STARFIVE_JH7100=y CONFIG_CLK_STARFIVE_JH7100_AUDIO=m -@@ -5959,15 +6909,27 @@ CONFIG_CLK_STARFIVE_JH7110_AON=m +@@ -5959,15 +6945,27 @@ CONFIG_CLK_STARFIVE_JH7110_AON=m CONFIG_CLK_STARFIVE_JH7110_STG=m CONFIG_CLK_STARFIVE_JH7110_ISP=m CONFIG_CLK_STARFIVE_JH7110_VOUT=m @@ -31387,21 +32567,24 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_RISCV_TIMER=y # end of Clock Source drivers -@@ -5976,8 +6938,11 @@ CONFIG_MAILBOX=y +@@ -5976,8 +6974,12 @@ CONFIG_MAILBOX=y # CONFIG_ARM_MHU_V2 is not set # CONFIG_PLATFORM_MHU is not set # CONFIG_PL320_MBOX is not set -+# CONFIG_PCC is not set ++CONFIG_PCC=y # CONFIG_ALTERA_MBOX is not set # CONFIG_MAILBOX_TEST is not set +CONFIG_SUN6I_MSGBOX=y +CONFIG_TH1520_MBOX=y ++CONFIG_IOMMU_IOVA=y CONFIG_IOMMU_API=y CONFIG_IOMMU_SUPPORT=y -@@ -5992,6 +6957,9 @@ CONFIG_IOMMU_DEFAULT_DMA_LAZY=y +@@ -5991,7 +6993,11 @@ CONFIG_IOMMU_SUPPORT=y + CONFIG_IOMMU_DEFAULT_DMA_LAZY=y # CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set CONFIG_OF_IOMMU=y ++CONFIG_IOMMU_DMA=y # CONFIG_IOMMUFD is not set +CONFIG_RISCV_IOMMU=y +CONFIG_RISCV_IOMMU_PCI=y @@ -31409,7 +32592,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # Remoteproc 
drivers -@@ -6007,6 +6975,7 @@ CONFIG_RPMSG_CHAR=y +@@ -6007,6 +7013,7 @@ CONFIG_RPMSG_CHAR=y CONFIG_RPMSG_CTRL=y CONFIG_RPMSG_NS=y # CONFIG_RPMSG_QCOM_GLINK_RPM is not set @@ -31417,7 +32600,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_RPMSG_VIRTIO=y # end of Rpmsg drivers -@@ -6055,22 +7024,72 @@ CONFIG_RPMSG_VIRTIO=y +@@ -6055,22 +7062,73 @@ CONFIG_RPMSG_VIRTIO=y # CONFIG_QCOM_PMIC_GLINK is not set # end of Qualcomm SoC drivers @@ -31480,6 +32663,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 +# +# DEVFREQ Drivers +# ++# CONFIG_ARM_HISI_UNCORE_DEVFREQ is not set +# CONFIG_ARM_SUN8I_A33_MBUS_DEVFREQ is not set +CONFIG_PM_DEVFREQ_EVENT=y CONFIG_EXTCON=y @@ -31491,7 +32675,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_EXTCON_FSA9480 is not set CONFIG_EXTCON_GPIO=m # CONFIG_EXTCON_MAX3355 is not set -@@ -6080,7 +7099,540 @@ CONFIG_EXTCON_GPIO=m +@@ -6080,7 +7138,540 @@ CONFIG_EXTCON_GPIO=m # CONFIG_EXTCON_USB_GPIO is not set # CONFIG_EXTCON_USBC_TUSB320 is not set # CONFIG_MEMORY is not set @@ -32033,7 +33217,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_NTB is not set CONFIG_PWM=y CONFIG_PWM_SYSFS=y -@@ -6090,7 +7642,12 @@ CONFIG_PWM_SYSFS=y +@@ -6090,7 +7681,12 @@ CONFIG_PWM_SYSFS=y # CONFIG_PWM_DWC is not set # CONFIG_PWM_FSL_FTM is not set # CONFIG_PWM_PCA9685 is not set @@ -32046,7 +33230,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_PWM_XILINX is not set # -@@ -6099,15 +7656,24 @@ CONFIG_PWM_SIFIVE=m +@@ -6099,15 +7695,24 @@ CONFIG_PWM_SIFIVE=m CONFIG_IRQCHIP=y # CONFIG_AL_FIC is not set # CONFIG_XILINX_INTC is not set @@ -32067,11 +33251,11 @@ index 61f2b2f12589..4ef5c9933ef9 100644 +CONFIG_RESET_TH1520=y # CONFIG_RESET_TI_SYSCON is not set # CONFIG_RESET_TI_TPS380X is not set -+CONFIG_RESET_K1X_SPACEMIT=y ++CONFIG_RESET_K1_SPACEMIT=y CONFIG_RESET_STARFIVE_JH71X0=y CONFIG_RESET_STARFIVE_JH7100=y CONFIG_RESET_STARFIVE_JH7110=y -@@ -6116,7 +7682,12 @@ CONFIG_RESET_STARFIVE_JH7110=y +@@ -6116,7 +7721,12 @@ 
CONFIG_RESET_STARFIVE_JH7110=y # PHY Subsystem # CONFIG_GENERIC_PHY=y @@ -32084,7 +33268,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # PHY drivers for Broadcom platforms -@@ -6132,14 +7703,21 @@ CONFIG_GENERIC_PHY=y +@@ -6132,14 +7742,21 @@ CONFIG_GENERIC_PHY=y # CONFIG_PHY_PXA_28NM_HSIC is not set # CONFIG_PHY_PXA_28NM_USB2 is not set # CONFIG_PHY_LAN966X_SERDES is not set @@ -32106,15 +33290,17 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # end of PHY Subsystem # CONFIG_POWERCAP is not set -@@ -6151,6 +7729,7 @@ CONFIG_GENERIC_PHY=y +@@ -6151,6 +7768,9 @@ CONFIG_GENERIC_PHY=y CONFIG_RISCV_PMU=y CONFIG_RISCV_PMU_LEGACY=y CONFIG_RISCV_PMU_SBI=y ++# CONFIG_RISCV_PMU_SSE is not set +CONFIG_ANDES_CUSTOM_PMU=y ++CONFIG_LRW_DDR_PMU=m # end of Performance monitor support CONFIG_RAS=y -@@ -6191,7 +7770,9 @@ CONFIG_NVMEM_SYSFS=y +@@ -6191,7 +7811,9 @@ CONFIG_NVMEM_SYSFS=y # end of Layout Types # CONFIG_NVMEM_RMEM is not set @@ -32124,7 +33310,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # # HW tracing support -@@ -6202,6 +7783,8 @@ CONFIG_NVMEM_SYSFS=y +@@ -6202,6 +7824,8 @@ CONFIG_NVMEM_SYSFS=y # CONFIG_FPGA is not set # CONFIG_FSI is not set @@ -32133,7 +33319,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_PM_OPP=y # CONFIG_SIOX is not set # CONFIG_SLIMBUS is not set -@@ -6223,6 +7806,7 @@ CONFIG_INTERCONNECT=y +@@ -6223,6 +7847,7 @@ CONFIG_INTERCONNECT=y # CONFIG_VALIDATE_FS_PARSER=y CONFIG_FS_IOMAP=y @@ -32141,7 +33327,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_BUFFER_HEAD=y CONFIG_LEGACY_DIRECT_IO=y # CONFIG_EXT2_FS is not set -@@ -6235,6 +7819,7 @@ CONFIG_EXT4_FS_POSIX_ACL=y +@@ -6235,6 +7860,7 @@ CONFIG_EXT4_FS_POSIX_ACL=y CONFIG_EXT4_FS_SECURITY=y # CONFIG_EXT4_DEBUG is not set # CONFIG_EXT4_ERROR_REPORT is not set @@ -32149,7 +33335,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_JBD2=y # CONFIG_JBD2_DEBUG is not set CONFIG_FS_MBCACHE=y -@@ -6289,10 +7874,11 @@ CONFIG_QUOTA_TREE=y +@@ -6289,10 +7915,12 @@ CONFIG_QUOTA_TREE=y CONFIG_QFMT_V2=y 
CONFIG_QUOTACTL=y CONFIG_AUTOFS_FS=y @@ -32159,11 +33345,12 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_VIRTIO_FS=m -CONFIG_OVERLAY_FS=m +CONFIG_FUSE_PASSTHROUGH=y ++CONFIG_FUSE_IO_URING=y +CONFIG_OVERLAY_FS=y # CONFIG_OVERLAY_FS_REDIRECT_DIR is not set CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y # CONFIG_OVERLAY_FS_INDEX is not set -@@ -6363,9 +7949,9 @@ CONFIG_TMPFS_XATTR=y +@@ -6363,9 +7991,9 @@ CONFIG_TMPFS_XATTR=y # CONFIG_TMPFS_QUOTA is not set CONFIG_ARCH_SUPPORTS_HUGETLBFS=y CONFIG_HUGETLBFS=y @@ -32174,7 +33361,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_HUGETLB_ALLOC_LIMIT is not set CONFIG_ARCH_HAS_GIGANTIC_PAGE=y CONFIG_CONFIGFS_FS=y -@@ -6382,8 +7968,24 @@ CONFIG_MISC_FILESYSTEMS=y +@@ -6382,8 +8010,24 @@ CONFIG_MISC_FILESYSTEMS=y # CONFIG_BEFS_FS is not set # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set @@ -32201,7 +33388,15 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CRAMFS=m CONFIG_CRAMFS_BLOCKDEV=y # CONFIG_CRAMFS_MTD is not set -@@ -6432,7 +8034,7 @@ CONFIG_NFS_V4=y +@@ -6422,6 +8066,7 @@ CONFIG_PSTORE_RAM=m + # CONFIG_SYSV_FS is not set + # CONFIG_UFS_FS is not set + # CONFIG_EROFS_FS is not set ++# CONFIG_MFS_FS is not set + CONFIG_NETWORK_FILESYSTEMS=y + CONFIG_NFS_FS=y + CONFIG_NFS_V2=m +@@ -6432,7 +8077,7 @@ CONFIG_NFS_V4=y CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y CONFIG_PNFS_FILE_LAYOUT=y @@ -32210,7 +33405,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_PNFS_FLEXFILE_LAYOUT=m CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" # CONFIG_NFS_V4_1_MIGRATION is not set -@@ -6518,7 +8120,7 @@ CONFIG_NLS_ISO8859_8=m +@@ -6518,7 +8163,7 @@ CONFIG_NLS_ISO8859_8=m CONFIG_NLS_CODEPAGE_1250=m CONFIG_NLS_CODEPAGE_1251=m CONFIG_NLS_ASCII=y @@ -32219,7 +33414,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_NLS_ISO8859_2=m CONFIG_NLS_ISO8859_3=m CONFIG_NLS_ISO8859_4=m -@@ -6557,6 +8159,7 @@ CONFIG_KEYS=y +@@ -6557,6 +8202,7 @@ CONFIG_KEYS=y CONFIG_PERSISTENT_KEYRINGS=y CONFIG_TRUSTED_KEYS=y CONFIG_TRUSTED_KEYS_TPM=y @@ 
-32227,7 +33422,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_ENCRYPTED_KEYS=y # CONFIG_USER_DECRYPTED_DATA is not set # CONFIG_KEY_DH_OPERATIONS is not set -@@ -6635,6 +8238,7 @@ CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +@@ -6635,6 +8281,7 @@ CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y # CONFIG_IMA_DISABLE_HTABLE is not set # CONFIG_IMA_DIGEST_LIST is not set @@ -32235,16 +33430,28 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_EVM=y # CONFIG_EVM_DEFAULT_HASH_SHA1 is not set CONFIG_EVM_DEFAULT_HASH_SHA256=y -@@ -6671,8 +8275,6 @@ CONFIG_LIST_HARDENED=y +@@ -6657,6 +8304,8 @@ CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,appar + # Memory initialization + # + CONFIG_INIT_STACK_NONE=y ++# CONFIG_INIT_STACK_ALL_PATTERN is not set ++# CONFIG_INIT_STACK_ALL_ZERO is not set + # CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set + # CONFIG_INIT_ON_FREE_DEFAULT_ON is not set + # CONFIG_ZERO_CALL_USED_REGS is not set +@@ -6670,9 +8319,9 @@ CONFIG_LIST_HARDENED=y + # end of Hardening of kernel data structures CONFIG_RANDSTRUCT_NONE=y ++# CONFIG_RANDSTRUCT_FULL is not set ++# CONFIG_RANDSTRUCT_PERFORMANCE is not set # end of Kernel hardening options - -# CONFIG_SECURITY_BOOT_INIT is not set # end of Security options CONFIG_XOR_BLOCKS=m -@@ -6693,6 +8295,7 @@ CONFIG_CRYPTO_ALGAPI=y +@@ -6693,6 +8342,7 @@ CONFIG_CRYPTO_ALGAPI=y CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y CONFIG_CRYPTO_AEAD2=y @@ -32252,7 +33459,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CRYPTO_SIG2=y CONFIG_CRYPTO_SKCIPHER=y CONFIG_CRYPTO_SKCIPHER2=y -@@ -6704,18 +8307,18 @@ CONFIG_CRYPTO_RNG_DEFAULT=y +@@ -6704,18 +8354,18 @@ CONFIG_CRYPTO_RNG_DEFAULT=y CONFIG_CRYPTO_AKCIPHER2=y CONFIG_CRYPTO_AKCIPHER=y CONFIG_CRYPTO_KPP2=y @@ -32274,7 +33481,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_ENGINE=y # end of Crypto core or helper -@@ -6724,14 +8327,14 @@ CONFIG_CRYPTO_ENGINE=y +@@ -6724,14 +8374,14 @@ 
CONFIG_CRYPTO_ENGINE=y # Public-key cryptography # CONFIG_CRYPTO_RSA=y @@ -32293,7 +33500,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # end of Public-key cryptography # -@@ -6747,7 +8350,7 @@ CONFIG_CRYPTO_CAMELLIA=m +@@ -6747,7 +8397,7 @@ CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST_COMMON=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m @@ -32302,7 +33509,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m -@@ -6764,7 +8367,7 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m +@@ -6764,7 +8414,7 @@ CONFIG_CRYPTO_TWOFISH_COMMON=m # # CONFIG_CRYPTO_ADIANTUM is not set CONFIG_CRYPTO_ARC4=m @@ -32311,7 +33518,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CRYPTO_CBC=y # CONFIG_CRYPTO_CFB is not set CONFIG_CRYPTO_CTR=y -@@ -6773,35 +8376,35 @@ CONFIG_CRYPTO_ECB=y +@@ -6773,35 +8423,35 @@ CONFIG_CRYPTO_ECB=y # CONFIG_CRYPTO_HCTR2 is not set # CONFIG_CRYPTO_KEYWRAP is not set CONFIG_CRYPTO_LRW=m @@ -32354,7 +33561,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_SHA1=y CONFIG_CRYPTO_SHA256=y -@@ -6864,6 +8467,10 @@ CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +@@ -6864,6 +8514,10 @@ CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y CONFIG_CRYPTO_HASH_INFO=y CONFIG_CRYPTO_HW=y @@ -32365,7 +33572,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_CRYPTO_DEV_ATMEL_ECC is not set # CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set # CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set -@@ -6871,6 +8478,7 @@ CONFIG_CRYPTO_HW=y +@@ -6871,6 +8525,7 @@ CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set # CONFIG_CRYPTO_DEV_QAT_C62X is not set # CONFIG_CRYPTO_DEV_QAT_4XXX is not set @@ -32373,7 +33580,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set -@@ -6933,16 +8541,16 @@ CONFIG_GENERIC_PCI_IOMAP=y +@@ -6933,16 +8588,16 @@ CONFIG_GENERIC_PCI_IOMAP=y # CONFIG_CRYPTO_LIB_UTILS=y 
CONFIG_CRYPTO_LIB_AES=y @@ -32395,24 +33602,29 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_CRYPTO_LIB_POLY1305 is not set # CONFIG_CRYPTO_LIB_CHACHA20POLY1305 is not set CONFIG_CRYPTO_LIB_SHA1=y -@@ -7013,6 +8621,7 @@ CONFIG_HAS_IOPORT_MAP=y +@@ -7011,8 +8666,12 @@ CONFIG_HAS_IOMEM=y + CONFIG_HAS_IOPORT=y + CONFIG_HAS_IOPORT_MAP=y CONFIG_HAS_DMA=y ++CONFIG_DMA_OPS=y ++CONFIG_NEED_SG_DMA_FLAGS=y ++CONFIG_NEED_SG_DMA_LENGTH=y CONFIG_NEED_DMA_MAP_STATE=y CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_DMA_WRITE_COMBINE=y CONFIG_DMA_DECLARE_COHERENT=y CONFIG_ARCH_HAS_SETUP_DMA_OPS=y CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y -@@ -7032,7 +8641,7 @@ CONFIG_DMA_CMA=y +@@ -7032,7 +8691,7 @@ CONFIG_DMA_CMA=y # # Default contiguous memory area size: # -CONFIG_CMA_SIZE_MBYTES=64 -+CONFIG_CMA_SIZE_MBYTES=32 ++CONFIG_CMA_SIZE_MBYTES=256 CONFIG_CMA_SIZE_SEL_MBYTES=y # CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set # CONFIG_CMA_SIZE_SEL_MIN is not set -@@ -7043,7 +8652,6 @@ CONFIG_DMA_MAP_BENCHMARK=y +@@ -7043,7 +8702,6 @@ CONFIG_DMA_MAP_BENCHMARK=y CONFIG_SGL_ALLOC=y CONFIG_CHECK_SIGNATURE=y # CONFIG_CPUMASK_OFFSTACK is not set @@ -32420,7 +33632,16 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_GLOB=y -@@ -7179,7 +8787,6 @@ CONFIG_SLUB_DEBUG=y +@@ -7113,6 +8771,8 @@ CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y + # CONFIG_DEBUG_INFO_REDUCED is not set + CONFIG_DEBUG_INFO_COMPRESSED_NONE=y + # CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set ++# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set ++# CONFIG_DEBUG_INFO_SPLIT is not set + CONFIG_DEBUG_INFO_BTF=y + CONFIG_PAHOLE_HAS_SPLIT_BTF=y + CONFIG_PAHOLE_HAS_LANG_EXCLUDE=y +@@ -7179,7 +8839,6 @@ CONFIG_SLUB_DEBUG=y # CONFIG_PAGE_TABLE_CHECK is not set # CONFIG_PAGE_POISONING is not set # CONFIG_DEBUG_PAGE_REF is not set @@ -32428,7 +33649,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_ARCH_HAS_DEBUG_WX=y # CONFIG_DEBUG_WX is not set CONFIG_GENERIC_PTDUMP=y -@@ -7257,7 +8864,7 @@ 
CONFIG_LOCK_DEBUGGING_SUPPORT=y +@@ -7257,7 +8916,7 @@ CONFIG_LOCK_DEBUGGING_SUPPORT=y # CONFIG_DEBUG_LOCK_ALLOC is not set CONFIG_DEBUG_ATOMIC_SLEEP=y # CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set @@ -32437,7 +33658,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_WW_MUTEX_SELFTEST is not set # CONFIG_SCF_TORTURE_TEST is not set # CONFIG_CSD_LOCK_WAIT_DEBUG is not set -@@ -7281,8 +8888,9 @@ CONFIG_DEBUG_LIST=y +@@ -7281,8 +8940,9 @@ CONFIG_DEBUG_LIST=y # # RCU Debugging # @@ -32448,7 +33669,15 @@ index 61f2b2f12589..4ef5c9933ef9 100644 # CONFIG_RCU_REF_SCALE_TEST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 -@@ -7358,7 +8966,38 @@ CONFIG_RING_BUFFER_BENCHMARK=m +@@ -7346,6 +9006,7 @@ CONFIG_FTRACE_MCOUNT_RECORD=y + CONFIG_FTRACE_MCOUNT_USE_RECORDMCOUNT=y + CONFIG_SYNTH_EVENTS=y + # CONFIG_USER_EVENTS is not set ++# CONFIG_HIST_TRIGGERS is not set + # CONFIG_TRACE_EVENT_INJECT is not set + # CONFIG_TRACEPOINT_BENCHMARK is not set + CONFIG_RING_BUFFER_BENCHMARK=m +@@ -7358,7 +9019,38 @@ CONFIG_RING_BUFFER_BENCHMARK=m # CONFIG_SYNTH_EVENT_GEN_TEST is not set # CONFIG_KPROBE_EVENT_GEN_TEST is not set # CONFIG_RV is not set @@ -32488,7 +33717,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_STRICT_DEVMEM=y CONFIG_IO_STRICT_DEVMEM=y -@@ -7376,7 +9015,47 @@ CONFIG_FUNCTION_ERROR_INJECTION=y +@@ -7376,7 +9068,47 @@ CONFIG_FUNCTION_ERROR_INJECTION=y # CONFIG_FAULT_INJECTION is not set CONFIG_ARCH_HAS_KCOV=y # CONFIG_KCOV is not set @@ -32537,7 +33766,7 @@ index 61f2b2f12589..4ef5c9933ef9 100644 CONFIG_ARCH_USE_MEMTEST=y # CONFIG_MEMTEST is not set # end of Kernel Testing and Coverage -@@ -7388,9 +9067,3 @@ CONFIG_ARCH_USE_MEMTEST=y +@@ -7388,9 +9120,3 @@ CONFIG_ARCH_USE_MEMTEST=y # end of Kernel hacking # CONFIG_KWORKER_NUMA_AFFINITY is not set @@ -33126,11 +34355,56 @@ index 0554ed4bf087..928d8f7fe288 100644 for (alt = begin; alt < end; alt++) { if (alt->vendor_id != THEAD_VENDOR_ID) continue; +diff --git 
a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild +index 504f8b7e72d4..ad72f2bd4cc9 100644 +--- a/arch/riscv/include/asm/Kbuild ++++ b/arch/riscv/include/asm/Kbuild +@@ -2,10 +2,12 @@ + generic-y += early_ioremap.h + generic-y += flat.h + generic-y += kvm_para.h ++generic-y += mcs_spinlock.h + generic-y += parport.h +-generic-y += spinlock.h + generic-y += spinlock_types.h ++generic-y += ticket_spinlock.h + generic-y += qrwlock.h + generic-y += qrwlock_types.h ++generic-y += qspinlock.h + generic-y += user.h + generic-y += vmlinux.lds.h diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h -index d5604d2073bc..e0a1f84404f3 100644 +index d5604d2073bc..ca3cb4c69c1a 100644 --- a/arch/riscv/include/asm/acpi.h +++ b/arch/riscv/include/asm/acpi.h -@@ -61,11 +61,16 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { } +@@ -27,6 +27,26 @@ extern int acpi_disabled; + extern int acpi_noirq; + extern int acpi_pci_disabled; + ++#ifdef CONFIG_ACPI_APEI ++/* ++ * acpi_disable_cmcff is used in drivers/acpi/apei/hest.c for disabling ++ * IA-32 Architecture Corrected Machine Check (CMC) Firmware-First mode ++ * with a kernel command line parameter "acpi=nocmcoff". But we don't ++ * have this IA-32 specific feature on ARM64, this definition is only ++ * for compatibility. ++ */ ++#define acpi_disable_cmcff 1 ++static inline pgprot_t arch_apei_get_mem_attribute(phys_addr_t addr) ++{ ++ /* ++ * Until we have a way to look for EFI memory attributes. 
++ */ ++ return PAGE_KERNEL; ++} ++#else /* CONFIG_ACPI_APEI */ ++#define acpi_disable_cmcff 0 ++#endif /* !CONFIG_ACPI_APEI */ ++ + static inline void disable_acpi(void) + { + acpi_disabled = 1; +@@ -61,11 +81,16 @@ static inline void arch_fix_phys_package_id(int num, u32 slot) { } void acpi_init_rintc_map(void); struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu); @@ -33149,7 +34423,7 @@ index d5604d2073bc..e0a1f84404f3 100644 #else static inline void acpi_init_rintc_map(void) { } static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu) -@@ -79,6 +84,18 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table, +@@ -79,6 +104,18 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table, return -EINVAL; } @@ -33368,6 +34642,38 @@ index 36b955c762ba..cd627ec289f1 100644 #define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs) +diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h +index b5b84c6be01e..6c77988430ea 100644 +--- a/arch/riscv/include/asm/asm.h ++++ b/arch/riscv/include/asm/asm.h +@@ -89,16 +89,24 @@ + #define PER_CPU_OFFSET_SHIFT 3 + #endif + +-.macro asm_per_cpu dst sym tmp +- REG_L \tmp, TASK_TI_CPU_NUM(tp) +- slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT ++.macro asm_per_cpu_with_cpu dst sym tmp cpu ++ slli \tmp, \cpu, PER_CPU_OFFSET_SHIFT + la \dst, __per_cpu_offset + add \dst, \dst, \tmp + REG_L \tmp, 0(\dst) + la \dst, \sym + add \dst, \dst, \tmp + .endm ++ ++.macro asm_per_cpu dst sym tmp ++ lw \tmp, TASK_TI_CPU_NUM(tp) ++ asm_per_cpu_with_cpu \dst \sym \tmp \tmp ++.endm + #else /* CONFIG_SMP */ ++.macro asm_per_cpu_with_cpu dst sym tmp cpu ++ la \dst, \sym ++.endm ++ + .macro asm_per_cpu dst sym tmp + la \dst, \sym + .endm diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h index f5dfef6c2153..0e0522e588ca 100644 --- a/arch/riscv/include/asm/atomic.h @@ -33453,26 +34759,26 @@ index f5dfef6c2153..0e0522e588ca 100644 : [p]"=&r" (prev), 
[rc]"=&r" (rc), [c]"+A" (v->counter) : diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h -index 110752594228..feebe8e02ae0 100644 +index 110752594228..99acef7386e4 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h -@@ -11,13 +11,13 @@ +@@ -11,13 +11,8 @@ #define _ASM_RISCV_BARRIER_H #ifndef __ASSEMBLY__ -+#include -+#include - - #define nop() __asm__ __volatile__ ("nop") - #define __nops(n) ".rept " #n "\nnop\n.endr\n" - #define nops(n) __asm__ __volatile__ (__nops(n)) - +- +-#define nop() __asm__ __volatile__ ("nop") +-#define __nops(n) ".rept " #n "\nnop\n.endr\n" +-#define nops(n) __asm__ __volatile__ (__nops(n)) +- -#define RISCV_FENCE(p, s) \ - __asm__ __volatile__ ("fence " #p "," #s : : : "memory") ++#include ++#include /* These barriers need to enforce ordering on both devices or memory. */ #define mb() RISCV_FENCE(iorw,iorw) -@@ -29,21 +29,6 @@ +@@ -29,21 +24,6 @@ #define __smp_rmb() RISCV_FENCE(r,r) #define __smp_wmb() RISCV_FENCE(w,w) @@ -33494,7 +34800,7 @@ index 110752594228..feebe8e02ae0 100644 /* * This is a very specific barrier: it's currently only used in two places in * the kernel, both in the scheduler. 
See include/linux/spinlock.h for the two -@@ -71,6 +56,45 @@ do { \ +@@ -71,6 +51,45 @@ do { \ */ #define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) @@ -33814,11 +35120,48 @@ index 3540b690944b..3cdcc2bbaaf5 100644 #if (BITS_PER_LONG == 64) #define __AMO(op) "amo" #op ".d" +diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h +index 3f65acd0ef75..0a8dd761df3a 100644 +--- a/arch/riscv/include/asm/cacheflush.h ++++ b/arch/riscv/include/asm/cacheflush.h +@@ -43,7 +43,23 @@ static inline void flush_dcache_page(struct page *page) + flush_icache_mm(vma->vm_mm, 0) + + #ifdef CONFIG_64BIT +-#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end) ++extern u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1]; ++extern char _end[]; ++#define flush_cache_vmap flush_cache_vmap ++static inline void flush_cache_vmap(unsigned long start, unsigned long end) ++{ ++ if (is_vmalloc_or_module_addr((void *)start)) { ++ int i; ++ ++ /* ++ * We don't care if concurrently a cpu resets this value since ++ * the only place this can happen is in handle_exception() where ++ * an sfence.vma is emitted. 
++ */ ++ for (i = 0; i < ARRAY_SIZE(new_vmalloc); ++i) ++ new_vmalloc[i] = -1ULL; ++ } ++} + #define flush_cache_vmap_early(start, end) local_flush_tlb_kernel_range(start, end) + #endif + +@@ -61,6 +77,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local); + + extern unsigned int riscv_cbom_block_size; + extern unsigned int riscv_cboz_block_size; ++extern unsigned int riscv_cbop_block_size; + void riscv_init_cbo_blocksizes(void); + + #ifdef CONFIG_RISCV_DMA_NONCOHERENT diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h -index 2f4726d3cfcc..1f4cd12e4664 100644 +index 2f4726d3cfcc..569504738fac 100644 --- a/arch/riscv/include/asm/cmpxchg.h +++ b/arch/riscv/include/asm/cmpxchg.h -@@ -8,143 +8,87 @@ +@@ -8,143 +8,106 @@ #include @@ -33828,10 +35171,10 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 +#include +#include +#include ++#include -#define __xchg_relaxed(ptr, new, size) \ -+#define __arch_xchg_masked(sc_sfx, prepend, append, r, p, n) \ - ({ \ +-({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ @@ -33854,27 +35197,42 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 - BUILD_BUG(); \ - } \ - __ret; \ -+ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ -+ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ -+ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ -+ << __s; \ -+ ulong __newx = (ulong)(n) << __s; \ -+ ulong __retx; \ -+ ulong __rc; \ -+ \ -+ __asm__ __volatile__ ( \ -+ prepend \ -+ "0: lr.w %0, %2\n" \ -+ " and %1, %0, %z4\n" \ -+ " or %1, %1, %z3\n" \ -+ " sc.w" sc_sfx " %1, %1, %2\n" \ -+ " bnez %1, 0b\n" \ -+ append \ -+ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ -+ : "rJ" (__newx), "rJ" (~__mask) \ -+ : "memory"); \ -+ \ -+ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ ++#define __arch_xchg_masked(sc_sfx, swap_sfx, prepend, sc_append, \ ++ swap_append, r, p, n) \ ++({ \ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ ++ 
riscv_has_extension_unlikely(RISCV_ISA_EXT_ZABHA)) { \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ " amoswap" swap_sfx " %0, %z2, %1\n" \ ++ swap_append \ ++ : "=&r" (r), "+A" (*(p)) \ ++ : "rJ" (n) \ ++ : "memory"); \ ++ } else { \ ++ u32 *__ptr32b = (u32 *)((ulong)(p) & ~0x3); \ ++ ulong __s = ((ulong)(p) & (0x4 - sizeof(*p))) * BITS_PER_BYTE; \ ++ ulong __mask = GENMASK(((sizeof(*p)) * BITS_PER_BYTE) - 1, 0) \ ++ << __s; \ ++ ulong __newx = (ulong)(n) << __s; \ ++ ulong __retx; \ ++ ulong __rc; \ ++ \ ++ __asm__ __volatile__ ( \ ++ prepend \ ++ PREFETCHW_ASM(%5) \ ++ "0: lr.w %0, %2\n" \ ++ " and %1, %0, %z4\n" \ ++ " or %1, %1, %z3\n" \ ++ " sc.w" sc_sfx " %1, %1, %2\n" \ ++ " bnez %1, 0b\n" \ ++ sc_append \ ++ : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ ++ : "rJ" (__newx), "rJ" (~__mask), "rJ" (__ptr32b) \ ++ : "memory"); \ ++ \ ++ r = (__typeof__(*(p)))((__retx & __mask) >> __s); \ ++ } \ }) -#define arch_xchg_relaxed(ptr, x) \ @@ -33900,89 +35258,89 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ -+ __typeof__(*(__ptr)) __new = (new); \ -+ __typeof__(*(__ptr)) __ret; \ -+ \ -+ switch (sizeof(*__ptr)) { \ -+ case 1: \ -+ case 2: \ -+ __arch_xchg_masked(sc_sfx, prepend, sc_append, \ -+ __ret, __ptr, __new); \ -+ break; \ - case 4: \ +- case 4: \ - __asm__ __volatile__ ( \ - " amoswap.w %0, %2, %1\n" \ - RISCV_ACQUIRE_BARRIER \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ -+ __arch_xchg(".w" swap_sfx, prepend, swap_append, \ -+ __ret, __ptr, __new); \ ++ __typeof__(*(__ptr)) __new = (new); \ ++ __typeof__(*(__ptr)) __ret; \ ++ \ ++ switch (sizeof(*__ptr)) { \ ++ case 1: \ ++ __arch_xchg_masked(sc_sfx, ".b" swap_sfx, \ ++ prepend, sc_append, swap_append, \ ++ __ret, __ptr, __new); \ break; \ - case 8: \ +- case 8: \ - __asm__ __volatile__ ( \ - " amoswap.d %0, %2, %1\n" \ - RISCV_ACQUIRE_BARRIER \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - 
: "memory"); \ -+ __arch_xchg(".d" swap_sfx, prepend, swap_append, \ -+ __ret, __ptr, __new); \ ++ case 2: \ ++ __arch_xchg_masked(sc_sfx, ".h" swap_sfx, \ ++ prepend, sc_append, swap_append, \ ++ __ret, __ptr, __new); \ break; \ - default: \ - BUILD_BUG(); \ - } \ +- default: \ +- BUILD_BUG(); \ +- } \ - __ret; \ -+ (__typeof__(*(__ptr)))__ret; \ - }) - +-}) +- -#define arch_xchg_acquire(ptr, x) \ -({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_acquire((ptr), \ - _x_, sizeof(*(ptr))); \ -}) -+#define arch_xchg_relaxed(ptr, x) \ -+ _arch_xchg(ptr, x, "", "", "", "", "") - +- -#define __xchg_release(ptr, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(new) __new = (new); \ - __typeof__(*(ptr)) __ret; \ - switch (size) { \ -- case 4: \ + case 4: \ - __asm__ __volatile__ ( \ - RISCV_RELEASE_BARRIER \ - " amoswap.w %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ -- break; \ -- case 8: \ ++ __arch_xchg(".w" swap_sfx, prepend, swap_append, \ ++ __ret, __ptr, __new); \ + break; \ + case 8: \ - __asm__ __volatile__ ( \ - RISCV_RELEASE_BARRIER \ - " amoswap.d %0, %2, %1\n" \ - : "=r" (__ret), "+A" (*__ptr) \ - : "r" (__new) \ - : "memory"); \ -- break; \ -- default: \ -- BUILD_BUG(); \ -- } \ ++ __arch_xchg(".d" swap_sfx, prepend, swap_append, \ ++ __ret, __ptr, __new); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ - __ret; \ --}) -+#define arch_xchg_acquire(ptr, x) \ -+ _arch_xchg(ptr, x, "", "", "", \ -+ RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER) ++ (__typeof__(*(__ptr)))__ret; \ + }) - #define arch_xchg_release(ptr, x) \ +-#define arch_xchg_release(ptr, x) \ -({ \ - __typeof__(*(ptr)) _x_ = (x); \ - (__typeof__(*(ptr))) __xchg_release((ptr), \ - _x_, sizeof(*(ptr))); \ -}) -- ++#define arch_xchg_relaxed(ptr, x) \ ++ _arch_xchg(ptr, x, "", "", "", "", "") + -#define __arch_xchg(ptr, new, size) \ -({ \ - __typeof__(ptr) __ptr = (ptr); \ @@ -34008,6 +35366,11 @@ index 
2f4726d3cfcc..1f4cd12e4664 100644 - } \ - __ret; \ -}) ++#define arch_xchg_acquire(ptr, x) \ ++ _arch_xchg(ptr, x, "", "", "", \ ++ RISCV_ACQUIRE_BARRIER, RISCV_ACQUIRE_BARRIER) ++ ++#define arch_xchg_release(ptr, x) \ + _arch_xchg(ptr, x, "", "", RISCV_RELEASE_BARRIER, "", "") #define arch_xchg(ptr, x) \ @@ -34019,7 +35382,7 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 #define xchg32(ptr, x) \ ({ \ -@@ -163,190 +107,128 @@ +@@ -163,190 +126,164 @@ * store NEW in MEM. Return the initial value in MEM. Success is * indicated by comparing RETURN with OLD. */ @@ -34058,14 +35421,17 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 - } \ - __ret; \ -}) - +- -#define arch_cmpxchg_relaxed(ptr, o, n) \ -({ \ - __typeof__(*(ptr)) _o_ = (o); \ - __typeof__(*(ptr)) _n_ = (n); \ - (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ -+#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, prepend, append, r, p, o, n) \ ++#define __arch_cmpxchg_masked(sc_sfx, cas_sfx, \ ++ sc_prepend, sc_append, \ ++ cas_prepend, cas_append, \ ++ r, p, o, n) \ +({ \ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && \ + IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && \ @@ -34074,9 +35440,9 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 + r = o; \ + \ + __asm__ __volatile__ ( \ -+ prepend \ ++ cas_prepend \ + " amocas" cas_sfx " %0, %z2, %1\n" \ -+ append \ ++ cas_append \ + : "+&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ + : "memory"); \ @@ -34091,7 +35457,7 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 + ulong __rc; \ + \ + __asm__ __volatile__ ( \ -+ prepend \ ++ sc_prepend \ + "0: lr.w %0, %2\n" \ + " and %1, %0, %z5\n" \ + " bne %1, %z3, 1f\n" \ @@ -34099,7 +35465,7 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 + " or %1, %1, %z4\n" \ + " sc.w" sc_sfx " %1, %1, %2\n" \ + " bnez %1, 0b\n" \ -+ append \ ++ sc_append \ + "1:\n" \ + : "=&r" (__retx), "=&r" (__rc), "+A" (*(__ptr32b)) \ + : "rJ" ((long)__oldx), "rJ" (__newx), \ @@ -34111,7 +35477,10 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 }) -#define 
__cmpxchg_acquire(ptr, old, new, size) \ -+#define __arch_cmpxchg(lr_sfx, sc_cas_sfx, prepend, append, r, p, co, o, n) \ ++#define __arch_cmpxchg(lr_sfx, sc_sfx, cas_sfx, \ ++ sc_prepend, sc_append, \ ++ cas_prepend, cas_append, \ ++ r, p, co, o, n) \ ({ \ - __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ @@ -34133,9 +35502,9 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 - "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ -+ prepend \ -+ " amocas" sc_cas_sfx " %0, %z2, %1\n" \ -+ append \ ++ cas_prepend \ ++ " amocas" cas_sfx " %0, %z2, %1\n" \ ++ cas_append \ + : "+&r" (r), "+A" (*(p)) \ + : "rJ" (n) \ : "memory"); \ @@ -34148,13 +35517,13 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 - "0: lr.d %0, %2\n" \ - " bne %0, %z3, 1f\n" \ - " sc.d %1, %z4, %2\n" \ -+ prepend \ ++ sc_prepend \ + "0: lr" lr_sfx " %0, %2\n" \ + " bne %0, %z3, 1f\n" \ -+ " sc" sc_cas_sfx " %1, %z4, %2\n" \ ++ " sc" sc_sfx " %1, %z4, %2\n" \ " bnez %1, 0b\n" \ - RISCV_ACQUIRE_BARRIER \ -+ append \ ++ sc_append \ "1:\n" \ - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ @@ -34177,7 +35546,9 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 -}) - -#define __cmpxchg_release(ptr, old, new, size) \ -+#define _arch_cmpxchg(ptr, old, new, sc_cas_sfx, prepend, append) \ ++#define _arch_cmpxchg(ptr, old, new, sc_sfx, cas_sfx, \ ++ sc_prepend, sc_append, \ ++ cas_prepend, cas_append) \ ({ \ __typeof__(ptr) __ptr = (ptr); \ - __typeof__(*(ptr)) __old = (old); \ @@ -34202,9 +35573,10 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 + \ + switch (sizeof(*__ptr)) { \ + case 1: \ -+ __arch_cmpxchg_masked(sc_cas_sfx, ".b" sc_cas_sfx, \ -+ prepend, append, \ -+ __ret, __ptr, __old, __new); \ ++ __arch_cmpxchg_masked(sc_sfx, ".b" cas_sfx, \ ++ sc_prepend, sc_append, \ ++ cas_prepend, cas_append, \ ++ __ret, __ptr, __old, __new); \ break; \ - case 8: \ - __asm__ __volatile__ ( \ @@ -34218,9 +35590,10 @@ index 
2f4726d3cfcc..1f4cd12e4664 100644 - : "rJ" (__old), "rJ" (__new) \ - : "memory"); \ + case 2: \ -+ __arch_cmpxchg_masked(sc_cas_sfx, ".h" sc_cas_sfx, \ -+ prepend, append, \ -+ __ret, __ptr, __old, __new); \ ++ __arch_cmpxchg_masked(sc_sfx, ".h" cas_sfx, \ ++ sc_prepend, sc_append, \ ++ cas_prepend, cas_append, \ ++ __ret, __ptr, __old, __new); \ break; \ - default: \ - BUILD_BUG(); \ @@ -34255,8 +35628,10 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" ((long)__old), "rJ" (__new) \ - : "memory"); \ -+ __arch_cmpxchg(".w", ".w" sc_cas_sfx, prepend, append, \ -+ __ret, __ptr, (long), __old, __new); \ ++ __arch_cmpxchg(".w", ".w" sc_sfx, ".w" cas_sfx, \ ++ sc_prepend, sc_append, \ ++ cas_prepend, cas_append, \ ++ __ret, __ptr, (long), __old, __new); \ break; \ case 8: \ - __asm__ __volatile__ ( \ @@ -34269,8 +35644,10 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 - : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ - : "rJ" (__old), "rJ" (__new) \ - : "memory"); \ -+ __arch_cmpxchg(".d", ".d" sc_cas_sfx, prepend, append, \ -+ __ret, __ptr, /**/, __old, __new); \ ++ __arch_cmpxchg(".d", ".d" sc_sfx, ".d" cas_sfx, \ ++ sc_prepend, sc_append, \ ++ cas_prepend, cas_append, \ ++ __ret, __ptr, /**/, __old, __new); \ break; \ default: \ BUILD_BUG(); \ @@ -34279,14 +35656,34 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 + (__typeof__(*(__ptr)))__ret; \ }) ++/* ++ * These macros are here to improve the readability of the arch_cmpxchg_XXX() ++ * macros. 
++ */ ++#define SC_SFX(x) x ++#define CAS_SFX(x) x ++#define SC_PREPEND(x) x ++#define SC_APPEND(x) x ++#define CAS_PREPEND(x) x ++#define CAS_APPEND(x) x ++ +#define arch_cmpxchg_relaxed(ptr, o, n) \ -+ _arch_cmpxchg((ptr), (o), (n), "", "", "") ++ _arch_cmpxchg((ptr), (o), (n), \ ++ SC_SFX(""), CAS_SFX(""), \ ++ SC_PREPEND(""), SC_APPEND(""), \ ++ CAS_PREPEND(""), CAS_APPEND("")) + +#define arch_cmpxchg_acquire(ptr, o, n) \ -+ _arch_cmpxchg((ptr), (o), (n), "", "", RISCV_ACQUIRE_BARRIER) ++ _arch_cmpxchg((ptr), (o), (n), \ ++ SC_SFX(""), CAS_SFX(""), \ ++ SC_PREPEND(""), SC_APPEND(RISCV_ACQUIRE_BARRIER), \ ++ CAS_PREPEND(""), CAS_APPEND(RISCV_ACQUIRE_BARRIER)) + +#define arch_cmpxchg_release(ptr, o, n) \ -+ _arch_cmpxchg((ptr), (o), (n), "", RISCV_RELEASE_BARRIER, "") ++ _arch_cmpxchg((ptr), (o), (n), \ ++ SC_SFX(""), CAS_SFX(""), \ ++ SC_PREPEND(RISCV_RELEASE_BARRIER), SC_APPEND(""), \ ++ CAS_PREPEND(RISCV_RELEASE_BARRIER), CAS_APPEND("")) + #define arch_cmpxchg(ptr, o, n) \ -({ \ @@ -34295,7 +35692,10 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 - (__typeof__(*(ptr))) __cmpxchg((ptr), \ - _o_, _n_, sizeof(*(ptr))); \ -}) -+ _arch_cmpxchg((ptr), (o), (n), ".rl", "", " fence rw, rw\n") ++ _arch_cmpxchg((ptr), (o), (n), \ ++ SC_SFX(".rl"), CAS_SFX(".aqrl"), \ ++ SC_PREPEND(""), SC_APPEND(RISCV_FULL_BARRIER), \ ++ CAS_PREPEND(""), CAS_APPEND("")) #define arch_cmpxchg_local(ptr, o, n) \ - (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) @@ -34303,7 +35703,7 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 #define arch_cmpxchg64(ptr, o, n) \ ({ \ -@@ -360,4 +242,82 @@ +@@ -360,4 +297,120 @@ arch_cmpxchg_relaxed((ptr), (o), (n)); \ }) @@ -34325,6 +35725,44 @@ index 2f4726d3cfcc..1f4cd12e4664 100644 + arch_cmpxchg_release((ptr), (o), (n)); \ +}) + ++#if defined(CONFIG_64BIT) && defined(CONFIG_RISCV_ISA_ZACAS) ++ ++#define system_has_cmpxchg128() riscv_has_extension_unlikely(RISCV_ISA_EXT_ZACAS) ++ ++union __u128_halves { ++ u128 full; ++ struct { ++ u64 low, high; ++ }; 
++}; ++ ++#define __arch_cmpxchg128(p, o, n, cas_sfx) \ ++({ \ ++ __typeof__(*(p)) __o = (o); \ ++ union __u128_halves __hn = { .full = (n) }; \ ++ union __u128_halves __ho = { .full = (__o) }; \ ++ register unsigned long t1 asm ("t1") = __hn.low; \ ++ register unsigned long t2 asm ("t2") = __hn.high; \ ++ register unsigned long t3 asm ("t3") = __ho.low; \ ++ register unsigned long t4 asm ("t4") = __ho.high; \ ++ \ ++ __asm__ __volatile__ ( \ ++ " amocas.q" cas_sfx " %0, %z3, %2" \ ++ : "+&r" (t3), "+&r" (t4), "+A" (*(p)) \ ++ : "rJ" (t1), "rJ" (t2) \ ++ : "memory"); \ ++ \ ++ ((u128)t4 << 64) | t3; \ ++}) ++ ++#define arch_cmpxchg128(ptr, o, n) \ ++ __arch_cmpxchg128((ptr), (o), (n), ".aqrl") ++ ++#define arch_cmpxchg128_local(ptr, o, n) \ ++ __arch_cmpxchg128((ptr), (o), (n), "") ++ ++#endif /* CONFIG_64BIT && CONFIG_RISCV_ISA_ZACAS */ ++ +#ifdef CONFIG_RISCV_ISA_ZAWRS +/* + * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to @@ -34558,7 +35996,7 @@ index d0345bd659c9..c8346dc0bed8 100644 #endif diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h -index 777cb8299551..1fd1bc2f220b 100644 +index 777cb8299551..2ac270ad4acd 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -194,6 +194,7 @@ @@ -34569,15 +36007,46 @@ index 777cb8299551..1fd1bc2f220b 100644 #define ENVCFG_CBZE (_AC(1, UL) << 7) #define ENVCFG_CBCFE (_AC(1, UL) << 6) #define ENVCFG_CBIE_SHIFT 4 -@@ -275,6 +276,7 @@ +@@ -203,6 +204,18 @@ + #define ENVCFG_CBIE_INV _AC(0x3, UL) + #define ENVCFG_FIOM _AC(0x1, UL) + ++/* Smstateen bits */ ++#define SMSTATEEN0_AIA_IMSIC_SHIFT 58 ++#define SMSTATEEN0_AIA_IMSIC (_ULL(1) << SMSTATEEN0_AIA_IMSIC_SHIFT) ++#define SMSTATEEN0_AIA_SHIFT 59 ++#define SMSTATEEN0_AIA (_ULL(1) << SMSTATEEN0_AIA_SHIFT) ++#define SMSTATEEN0_AIA_ISEL_SHIFT 60 ++#define SMSTATEEN0_AIA_ISEL (_ULL(1) << SMSTATEEN0_AIA_ISEL_SHIFT) ++#define SMSTATEEN0_HSENVCFG_SHIFT 62 ++#define SMSTATEEN0_HSENVCFG (_ULL(1) << 
SMSTATEEN0_HSENVCFG_SHIFT) ++#define SMSTATEEN0_SSTATEEN0_SHIFT 63 ++#define SMSTATEEN0_SSTATEEN0 (_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT) ++ + /* symbolic CSR names: */ + #define CSR_CYCLE 0xc00 + #define CSR_TIME 0xc01 +@@ -275,6 +288,8 @@ #define CSR_SIE 0x104 #define CSR_STVEC 0x105 #define CSR_SCOUNTEREN 0x106 +#define CSR_SENVCFG 0x10a ++#define CSR_SSTATEEN0 0x10c #define CSR_SSCRATCH 0x140 #define CSR_SEPC 0x141 #define CSR_SCAUSE 0x142 -@@ -393,10 +395,20 @@ +@@ -349,6 +364,10 @@ + #define CSR_VSIEH 0x214 + #define CSR_VSIPH 0x254 + ++/* Hypervisor stateen CSRs */ ++#define CSR_HSTATEEN0 0x60c ++#define CSR_HSTATEEN0H 0x61c ++ + #define CSR_MSTATUS 0x300 + #define CSR_MISA 0x301 + #define CSR_MIDELEG 0x303 +@@ -393,10 +412,20 @@ #define CSR_VTYPE 0xc21 #define CSR_VLENB 0xc22 @@ -34598,7 +36067,7 @@ index 777cb8299551..1fd1bc2f220b 100644 # define CSR_SCRATCH CSR_MSCRATCH # define CSR_EPC CSR_MEPC # define CSR_CAUSE CSR_MCAUSE -@@ -421,6 +433,7 @@ +@@ -421,6 +450,7 @@ # define CSR_STATUS CSR_SSTATUS # define CSR_IE CSR_SIE # define CSR_TVEC CSR_STVEC @@ -34777,8 +36246,27 @@ index 2b443a3a487f..6bcd80325dfc 100644 #endif #endif /* _ASM_RISCV_FENCE_H */ +diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h +index 0a55099bb734..fa3a0ec0c55c 100644 +--- a/arch/riscv/include/asm/fixmap.h ++++ b/arch/riscv/include/asm/fixmap.h +@@ -38,6 +38,14 @@ enum fixed_addresses { + FIX_TEXT_POKE0, + FIX_EARLYCON_MEM_BASE, + ++#ifdef CONFIG_ACPI_APEI_GHES ++ /* Used for GHES mapping from assorted contexts */ ++ FIX_APEI_GHES_IRQ, ++#ifdef CONFIG_RISCV_SSE ++ FIX_APEI_GHES_SSE_LOW_PRIORITY, ++ FIX_APEI_GHES_SSE_HIGH_PRIORITY, ++#endif /* CONFIG_RISCV_SSE */ ++#endif /* CONFIG_ACPI_APEI_GHES */ + __end_of_permanent_fixed_addresses, + /* + * Temporary boot-time mappings, used by early_ioremap(), diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h -index f4157034efa9..869da082252a 100644 +index f4157034efa9..58524c64aef5 100644 
--- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -8,25 +8,16 @@ @@ -34807,7 +36295,7 @@ index f4157034efa9..869da082252a 100644 #define RISCV_ISA_EXT_v ('v' - 'a') /* -@@ -58,85 +49,69 @@ +@@ -58,85 +49,75 @@ #define RISCV_ISA_EXT_ZICSR 40 #define RISCV_ISA_EXT_ZIFENCEI 41 #define RISCV_ISA_EXT_ZIHPM 42 @@ -34864,6 +36352,12 @@ index f4157034efa9..869da082252a 100644 +#define RISCV_ISA_EXT_ZICCRSE 91 +#define RISCV_ISA_EXT_SVADE 92 +#define RISCV_ISA_EXT_SVADU 93 ++#define RISCV_ISA_EXT_ZFBFMIN 94 ++#define RISCV_ISA_EXT_ZVFBFMIN 95 ++#define RISCV_ISA_EXT_ZVFBFWMA 96 ++#define RISCV_ISA_EXT_ZAAMO 97 ++#define RISCV_ISA_EXT_ZALRSC 98 ++#define RISCV_ISA_EXT_ZICBOP 99 + +#define RISCV_ISA_EXT_XLINUXENVCFG 127 + @@ -34952,7 +36446,7 @@ index f4157034efa9..869da082252a 100644 #endif /* _ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h -index 7cad513538d8..ef01c182af2b 100644 +index 7cad513538d8..ffb9484531af 100644 --- a/arch/riscv/include/asm/hwprobe.h +++ b/arch/riscv/include/asm/hwprobe.h @@ -8,11 +8,35 @@ @@ -34960,7 +36454,7 @@ index 7cad513538d8..ef01c182af2b 100644 #include -#define RISCV_HWPROBE_MAX_KEY 5 -+#define RISCV_HWPROBE_MAX_KEY 8 ++#define RISCV_HWPROBE_MAX_KEY 9 static inline bool riscv_hwprobe_key_is_valid(__s64 key) { @@ -34992,24 +36486,179 @@ index 7cad513538d8..ef01c182af2b 100644 +} + #endif +diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h +index e0b319af3681..8927a6ea1127 100644 +--- a/arch/riscv/include/asm/image.h ++++ b/arch/riscv/include/asm/image.h +@@ -30,6 +30,8 @@ + RISCV_HEADER_VERSION_MINOR) + + #ifndef __ASSEMBLY__ ++#define riscv_image_flag_field(flags, field)\ ++ (((flags) >> field##_SHIFT) & field##_MASK) + /** + * struct riscv_image_header - riscv kernel image header + * @code0: Executable code diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h -index 6960beb75f32..cbd51bfdf527 100644 
+index 6960beb75f32..a8a2a861b202 100644 --- a/arch/riscv/include/asm/insn-def.h +++ b/arch/riscv/include/asm/insn-def.h -@@ -196,4 +196,8 @@ +@@ -18,6 +18,13 @@ + #define INSN_I_RD_SHIFT 7 + #define INSN_I_OPCODE_SHIFT 0 + ++#define INSN_S_SIMM7_SHIFT 25 ++#define INSN_S_RS2_SHIFT 20 ++#define INSN_S_RS1_SHIFT 15 ++#define INSN_S_FUNC3_SHIFT 12 ++#define INSN_S_SIMM5_SHIFT 7 ++#define INSN_S_OPCODE_SHIFT 0 ++ + #ifdef __ASSEMBLY__ + + #ifdef CONFIG_AS_HAS_INSN +@@ -30,6 +37,10 @@ + .insn i \opcode, \func3, \rd, \rs1, \simm12 + .endm + ++ .macro insn_s, opcode, func3, rs2, simm12, rs1 ++ .insn s \opcode, \func3, \rs2, \simm12(\rs1) ++ .endm ++ + #else + + #include +@@ -51,10 +62,20 @@ + (\simm12 << INSN_I_SIMM12_SHIFT)) + .endm + ++ .macro insn_s, opcode, func3, rs2, simm12, rs1 ++ .4byte ((\opcode << INSN_S_OPCODE_SHIFT) | \ ++ (\func3 << INSN_S_FUNC3_SHIFT) | \ ++ (.L__gpr_num_\rs2 << INSN_S_RS2_SHIFT) | \ ++ (.L__gpr_num_\rs1 << INSN_S_RS1_SHIFT) | \ ++ ((\simm12 & 0x1f) << INSN_S_SIMM5_SHIFT) | \ ++ (((\simm12 >> 5) & 0x7f) << INSN_S_SIMM7_SHIFT)) ++ .endm ++ + #endif + + #define __INSN_R(...) insn_r __VA_ARGS__ + #define __INSN_I(...) insn_i __VA_ARGS__ ++#define __INSN_S(...) insn_s __VA_ARGS__ + + #else /* ! 
__ASSEMBLY__ */ + +@@ -66,6 +87,9 @@ + #define __INSN_I(opcode, func3, rd, rs1, simm12) \ + ".insn i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n" + ++#define __INSN_S(opcode, func3, rs2, simm12, rs1) \ ++ ".insn s " opcode ", " func3 ", " rs2 ", " simm12 "(" rs1 ")\n" ++ + #else + + #include +@@ -92,12 +116,26 @@ + " (\\simm12 << " __stringify(INSN_I_SIMM12_SHIFT) "))\n" \ + " .endm\n" + ++#define DEFINE_INSN_S \ ++ __DEFINE_ASM_GPR_NUMS \ ++" .macro insn_s, opcode, func3, rs2, simm12, rs1\n" \ ++" .4byte ((\\opcode << " __stringify(INSN_S_OPCODE_SHIFT) ") |" \ ++" (\\func3 << " __stringify(INSN_S_FUNC3_SHIFT) ") |" \ ++" (.L__gpr_num_\\rs2 << " __stringify(INSN_S_RS2_SHIFT) ") |" \ ++" (.L__gpr_num_\\rs1 << " __stringify(INSN_S_RS1_SHIFT) ") |" \ ++" ((\\simm12 & 0x1f) << " __stringify(INSN_S_SIMM5_SHIFT) ") |" \ ++" (((\\simm12 >> 5) & 0x7f) << " __stringify(INSN_S_SIMM7_SHIFT) "))\n" \ ++" .endm\n" ++ + #define UNDEFINE_INSN_R \ + " .purgem insn_r\n" + + #define UNDEFINE_INSN_I \ + " .purgem insn_i\n" + ++#define UNDEFINE_INSN_S \ ++" .purgem insn_s\n" ++ + #define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + DEFINE_INSN_R \ + "insn_r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" \ +@@ -108,6 +146,11 @@ + "insn_i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n" \ + UNDEFINE_INSN_I + ++#define __INSN_S(opcode, func3, rs2, simm12, rs1) \ ++ DEFINE_INSN_S \ ++ "insn_s " opcode ", " func3 ", " rs2 ", " simm12 ", " rs1 "\n" \ ++ UNDEFINE_INSN_S ++ + #endif + + #endif /* ! 
__ASSEMBLY__ */ +@@ -120,6 +163,10 @@ + __INSN_I(RV_##opcode, RV_##func3, RV_##rd, \ + RV_##rs1, RV_##simm12) + ++#define INSN_S(opcode, func3, rs2, simm12, rs1) \ ++ __INSN_S(RV_##opcode, RV_##func3, RV_##rs2, \ ++ RV_##simm12, RV_##rs1) ++ + #define RV_OPCODE(v) __ASM_STR(v) + #define RV_FUNC3(v) __ASM_STR(v) + #define RV_FUNC7(v) __ASM_STR(v) +@@ -133,6 +180,7 @@ + #define RV___RS2(v) __RV_REG(v) + + #define RV_OPCODE_MISC_MEM RV_OPCODE(15) ++#define RV_OPCODE_OP_IMM RV_OPCODE(19) + #define RV_OPCODE_SYSTEM RV_OPCODE(115) + + #define HFENCE_VVMA(vaddr, asid) \ +@@ -196,4 +244,26 @@ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ RS1(base), SIMM12(4)) ++#define PREFETCH_I(base, offset) \ ++ INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(0), \ ++ SIMM12((offset) & 0xfe0), RS1(base)) ++ ++#define PREFETCH_R(base, offset) \ ++ INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(1), \ ++ SIMM12((offset) & 0xfe0), RS1(base)) ++ ++#define PREFETCH_W(base, offset) \ ++ INSN_S(OPCODE_OP_IMM, FUNC3(6), __RS2(3), \ ++ SIMM12((offset) & 0xfe0), RS1(base)) ++ +#define RISCV_PAUSE ".4byte 0x100000f" +#define ZAWRS_WRS_NTO ".4byte 0x00d00073" +#define ZAWRS_WRS_STO ".4byte 0x01d00073" ++ ++#ifndef __ASSEMBLY__ ++#define nop() __asm__ __volatile__ ("nop") ++#define __nops(n) ".rept " #n "\nnop\n.endr\n" ++#define nops(n) __asm__ __volatile__ (__nops(n)) ++#endif + #endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h -index 42497d487a17..8118363494e0 100644 +index 42497d487a17..75a98e6219a3 100644 --- a/arch/riscv/include/asm/io.h +++ b/arch/riscv/include/asm/io.h -@@ -47,10 +47,10 @@ +@@ -30,6 +30,9 @@ + #define PCI_IOBASE ((void __iomem *)PCI_IO_START) + #endif /* CONFIG_MMU */ + ++#define ioremap_cache(addr, size) \ ++ ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) ++ + /* + * Emulation routines for the port-mapped IO space used by some PCI drivers. 
+ * These are defined as being "fully synchronous", but also "not guaranteed to +@@ -47,10 +50,10 @@ * sufficient to ensure this works sanely on controllers that support I/O * writes. */ @@ -35024,7 +36673,7 @@ index 42497d487a17..8118363494e0 100644 /* * Accesses from a single hart to a single I/O address must be ordered. This -@@ -140,4 +140,8 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) +@@ -140,4 +143,8 @@ __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw()) ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL)) #endif @@ -35034,10 +36683,10 @@ index 42497d487a17..8118363494e0 100644 + #endif /* _ASM_RISCV_IO_H */ diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h -index 8e10a94430a2..dba0359f029e 100644 +index 8e10a94430a2..8f355794b2ad 100644 --- a/arch/riscv/include/asm/irq.h +++ b/arch/riscv/include/asm/irq.h -@@ -12,8 +12,68 @@ +@@ -12,8 +12,77 @@ #include @@ -35052,6 +36701,15 @@ index 8e10a94430a2..dba0359f029e 100644 struct fwnode_handle *riscv_get_intc_hwnode(void); ++struct riscv_iommu_ir_vcpu_info { ++ u64 gpa; ++ u64 hpa; ++ u64 msi_addr_mask; ++ u64 msi_addr_pattern; ++ u32 group_index_bits; ++ u32 group_index_shift; ++}; ++ +#ifdef CONFIG_ACPI + +enum riscv_irqchip_type { @@ -35106,6 +36764,30 @@ index 8e10a94430a2..dba0359f029e 100644 +#endif /* CONFIG_ACPI */ + #endif /* _ASM_RISCV_IRQ_H */ +diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h +index 2b56769cb530..b9ee8346cc8c 100644 +--- a/arch/riscv/include/asm/kexec.h ++++ b/arch/riscv/include/asm/kexec.h +@@ -56,6 +56,7 @@ extern riscv_kexec_method riscv_kexec_norelocate; + + #ifdef CONFIG_KEXEC_FILE + extern const struct kexec_file_ops elf_kexec_ops; ++extern const struct kexec_file_ops image_kexec_ops; + + struct purgatory_info; + int arch_kexec_apply_relocations_add(struct purgatory_info *pi, +@@ -67,6 +68,11 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + struct kimage; + int 
arch_kimage_file_post_load_cleanup(struct kimage *image); + #define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup ++ ++int load_extra_segments(struct kimage *image, unsigned long kernel_start, ++ unsigned long kernel_len, char *initrd, ++ unsigned long initrd_len, char *cmdline, ++ unsigned long cmdline_len); + #endif + + #endif diff --git a/arch/riscv/include/asm/kvm_aia_aplic.h b/arch/riscv/include/asm/kvm_aia_aplic.h deleted file mode 100644 index 6dd1a4809ec1..000000000000 @@ -35215,7 +36897,7 @@ index da5881d2bde0..000000000000 - -#endif diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h -index 459e61ad7d2b..a2a9dff0c07f 100644 +index 459e61ad7d2b..b0276feec9e8 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -41,6 +41,7 @@ @@ -35226,11 +36908,69 @@ index 459e61ad7d2b..a2a9dff0c07f 100644 enum kvm_riscv_hfence_type { KVM_RISCV_HFENCE_UNKNOWN = 0, -@@ -245,6 +246,12 @@ struct kvm_vcpu_arch { +@@ -68,12 +69,18 @@ struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 ecall_exit_stat; + u64 wfi_exit_stat; ++ u64 wrs_exit_stat; + u64 mmio_exit_user; + u64 mmio_exit_kernel; + u64 csr_exit_user; + u64 csr_exit_kernel; + u64 signal_exits; + u64 exits; ++ u64 instr_illegal_exits; ++ u64 load_misaligned_exits; ++ u64 store_misaligned_exits; ++ u64 load_access_exits; ++ u64 store_access_exits; + }; + + struct kvm_arch_memory_slot { +@@ -162,6 +169,16 @@ struct kvm_vcpu_csr { + unsigned long hvip; + unsigned long vsatp; + unsigned long scounteren; ++ unsigned long senvcfg; ++}; ++ ++struct kvm_vcpu_config { ++ u64 henvcfg; ++ u64 hstateen0; ++}; ++ ++struct kvm_vcpu_smstateen_csr { ++ unsigned long sstateen0; + }; + + struct kvm_vcpu_arch { +@@ -183,6 +200,8 @@ struct kvm_vcpu_arch { + unsigned long host_sscratch; + unsigned long host_stvec; + unsigned long host_scounteren; ++ unsigned long host_senvcfg; ++ unsigned long host_sstateen0; + + /* CPU context of 
Host */ + struct kvm_cpu_context host_context; +@@ -193,6 +212,9 @@ struct kvm_vcpu_arch { + /* CPU CSR context of Guest VCPU */ + struct kvm_vcpu_csr guest_csr; + ++ /* CPU Smstateen CSR context of Guest VCPU */ ++ struct kvm_vcpu_smstateen_csr smstateen_csr; ++ + /* CPU context upon Guest VCPU reset */ + struct kvm_cpu_context guest_reset_context; + +@@ -245,6 +267,15 @@ struct kvm_vcpu_arch { /* Performance monitoring context */ struct kvm_pmu pmu_context; + ++ /* 'static' configurations which are set only once */ ++ struct kvm_vcpu_config cfg; ++ + /* SBI steal-time accounting */ + struct { + gpa_t shmem; @@ -35239,7 +36979,7 @@ index 459e61ad7d2b..a2a9dff0c07f 100644 }; static inline void kvm_arch_sync_events(struct kvm *kvm) {} -@@ -356,6 +363,8 @@ void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); +@@ -356,6 +387,8 @@ void __kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); @@ -35528,7 +37268,7 @@ index 3272ca7a5270..b99bd66107a6 100644 static inline u64 riscv_page_mtmask(void) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h -index e58315cedfd3..e93155c2c200 100644 +index e58315cedfd3..980e435e1451 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -117,6 +117,7 @@ @@ -35549,7 +37289,65 @@ index e58315cedfd3..e93155c2c200 100644 #define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP) extern pgd_t swapper_pg_dir[]; -@@ -620,6 +622,17 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) +@@ -478,6 +480,9 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) + { ++ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1) ++ : : : : svvptc); ++ + /* + * The kernel assumes that TLBs don't cache invalid entries, but + * 
in RISC-V, SFENCE.VMA specifies an ordering constraint, not a +@@ -487,6 +492,13 @@ static inline void update_mmu_cache_range(struct vm_fault *vmf, + */ + while (nr--) + local_flush_tlb_page(address + nr * PAGE_SIZE); ++ ++svvptc:; ++ /* ++ * Svvptc guarantees that the new valid pte will be visible within ++ * a bounded timeframe, so when the uarch does not cache invalid ++ * entries, we don't have to do anything. ++ */ + } + #define update_mmu_cache(vma, addr, ptep) \ + update_mmu_cache_range(NULL, vma, addr, ptep, 1) +@@ -518,12 +530,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) + WRITE_ONCE(*ptep, pteval); + } + +-void flush_icache_pte(pte_t pte); ++void flush_icache_pte(struct mm_struct *mm, pte_t pte); + +-static inline void __set_pte_at(pte_t *ptep, pte_t pteval) ++static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval) + { + if (pte_present(pteval) && pte_exec(pteval)) +- flush_icache_pte(pteval); ++ flush_icache_pte(mm, pteval); + + set_pte(ptep, pteval); + } +@@ -536,7 +548,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, + page_table_check_ptes_set(mm, ptep, pteval, nr); + + for (;;) { +- __set_pte_at(ptep, pteval); ++ __set_pte_at(mm, ptep, pteval); + if (--nr == 0) + break; + ptep++; +@@ -548,7 +560,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, + static inline void pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) + { +- __set_pte_at(ptep, __pte(0)); ++ __set_pte_at(mm, ptep, __pte(0)); + } + + #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */ +@@ -620,6 +632,17 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot) return __pgprot(prot); } @@ -35567,7 +37365,7 @@ index e58315cedfd3..e93155c2c200 100644 /* * THP functions */ -@@ -663,6 +676,12 @@ static inline int pmd_write(pmd_t pmd) +@@ -663,6 +686,12 @@ static inline int pmd_write(pmd_t pmd) return pte_write(pmd_pte(pmd)); } @@ -35580,11 +37378,38 @@ index 
e58315cedfd3..e93155c2c200 100644 static inline int pmd_dirty(pmd_t pmd) { return pte_dirty(pmd_pte(pmd)); +@@ -713,14 +742,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) + { + page_table_check_pmd_set(mm, pmdp, pmd); +- return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd)); ++ return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd)); + } + + static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pud) + { + page_table_check_pud_set(mm, pudp, pud); +- return __set_pte_at((pte_t *)pudp, pud_pte(pud)); ++ return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud)); + } + + #ifdef CONFIG_PAGE_TABLE_CHECK diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h -index 4f6af8c6cfa0..eb458e4cb93e 100644 +index 4f6af8c6cfa0..3811688a68b5 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h -@@ -57,6 +57,12 @@ +@@ -12,6 +12,9 @@ + #include + + #include ++#include ++#include ++#include + + #ifdef CONFIG_64BIT + #define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1)) +@@ -57,6 +60,12 @@ #define STACK_TOP DEFAULT_MAP_WINDOW @@ -35597,7 +37422,7 @@ index 4f6af8c6cfa0..eb458e4cb93e 100644 /* * This decides where the kernel will search for a free chunk of vm * space during mmap's. 
-@@ -72,6 +78,43 @@ +@@ -72,6 +81,43 @@ struct task_struct; struct pt_regs; @@ -35641,11 +37466,12 @@ index 4f6af8c6cfa0..eb458e4cb93e 100644 /* CPU-specific state of a task */ struct thread_struct { /* Callee-saved registers */ -@@ -80,8 +123,10 @@ struct thread_struct { +@@ -80,8 +126,11 @@ struct thread_struct { unsigned long s[12]; /* s[0]: frame pointer */ struct __riscv_d_ext_state fstate; unsigned long bad_cause; - unsigned long vstate_ctrl; ++ unsigned long envcfg; + u32 riscv_v_flags; + u32 vstate_ctrl; struct __riscv_v_ext_state vstate; @@ -35653,11 +37479,39 @@ index 4f6af8c6cfa0..eb458e4cb93e 100644 }; /* Whitelist the fstate from the task_struct for hardened usercopy */ +@@ -103,6 +152,27 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, + #define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc) + #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) + ++#define PREFETCH_ASM(x) \ ++ ALTERNATIVE(__nops(1), PREFETCH_R(x, 0), 0, \ ++ RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP) ++ ++#define PREFETCHW_ASM(x) \ ++ ALTERNATIVE(__nops(1), PREFETCH_W(x, 0), 0, \ ++ RISCV_ISA_EXT_ZICBOP, CONFIG_RISCV_ISA_ZICBOP) ++ ++#ifdef CONFIG_RISCV_ISA_ZICBOP ++#define ARCH_HAS_PREFETCH ++static inline void prefetch(const void *x) ++{ ++ __asm__ __volatile__(PREFETCH_ASM(%0) : : "r" (x) : "memory"); ++} ++ ++#define ARCH_HAS_PREFETCHW ++static inline void prefetchw(const void *x) ++{ ++ __asm__ __volatile__(PREFETCHW_ASM(%0) : : "r" (x) : "memory"); ++} ++#endif /* CONFIG_RISCV_ISA_ZICBOP */ + + /* Do necessary setup to start up a newly executed thread. 
*/ + extern void start_thread(struct pt_regs *regs, diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h -index 3ed853b8a8c8..d9a7fd7eb8d6 100644 +index 3ed853b8a8c8..5b57946990bd 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h -@@ -29,7 +29,10 @@ enum sbi_ext_id { +@@ -29,7 +29,11 @@ enum sbi_ext_id { SBI_EXT_RFENCE = 0x52464E43, SBI_EXT_HSM = 0x48534D, SBI_EXT_SRST = 0x53525354, @@ -35665,10 +37519,11 @@ index 3ed853b8a8c8..d9a7fd7eb8d6 100644 SBI_EXT_PMU = 0x504D55, + SBI_EXT_DBCN = 0x4442434E, + SBI_EXT_STA = 0x535441, ++ SBI_EXT_SSE = 0x535345, /* Experimentals extensions must lie within this range */ SBI_EXT_EXPERIMENTAL_START = 0x08000000, -@@ -85,6 +88,21 @@ enum sbi_hsm_hart_state { +@@ -85,6 +89,21 @@ enum sbi_hsm_hart_state { SBI_HSM_STATE_RESUME_PENDING, }; @@ -35690,7 +37545,7 @@ index 3ed853b8a8c8..d9a7fd7eb8d6 100644 #define SBI_HSM_SUSP_BASE_MASK 0x7fffffff #define SBI_HSM_SUSP_NON_RET_BIT 0x80000000 #define SBI_HSM_SUSP_PLAT_BASE 0x10000000 -@@ -113,6 +131,14 @@ enum sbi_srst_reset_reason { +@@ -113,6 +132,14 @@ enum sbi_srst_reset_reason { SBI_SRST_RESET_REASON_SYS_FAILURE, }; @@ -35705,7 +37560,7 @@ index 3ed853b8a8c8..d9a7fd7eb8d6 100644 enum sbi_ext_pmu_fid { SBI_EXT_PMU_NUM_COUNTERS = 0, SBI_EXT_PMU_COUNTER_GET_INFO, -@@ -236,6 +262,12 @@ enum sbi_pmu_ctr_type { +@@ -236,6 +263,72 @@ enum sbi_pmu_ctr_type { /* Flags defined for counter stop function */ #define SBI_PMU_STOP_FLAG_RESET (1 << 0) @@ -35714,11 +37569,71 @@ index 3ed853b8a8c8..d9a7fd7eb8d6 100644 + SBI_EXT_DBCN_CONSOLE_READ = 1, + SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2, +}; ++ ++enum sbi_ext_sse_fid { ++ SBI_SSE_EVENT_ATTR_READ = 0, ++ SBI_SSE_EVENT_ATTR_WRITE, ++ SBI_SSE_EVENT_REGISTER, ++ SBI_SSE_EVENT_UNREGISTER, ++ SBI_SSE_EVENT_ENABLE, ++ SBI_SSE_EVENT_DISABLE, ++ SBI_SSE_EVENT_COMPLETE, ++ SBI_SSE_EVENT_SIGNAL, ++ SBI_SSE_EVENT_HART_UNMASK, ++ SBI_SSE_EVENT_HART_MASK, ++}; ++ ++enum sbi_sse_state { ++ SBI_SSE_STATE_UNUSED = 0, ++ 
SBI_SSE_STATE_REGISTERED = 1, ++ SBI_SSE_STATE_ENABLED = 2, ++ SBI_SSE_STATE_RUNNING = 3, ++}; ++ ++/* SBI SSE Event Attributes. */ ++enum sbi_sse_attr_id { ++ SBI_SSE_ATTR_STATUS = 0x00000000, ++ SBI_SSE_ATTR_PRIO = 0x00000001, ++ SBI_SSE_ATTR_CONFIG = 0x00000002, ++ SBI_SSE_ATTR_PREFERRED_HART = 0x00000003, ++ SBI_SSE_ATTR_ENTRY_PC = 0x00000004, ++ SBI_SSE_ATTR_ENTRY_ARG = 0x00000005, ++ SBI_SSE_ATTR_INTERRUPTED_SEPC = 0x00000006, ++ SBI_SSE_ATTR_INTERRUPTED_FLAGS = 0x00000007, ++ SBI_SSE_ATTR_INTERRUPTED_A6 = 0x00000008, ++ SBI_SSE_ATTR_INTERRUPTED_A7 = 0x00000009, ++ ++ SBI_SSE_ATTR_MAX = 0x0000000A ++}; ++ ++#define SBI_SSE_ATTR_STATUS_STATE_OFFSET 0 ++#define SBI_SSE_ATTR_STATUS_STATE_MASK 0x3 ++#define SBI_SSE_ATTR_STATUS_PENDING_OFFSET 2 ++#define SBI_SSE_ATTR_STATUS_INJECT_OFFSET 3 ++ ++#define SBI_SSE_ATTR_CONFIG_ONESHOT BIT(0) ++ ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPP BIT(0) ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_SSTATUS_SPIE BIT(1) ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPV BIT(2) ++#define SBI_SSE_ATTR_INTERRUPTED_FLAGS_HSTATUS_SPVP BIT(3) ++ ++#define SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS 0x00000000 ++#define SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP 0x00000001 ++#define SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS 0x00008000 ++#define SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW 0x00010000 ++#define SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS 0x00100000 ++#define SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS 0x00108000 ++#define SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED 0xffff0000 ++#define SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED 0xffff8000 ++ ++#define SBI_SSE_EVENT_PLATFORM BIT(14) ++#define SBI_SSE_EVENT_GLOBAL BIT(15) + #define SBI_SPEC_VERSION_DEFAULT 0x1 #define SBI_SPEC_VERSION_MAJOR_SHIFT 24 #define SBI_SPEC_VERSION_MAJOR_MASK 0x7f -@@ -264,8 +296,13 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, +@@ -264,8 +357,13 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, unsigned long arg3, unsigned long arg4, unsigned long arg5); @@ -35732,7 +37647,7 @@ 
index 3ed853b8a8c8..d9a7fd7eb8d6 100644 long sbi_get_mvendorid(void); long sbi_get_marchid(void); long sbi_get_mimpid(void); -@@ -322,6 +359,11 @@ static inline unsigned long sbi_mk_version(unsigned long major, +@@ -322,6 +420,11 @@ static inline unsigned long sbi_mk_version(unsigned long major, } int sbi_err_map_linux_errno(int err); @@ -35814,19 +37729,112 @@ index 000000000000..54efbf523d49 +#endif /* ! CONFIG_RISCV_ISA_V */ + +#endif -diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h -index 2f901a410586..87ab782be702 100644 ---- a/arch/riscv/include/asm/sparsemem.h -+++ b/arch/riscv/include/asm/sparsemem.h -@@ -5,7 +5,7 @@ - - #ifdef CONFIG_SPARSEMEM - #ifdef CONFIG_64BIT --#define MAX_PHYSMEM_BITS 56 -+#define MAX_PHYSMEM_BITS 44 - #else - #define MAX_PHYSMEM_BITS 32 - #endif /* CONFIG_64BIT */ +diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h +new file mode 100644 +index 000000000000..e5121b89acea +--- /dev/null ++++ b/arch/riscv/include/asm/spinlock.h +@@ -0,0 +1,47 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++ ++#ifndef __ASM_RISCV_SPINLOCK_H ++#define __ASM_RISCV_SPINLOCK_H ++ ++#ifdef CONFIG_RISCV_COMBO_SPINLOCKS ++#define _Q_PENDING_LOOPS (1 << 9) ++ ++#define __no_arch_spinlock_redefine ++#include ++#include ++#include ++ ++/* ++ * TODO: Use an alternative instead of a static key when we are able to parse ++ * the extensions string earlier in the boot process. 
++ */ ++DECLARE_STATIC_KEY_TRUE(qspinlock_key); ++ ++#define SPINLOCK_BASE_DECLARE(op, type, type_lock) \ ++static __always_inline type arch_spin_##op(type_lock lock) \ ++{ \ ++ if (static_branch_unlikely(&qspinlock_key)) \ ++ return queued_spin_##op(lock); \ ++ return ticket_spin_##op(lock); \ ++} ++ ++SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *) ++SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *) ++SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *) ++SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *) ++SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *) ++SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t) ++ ++#elif defined(CONFIG_RISCV_QUEUED_SPINLOCKS) ++ ++#include ++ ++#else ++ ++#include ++ ++#endif ++ ++#include ++ ++#endif /* __ASM_RISCV_SPINLOCK_H */ +diff --git a/arch/riscv/include/asm/sse.h b/arch/riscv/include/asm/sse.h +new file mode 100644 +index 000000000000..8929a268462c +--- /dev/null ++++ b/arch/riscv/include/asm/sse.h +@@ -0,0 +1,47 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2024 Rivos Inc. 
++ */ ++#ifndef __ASM_SSE_H ++#define __ASM_SSE_H ++ ++#include ++ ++#ifdef CONFIG_RISCV_SSE ++ ++struct sse_event_interrupted_state { ++ unsigned long a6; ++ unsigned long a7; ++}; ++ ++struct sse_event_arch_data { ++ void *stack; ++ void *shadow_stack; ++ unsigned long tmp; ++ struct sse_event_interrupted_state interrupted; ++ unsigned long interrupted_phys; ++ u32 evt_id; ++ unsigned int hart_id; ++ unsigned int cpu_id; ++}; ++ ++static inline bool sse_event_is_global(u32 evt) ++{ ++ return !!(evt & SBI_SSE_EVENT_GLOBAL); ++} ++ ++void arch_sse_event_update_cpu(struct sse_event_arch_data *arch_evt, int cpu); ++int arch_sse_init_event(struct sse_event_arch_data *arch_evt, u32 evt_id, ++ int cpu); ++void arch_sse_free_event(struct sse_event_arch_data *arch_evt); ++int arch_sse_register_event(struct sse_event_arch_data *arch_evt); ++ ++void sse_handle_event(struct sse_event_arch_data *arch_evt, ++ struct pt_regs *regs); ++asmlinkage void handle_sse(void); ++asmlinkage void do_sse(struct sse_event_arch_data *arch_evt, ++ struct pt_regs *reg); ++ ++#endif ++ ++#endif diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h index 02f87867389a..4ffb022b097f 100644 --- a/arch/riscv/include/asm/suspend.h @@ -35849,7 +37857,7 @@ index 02f87867389a..4ffb022b097f 100644 +int riscv_sbi_hart_suspend(u32 state); #endif diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h -index a727be723c56..774a17b39c7d 100644 +index a727be723c56..6134e302686f 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -9,7 +9,7 @@ @@ -35893,6 +37901,42 @@ index a727be723c56..774a17b39c7d 100644 #else static __always_inline bool has_fpu(void) { return false; } #define fstate_save(task, regs) do { } while (0) +@@ -70,6 +84,26 @@ static __always_inline bool has_fpu(void) { return false; } + #define __switch_to_fpu(__prev, __next) do { } while (0) + #endif + ++static inline void __switch_to_envcfg(struct 
task_struct *next) ++{ ++ asm volatile (ALTERNATIVE("nop", "csrw " __stringify(CSR_ENVCFG) ", %0", ++ 0, RISCV_ISA_EXT_XLINUXENVCFG, 1) ++ :: "r" (next->thread.envcfg) : "memory"); ++} ++ ++#ifdef CONFIG_RISCV_SSE ++DECLARE_PER_CPU(struct task_struct *, __sse_entry_task); ++ ++static inline void __switch_sse_entry_task(struct task_struct *next) ++{ ++ __this_cpu_write(__sse_entry_task, next); ++} ++#else ++static inline void __switch_sse_entry_task(struct task_struct *next) ++{ ++} ++#endif ++ + extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); + +@@ -81,6 +115,8 @@ do { \ + __switch_to_fpu(__prev, __next); \ + if (has_vector()) \ + __switch_to_vector(__prev, __next); \ ++ __switch_to_envcfg(__next); \ ++ __switch_sse_entry_task(__next); \ + ((last) = __switch_to(__prev, __next)); \ + } while (0) + diff --git a/arch/riscv/include/asm/sync_core.h b/arch/riscv/include/asm/sync_core.h new file mode 100644 index 000000000000..9153016da8f1 @@ -35929,10 +37973,32 @@ index 000000000000..9153016da8f1 + +#endif /* _ASM_RISCV_SYNC_CORE_H */ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h -index 8c72d1bcdf14..fb10cdf02b97 100644 +index 8c72d1bcdf14..59357bb8ffb3 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h -@@ -94,12 +94,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); +@@ -35,6 +35,7 @@ + #define OVERFLOW_STACK_SIZE SZ_4K + + #define IRQ_STACK_SIZE THREAD_SIZE ++#define SSE_STACK_SIZE THREAD_SIZE + + #ifndef __ASSEMBLY__ + +@@ -61,6 +62,13 @@ struct thread_info { + long user_sp; /* User stack pointer */ + int cpu; + unsigned long syscall_work; /* SYSCALL_WORK_ flags */ ++#ifdef CONFIG_64BIT ++ /* ++ * Used in handle_exception() to save a0, a1 and a2 before knowing if we ++ * can access the kernel stack. 
++ */ ++ unsigned long a0, a1, a2; ++#endif + }; + + /* +@@ -94,12 +102,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */ #define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */ #define TIF_32BIT 11 /* compat-mode 32bit process */ @@ -36416,7 +38482,7 @@ index 000000000000..96011861e46b + } while (0) +#endif diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h -index 006bfb48343d..6fdaefa62e14 100644 +index 006bfb48343d..a6a094780a30 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -10,7 +10,7 @@ @@ -36428,7 +38494,7 @@ index 006bfb48343d..6fdaefa62e14 100644 */ struct riscv_hwprobe { __s64 key; -@@ -29,6 +29,50 @@ struct riscv_hwprobe { +@@ -29,6 +29,58 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_EXT_ZBA (1 << 3) #define RISCV_HWPROBE_EXT_ZBB (1 << 4) #define RISCV_HWPROBE_EXT_ZBS (1 << 5) @@ -36476,16 +38542,25 @@ index 006bfb48343d..6fdaefa62e14 100644 +#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) +#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) +#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49) ++#define RISCV_HWPROBE_EXT_ZICNTR (1ULL << 50) ++#define RISCV_HWPROBE_EXT_ZIHPM (1ULL << 51) ++#define RISCV_HWPROBE_EXT_ZFBFMIN (1ULL << 52) ++#define RISCV_HWPROBE_EXT_ZVFBFMIN (1ULL << 53) ++#define RISCV_HWPROBE_EXT_ZVFBFWMA (1ULL << 54) ++#define RISCV_HWPROBE_EXT_ZICBOM (1ULL << 55) ++#define RISCV_HWPROBE_EXT_ZAAMO (1ULL << 56) ++#define RISCV_HWPROBE_EXT_ZALRSC (1ULL << 57) #define RISCV_HWPROBE_KEY_CPUPERF_0 5 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) -@@ -36,6 +80,12 @@ struct riscv_hwprobe { +@@ -36,6 +88,13 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0) #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0) #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0) +#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6 
+#define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7 +#define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8 ++#define RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE 9 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */ +/* Flags */ @@ -36493,10 +38568,85 @@ index 006bfb48343d..6fdaefa62e14 100644 + #endif diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h -index 992c5e407104..d25800e04cf2 100644 +index 992c5e407104..2e384eb1b7c2 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h -@@ -148,9 +148,17 @@ enum KVM_RISCV_SBI_EXT_ID { +@@ -80,6 +80,7 @@ struct kvm_riscv_csr { + unsigned long sip; + unsigned long satp; + unsigned long scounteren; ++ unsigned long senvcfg; + }; + + /* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +@@ -93,6 +94,11 @@ struct kvm_riscv_aia_csr { + unsigned long iprio2h; + }; + ++/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ ++struct kvm_riscv_smstateen_csr { ++ unsigned long sstateen0; ++}; ++ + /* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ + struct kvm_riscv_timer { + __u64 frequency; +@@ -131,6 +137,54 @@ enum KVM_RISCV_ISA_EXT_ID { + KVM_RISCV_ISA_EXT_ZICSR, + KVM_RISCV_ISA_EXT_ZIFENCEI, + KVM_RISCV_ISA_EXT_ZIHPM, ++ KVM_RISCV_ISA_EXT_SMSTATEEN, ++ KVM_RISCV_ISA_EXT_ZICOND, ++ KVM_RISCV_ISA_EXT_ZBC, ++ KVM_RISCV_ISA_EXT_ZBKB, ++ KVM_RISCV_ISA_EXT_ZBKC, ++ KVM_RISCV_ISA_EXT_ZBKX, ++ KVM_RISCV_ISA_EXT_ZKND, ++ KVM_RISCV_ISA_EXT_ZKNE, ++ KVM_RISCV_ISA_EXT_ZKNH, ++ KVM_RISCV_ISA_EXT_ZKR, ++ KVM_RISCV_ISA_EXT_ZKSED, ++ KVM_RISCV_ISA_EXT_ZKSH, ++ KVM_RISCV_ISA_EXT_ZKT, ++ KVM_RISCV_ISA_EXT_ZVBB, ++ KVM_RISCV_ISA_EXT_ZVBC, ++ KVM_RISCV_ISA_EXT_ZVKB, ++ KVM_RISCV_ISA_EXT_ZVKG, ++ KVM_RISCV_ISA_EXT_ZVKNED, ++ KVM_RISCV_ISA_EXT_ZVKNHA, ++ KVM_RISCV_ISA_EXT_ZVKNHB, ++ KVM_RISCV_ISA_EXT_ZVKSED, ++ KVM_RISCV_ISA_EXT_ZVKSH, ++ KVM_RISCV_ISA_EXT_ZVKT, ++ KVM_RISCV_ISA_EXT_ZFH, ++ KVM_RISCV_ISA_EXT_ZFHMIN, ++ KVM_RISCV_ISA_EXT_ZIHINTNTL, ++ KVM_RISCV_ISA_EXT_ZVFH, ++ 
KVM_RISCV_ISA_EXT_ZVFHMIN, ++ KVM_RISCV_ISA_EXT_ZFA, ++ KVM_RISCV_ISA_EXT_ZTSO, ++ KVM_RISCV_ISA_EXT_ZACAS, ++ KVM_RISCV_ISA_EXT_SSCOFPMF, ++ KVM_RISCV_ISA_EXT_ZIMOP, ++ KVM_RISCV_ISA_EXT_ZCA, ++ KVM_RISCV_ISA_EXT_ZCB, ++ KVM_RISCV_ISA_EXT_ZCD, ++ KVM_RISCV_ISA_EXT_ZCF, ++ KVM_RISCV_ISA_EXT_ZCMOP, ++ KVM_RISCV_ISA_EXT_ZAWRS, ++ KVM_RISCV_ISA_EXT_SMNPM, ++ KVM_RISCV_ISA_EXT_SSNPM, ++ KVM_RISCV_ISA_EXT_SVADE, ++ KVM_RISCV_ISA_EXT_SVADU, ++ KVM_RISCV_ISA_EXT_SVVPTC, ++ KVM_RISCV_ISA_EXT_ZABHA, ++ KVM_RISCV_ISA_EXT_ZICCRSE, ++ KVM_RISCV_ISA_EXT_ZAAMO, ++ KVM_RISCV_ISA_EXT_ZALRSC, + KVM_RISCV_ISA_EXT_MAX, + }; + +@@ -148,9 +202,17 @@ enum KVM_RISCV_SBI_EXT_ID { KVM_RISCV_SBI_EXT_PMU, KVM_RISCV_SBI_EXT_EXPERIMENTAL, KVM_RISCV_SBI_EXT_VENDOR, @@ -36514,7 +38664,21 @@ index 992c5e407104..d25800e04cf2 100644 /* Possible states for kvm_riscv_timer */ #define KVM_RISCV_TIMER_STATE_OFF 0 #define KVM_RISCV_TIMER_STATE_ON 1 -@@ -229,6 +237,12 @@ enum KVM_RISCV_SBI_EXT_ID { +@@ -178,10 +240,13 @@ enum KVM_RISCV_SBI_EXT_ID { + #define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT) + #define KVM_REG_RISCV_CSR_GENERAL (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) + #define KVM_REG_RISCV_CSR_AIA (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT) ++#define KVM_REG_RISCV_CSR_SMSTATEEN (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT) + #define KVM_REG_RISCV_CSR_REG(name) \ + (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long)) + #define KVM_REG_RISCV_CSR_AIA_REG(name) \ + (offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long)) ++#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name) \ ++ (offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long)) + + /* Timer registers are mapped as type 4 */ + #define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT) +@@ -229,6 +294,12 @@ enum KVM_RISCV_SBI_EXT_ID { #define KVM_REG_RISCV_VECTOR_REG(n) \ ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long)) @@ -36528,7 +38692,7 @@ index 992c5e407104..d25800e04cf2 100644 #define 
KVM_DEV_RISCV_APLIC_ALIGN 0x1000 #define KVM_DEV_RISCV_APLIC_SIZE 0x4000 diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile -index a2499fcc1cf3..1fd2ac7f0c6b 100644 +index a2499fcc1cf3..3b2e798adb65 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -52,18 +52,22 @@ obj-y += setup.o @@ -36554,15 +38718,25 @@ index a2499fcc1cf3..1fd2ac7f0c6b 100644 obj-$(CONFIG_SMP) += smpboot.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += cpu_ops.o -@@ -86,6 +90,7 @@ obj-$(CONFIG_SMP) += sbi-ipi.o +@@ -81,14 +85,16 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o + obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o + obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o + obj-$(CONFIG_RISCV_SBI) += sbi.o ++obj-$(CONFIG_RISCV_SSE) += sse.o sse_entry.o + ifeq ($(CONFIG_RISCV_SBI), y) + obj-$(CONFIG_SMP) += sbi-ipi.o obj-$(CONFIG_SMP) += cpu_ops_sbi.o endif obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o +obj-$(CONFIG_PARAVIRT) += paravirt.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KEXEC_CORE) += kexec_relocate.o crash_save_regs.o machine_kexec.o - obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o -@@ -104,3 +109,4 @@ obj-$(CONFIG_COMPAT) += compat_vdso/ +-obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o ++obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o kexec_image.o machine_kexec_file.o + obj-$(CONFIG_CRASH_DUMP) += crash_dump.o + obj-$(CONFIG_CRASH_CORE) += crash_core.o + +@@ -104,3 +110,4 @@ obj-$(CONFIG_COMPAT) += compat_vdso/ obj-$(CONFIG_64BIT) += pi/ obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch.o @@ -36903,8 +39077,60 @@ index 319a1da0358b..0128b161bfda 100644 cpu_mfr_info->patch_func = andes_errata_patch_func; break; #endif +diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c +index 6a992cba2f28..0d103c942b2a 100644 +--- a/arch/riscv/kernel/asm-offsets.c ++++ b/arch/riscv/kernel/asm-offsets.c +@@ -12,6 +12,8 @@ + #include + #include + #include ++#include ++#include + #include + + 
void asm_offsets(void); +@@ -32,6 +34,8 @@ void asm_offsets(void) + OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]); + OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]); + OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]); ++ ++ OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); + OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); + OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count); + OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); +@@ -74,6 +78,11 @@ void asm_offsets(void) + #ifdef CONFIG_STACKPROTECTOR + OFFSET(TSK_STACK_CANARY, task_struct, stack_canary); + #endif ++#ifdef CONFIG_64BIT ++ OFFSET(TASK_TI_A0, task_struct, thread_info.a0); ++ OFFSET(TASK_TI_A1, task_struct, thread_info.a1); ++ OFFSET(TASK_TI_A2, task_struct, thread_info.a2); ++#endif + + DEFINE(PT_SIZE, sizeof(struct pt_regs)); + OFFSET(PT_EPC, pt_regs, epc); +@@ -478,4 +487,17 @@ void asm_offsets(void) + OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr); + OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr); + OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr); ++ ++#ifdef CONFIG_RISCV_SSE ++ OFFSET(SSE_REG_EVT_STACK, sse_event_arch_data, stack); ++ OFFSET(SSE_REG_EVT_SHADOW_STACK, sse_event_arch_data, shadow_stack); ++ OFFSET(SSE_REG_EVT_TMP, sse_event_arch_data, tmp); ++ OFFSET(SSE_REG_HART_ID, sse_event_arch_data, hart_id); ++ OFFSET(SSE_REG_CPU_ID, sse_event_arch_data, cpu_id); ++ ++ DEFINE(SBI_EXT_SSE, SBI_EXT_SSE); ++ DEFINE(SBI_SSE_EVENT_COMPLETE, SBI_SSE_EVENT_COMPLETE); ++ #define ASM_MAX_CPUS NR_CPUS ++ DEFINE(ASM_NR_CPUS, ASM_MAX_CPUS); ++#endif + } diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c -index bb5fb2b820a2..820f579e4581 100644 +index bb5fb2b820a2..57884b72b89f 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -23,6 +23,7 @@ @@ -36915,16 +39141,18 @@ index bb5fb2b820a2..820f579e4581 100644 #include "copy-unaligned.h" -@@ -32,6 +33,8 
@@ +@@ -32,6 +33,10 @@ #define MISALIGNED_BUFFER_SIZE 0x4000 #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80) +static bool any_cpu_has_zicboz; ++static bool any_cpu_has_zicbop; ++static bool any_cpu_has_zicbom; + unsigned long elf_hwcap __read_mostly; /* Host ISA bitmap */ -@@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base); +@@ -69,7 +74,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base); * * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used. */ @@ -36933,13 +39161,13 @@ index bb5fb2b820a2..820f579e4581 100644 { const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa; -@@ -80,37 +83,204 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) +@@ -80,38 +85,258 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) } EXPORT_SYMBOL_GPL(__riscv_isa_extension_available); -static bool riscv_isa_extension_check(int id) -+static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data, -+ const unsigned long *isa_bitmap) ++static int riscv_ext_f_depends(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) { - switch (id) { - case RISCV_ISA_EXT_ZICBOM: @@ -36960,6 +39188,15 @@ index bb5fb2b820a2..820f579e4581 100644 - return false; - } - return true; ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_f)) ++ return 0; ++ ++ return -EPROBE_DEFER; ++} ++ ++static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ + if (!riscv_cbom_block_size) { + pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n"); + return -EINVAL; @@ -36967,11 +39204,12 @@ index bb5fb2b820a2..820f579e4581 100644 + if (!is_power_of_2(riscv_cbom_block_size)) { + pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n"); + return -EINVAL; - } ++ } ++ ++ any_cpu_has_zicbom = true; + return 0; +} - -- return true; ++ +static int 
riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data, + const unsigned long *isa_bitmap) +{ @@ -36985,18 +39223,30 @@ index bb5fb2b820a2..820f579e4581 100644 + } + any_cpu_has_zicboz = true; + return 0; - } - --#define __RISCV_ISA_EXT_DATA(_name, _id) { \ -- .name = #_name, \ -- .property = #_name, \ -- .id = _id, \ ++} ++ ++static int riscv_ext_zicbop_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (!riscv_cbop_block_size) { ++ pr_err("Zicbop detected in ISA string, disabling as no cbop-block-size found\n"); ++ return -EINVAL; ++ } ++ if (!is_power_of_2(riscv_cbop_block_size)) { ++ pr_err("Zicbop disabled as cbop-block-size present, but is not a power-of-2\n"); ++ return -EINVAL; + } ++ any_cpu_has_zicbop = true; ++ return 0; ++} ++ +static int riscv_ext_zca_depends(const struct riscv_isa_ext_data *data, + const unsigned long *isa_bitmap) +{ + if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA)) + return 0; -+ + +- return true; + return -EPROBE_DEFER; } +static int riscv_ext_zcd_validate(const struct riscv_isa_ext_data *data, @@ -37005,10 +39255,14 @@ index bb5fb2b820a2..820f579e4581 100644 + if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA) && + __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_d)) + return 0; -+ + +-#define __RISCV_ISA_EXT_DATA(_name, _id) { \ +- .name = #_name, \ +- .property = #_name, \ +- .id = _id, \ + return -EPROBE_DEFER; -+} -+ + } + +static int riscv_ext_zcf_validate(const struct riscv_isa_ext_data *data, + const unsigned long *isa_bitmap) +{ @@ -37022,6 +39276,28 @@ index bb5fb2b820a2..820f579e4581 100644 + return -EPROBE_DEFER; +} + ++static int riscv_vector_f_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (!IS_ENABLED(CONFIG_RISCV_ISA_V)) ++ return -EINVAL; ++ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZVE32F)) ++ return 0; ++ ++ return -EPROBE_DEFER; ++} ++ ++static int 
riscv_ext_zvfbfwma_validate(const struct riscv_isa_ext_data *data, ++ const unsigned long *isa_bitmap) ++{ ++ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZFBFMIN) && ++ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZVFBFMIN)) ++ return 0; ++ ++ return -EPROBE_DEFER; ++} ++ +static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data, + const unsigned long *isa_bitmap) +{ @@ -37032,6 +39308,11 @@ index bb5fb2b820a2..820f579e4581 100644 + return 0; +} + ++static const unsigned int riscv_a_exts[] = { ++ RISCV_ISA_EXT_ZAAMO, ++ RISCV_ISA_EXT_ZALRSC, ++}; ++ +static const unsigned int riscv_zk_bundled_exts[] = { + RISCV_ISA_EXT_ZBKB, + RISCV_ISA_EXT_ZBKC, @@ -37160,10 +39441,16 @@ index bb5fb2b820a2..820f579e4581 100644 + RISCV_ISA_EXT_ZCF, + RISCV_ISA_EXT_ZCD, +}; - ++ /* * The canonical order of ISA extension names in the ISA string is defined in -@@ -158,36 +328,177 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { + * chapter 27 of the unprivileged specification. 
+@@ -154,40 +379,188 @@ static bool riscv_isa_extension_check(int id) + const struct riscv_isa_ext_data riscv_isa_ext[] = { + __RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i), + __RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m), +- __RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a), ++ __RISCV_ISA_EXT_SUPERSET(a, RISCV_ISA_EXT_a, riscv_a_exts), __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f), __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d), __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q), @@ -37180,6 +39467,7 @@ index bb5fb2b820a2..820f579e4581 100644 - __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ), + __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts, + riscv_ext_zicbom_validate), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zicbop, RISCV_ISA_EXT_ZICBOP, riscv_ext_zicbop_validate), + __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, + riscv_ext_zicboz_validate), + __RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE), @@ -37191,10 +39479,13 @@ index bb5fb2b820a2..820f579e4581 100644 __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), + __RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP), ++ __RISCV_ISA_EXT_DATA(zaamo, RISCV_ISA_EXT_ZAAMO), + __RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA), + __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), ++ __RISCV_ISA_EXT_DATA(zalrsc, RISCV_ISA_EXT_ZALRSC), + __RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS), + __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zfbfmin, RISCV_ISA_EXT_ZFBFMIN, riscv_ext_f_depends), + __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH), + __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN), + __RISCV_ISA_EXT_DATA(zca, RISCV_ISA_EXT_ZCA), @@ -37227,6 +39518,9 @@ index bb5fb2b820a2..820f579e4581 100644 + __RISCV_ISA_EXT_SUPERSET(zve64d, RISCV_ISA_EXT_ZVE64D, riscv_zve64d_exts), + __RISCV_ISA_EXT_SUPERSET(zve64f, RISCV_ISA_EXT_ZVE64F, riscv_zve64f_exts), + __RISCV_ISA_EXT_SUPERSET(zve64x, 
RISCV_ISA_EXT_ZVE64X, riscv_zve64x_exts), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zvfbfmin, RISCV_ISA_EXT_ZVFBFMIN, riscv_vector_f_validate), ++ __RISCV_ISA_EXT_DATA_VALIDATE(zvfbfwma, RISCV_ISA_EXT_ZVFBFWMA, ++ riscv_ext_zvfbfwma_validate), + __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH), + __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN), + __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB), @@ -37351,7 +39645,7 @@ index bb5fb2b820a2..820f579e4581 100644 { /* * For all possible cpus, we have already validated in -@@ -200,15 +511,31 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc +@@ -200,15 +573,31 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc while (*isa) { const char *ext = isa++; const char *ext_end = isa; @@ -37387,7 +39681,7 @@ index bb5fb2b820a2..820f579e4581 100644 */ if (ext[-1] != '_' && ext[1] == 'u') { ++isa; -@@ -217,8 +544,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc +@@ -217,8 +606,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc } fallthrough; case 'S': @@ -37396,7 +39690,7 @@ index bb5fb2b820a2..820f579e4581 100644 case 'z': case 'Z': /* -@@ -239,7 +564,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc +@@ -239,7 +626,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc * character itself while eliminating the extensions version number. * A simple re-increment solves this problem. 
*/ @@ -37404,7 +39698,7 @@ index bb5fb2b820a2..820f579e4581 100644 for (; *isa && *isa != '_'; ++isa) if (unlikely(!isalnum(*isa))) ext_err = true; -@@ -317,29 +641,10 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc +@@ -317,29 +703,10 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc if (*isa == '_') ++isa; @@ -37435,7 +39729,7 @@ index bb5fb2b820a2..820f579e4581 100644 } } -@@ -366,6 +671,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) +@@ -366,6 +733,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) for_each_possible_cpu(cpu) { struct riscv_isainfo *isainfo = &hart_isa[cpu]; unsigned long this_hwcap = 0; @@ -37443,7 +39737,7 @@ index bb5fb2b820a2..820f579e4581 100644 if (acpi_disabled) { node = of_cpu_device_node_get(cpu); -@@ -388,7 +694,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) +@@ -388,7 +756,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) } } @@ -37452,7 +39746,7 @@ index bb5fb2b820a2..820f579e4581 100644 /* * These ones were as they were part of the base ISA when the -@@ -396,10 +702,10 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) +@@ -396,10 +764,10 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) * unconditionally where `i` is in riscv,isa on DT systems. */ if (acpi_disabled) { @@ -37467,7 +39761,7 @@ index bb5fb2b820a2..820f579e4581 100644 } /* -@@ -412,9 +718,11 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) +@@ -412,9 +780,11 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) */ if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) { this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v]; @@ -37480,7 +39774,7 @@ index bb5fb2b820a2..820f579e4581 100644 /* * All "okay" hart should have same isa. 
Set HWCAP based on * common capabilities of every "okay" hart, in case they don't -@@ -435,6 +743,61 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) +@@ -435,6 +805,61 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) acpi_put_table((struct acpi_table_header *)rhct); } @@ -37542,7 +39836,7 @@ index bb5fb2b820a2..820f579e4581 100644 static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) { unsigned int cpu; -@@ -443,6 +806,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) +@@ -443,6 +868,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) unsigned long this_hwcap = 0; struct device_node *cpu_node; struct riscv_isainfo *isainfo = &hart_isa[cpu]; @@ -37550,7 +39844,7 @@ index bb5fb2b820a2..820f579e4581 100644 cpu_node = of_cpu_device_node_get(cpu); if (!cpu_node) { -@@ -456,20 +820,18 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) +@@ -456,20 +882,18 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) } for (int i = 0; i < riscv_isa_ext_count; i++) { @@ -37578,7 +39872,7 @@ index bb5fb2b820a2..820f579e4581 100644 of_node_put(cpu_node); /* -@@ -485,6 +847,8 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) +@@ -485,6 +909,8 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); else bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); @@ -37587,7 +39881,7 @@ index bb5fb2b820a2..820f579e4581 100644 } if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX)) -@@ -539,8 +903,14 @@ void __init riscv_fill_hwcap(void) +@@ -539,8 +965,14 @@ void __init riscv_fill_hwcap(void) elf_hwcap &= ~COMPAT_HWCAP_ISA_F; } @@ -37603,7 +39897,7 @@ index bb5fb2b820a2..820f579e4581 100644 /* * ISA string in device tree might have 'v' flag, but * CONFIG_RISCV_ISA_V is disabled in kernel. 
-@@ -668,7 +1038,7 @@ void check_unaligned_access(int cpu) +@@ -668,7 +1100,7 @@ void check_unaligned_access(int cpu) __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE)); } @@ -37612,22 +39906,31 @@ index bb5fb2b820a2..820f579e4581 100644 { check_unaligned_access(0); return 0; -@@ -676,6 +1046,14 @@ static int check_unaligned_access_boot_cpu(void) +@@ -676,6 +1108,23 @@ static int check_unaligned_access_boot_cpu(void) arch_initcall(check_unaligned_access_boot_cpu); +void __init riscv_user_isa_enable(void) +{ + if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) -+ csr_set(CSR_ENVCFG, ENVCFG_CBZE); ++ current->thread.envcfg |= ENVCFG_CBZE; + else if (any_cpu_has_zicboz) + pr_warn("Zicboz disabled as it is unavailable on some harts\n"); ++ ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOM)) ++ current->thread.envcfg |= ENVCFG_CBCFE; ++ else if (any_cpu_has_zicbom) ++ pr_warn("Zicbom disabled as it is unavailable on some harts\n"); ++ ++ if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOP) && ++ any_cpu_has_zicbop) ++ pr_warn("Zicbop disabled as it is unavailable on some harts\n"); +} + #ifdef CONFIG_RISCV_ALTERNATIVE /* * Alternative patch sites consider 48 bits when determining when to patch -@@ -716,28 +1094,45 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, +@@ -716,28 +1165,45 @@ void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, { struct alt_entry *alt; void *oldptr, *altptr; @@ -37685,11 +39988,572 @@ index bb5fb2b820a2..820f579e4581 100644 oldptr = ALT_OLD_PTR(alt); altptr = ALT_ALT_PTR(alt); +diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c +deleted file mode 100644 +index 8c32bf1eedda..000000000000 +--- a/arch/riscv/kernel/elf_kexec.c ++++ /dev/null +@@ -1,475 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0-only +-/* +- * Load ELF vmlinux file for the kexec_file_load syscall. +- * +- * Copyright (C) 2021 Huawei Technologies Co, Ltd. 
+- * +- * Author: Liao Chang (liaochang1@huawei.com) +- * +- * Based on kexec-tools' kexec-elf-riscv.c, heavily modified +- * for kernel. +- */ +- +-#define pr_fmt(fmt) "kexec_image: " fmt +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-int arch_kimage_file_post_load_cleanup(struct kimage *image) +-{ +- kvfree(image->arch.fdt); +- image->arch.fdt = NULL; +- +- vfree(image->elf_headers); +- image->elf_headers = NULL; +- image->elf_headers_sz = 0; +- +- return kexec_image_post_load_cleanup_default(image); +-} +- +-static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, +- struct kexec_elf_info *elf_info, unsigned long old_pbase, +- unsigned long new_pbase) +-{ +- int i; +- int ret = 0; +- size_t size; +- struct kexec_buf kbuf; +- const struct elf_phdr *phdr; +- +- kbuf.image = image; +- +- for (i = 0; i < ehdr->e_phnum; i++) { +- phdr = &elf_info->proghdrs[i]; +- if (phdr->p_type != PT_LOAD) +- continue; +- +- size = phdr->p_filesz; +- if (size > phdr->p_memsz) +- size = phdr->p_memsz; +- +- kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset; +- kbuf.bufsz = size; +- kbuf.buf_align = phdr->p_align; +- kbuf.mem = phdr->p_paddr - old_pbase + new_pbase; +- kbuf.memsz = phdr->p_memsz; +- kbuf.top_down = false; +- ret = kexec_add_buffer(&kbuf); +- if (ret) +- break; +- } +- +- return ret; +-} +- +-/* +- * Go through the available phsyical memory regions and find one that hold +- * an image of the specified size. 
+- */ +-static int elf_find_pbase(struct kimage *image, unsigned long kernel_len, +- struct elfhdr *ehdr, struct kexec_elf_info *elf_info, +- unsigned long *old_pbase, unsigned long *new_pbase) +-{ +- int i; +- int ret; +- struct kexec_buf kbuf; +- const struct elf_phdr *phdr; +- unsigned long lowest_paddr = ULONG_MAX; +- unsigned long lowest_vaddr = ULONG_MAX; +- +- for (i = 0; i < ehdr->e_phnum; i++) { +- phdr = &elf_info->proghdrs[i]; +- if (phdr->p_type != PT_LOAD) +- continue; +- +- if (lowest_paddr > phdr->p_paddr) +- lowest_paddr = phdr->p_paddr; +- +- if (lowest_vaddr > phdr->p_vaddr) +- lowest_vaddr = phdr->p_vaddr; +- } +- +- kbuf.image = image; +- kbuf.buf_min = lowest_paddr; +- kbuf.buf_max = ULONG_MAX; +- +- /* +- * Current riscv boot protocol requires 2MB alignment for +- * RV64 and 4MB alignment for RV32 +- * +- */ +- kbuf.buf_align = PMD_SIZE; +- kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; +- kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE); +- kbuf.top_down = false; +- ret = arch_kexec_locate_mem_hole(&kbuf); +- if (!ret) { +- *old_pbase = lowest_paddr; +- *new_pbase = kbuf.mem; +- image->start = ehdr->e_entry - lowest_vaddr + kbuf.mem; +- } +- return ret; +-} +- +-static int get_nr_ram_ranges_callback(struct resource *res, void *arg) +-{ +- unsigned int *nr_ranges = arg; +- +- (*nr_ranges)++; +- return 0; +-} +- +-static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg) +-{ +- struct crash_mem *cmem = arg; +- +- cmem->ranges[cmem->nr_ranges].start = res->start; +- cmem->ranges[cmem->nr_ranges].end = res->end; +- cmem->nr_ranges++; +- +- return 0; +-} +- +-static int prepare_elf_headers(void **addr, unsigned long *sz) +-{ +- struct crash_mem *cmem; +- unsigned int nr_ranges; +- int ret; +- +- nr_ranges = 1; /* For exclusion of crashkernel region */ +- walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); +- +- cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); +- if (!cmem) +- return -ENOMEM; +- +- 
cmem->max_nr_ranges = nr_ranges; +- cmem->nr_ranges = 0; +- ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback); +- if (ret) +- goto out; +- +- /* Exclude crashkernel region */ +- ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); +- if (!ret) +- ret = crash_prepare_elf64_headers(cmem, true, addr, sz); +- +-out: +- kfree(cmem); +- return ret; +-} +- +-static char *setup_kdump_cmdline(struct kimage *image, char *cmdline, +- unsigned long cmdline_len) +-{ +- int elfcorehdr_strlen; +- char *cmdline_ptr; +- +- cmdline_ptr = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL); +- if (!cmdline_ptr) +- return NULL; +- +- elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ", +- image->elf_load_addr); +- +- if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) { +- pr_err("Appending elfcorehdr= exceeds cmdline size\n"); +- kfree(cmdline_ptr); +- return NULL; +- } +- +- memcpy(cmdline_ptr + elfcorehdr_strlen, cmdline, cmdline_len); +- /* Ensure it's nul terminated */ +- cmdline_ptr[COMMAND_LINE_SIZE - 1] = '\0'; +- return cmdline_ptr; +-} +- +-static void *elf_kexec_load(struct kimage *image, char *kernel_buf, +- unsigned long kernel_len, char *initrd, +- unsigned long initrd_len, char *cmdline, +- unsigned long cmdline_len) +-{ +- int ret; +- unsigned long old_kernel_pbase = ULONG_MAX; +- unsigned long new_kernel_pbase = 0UL; +- unsigned long initrd_pbase = 0UL; +- unsigned long headers_sz; +- unsigned long kernel_start; +- void *fdt, *headers; +- struct elfhdr ehdr; +- struct kexec_buf kbuf; +- struct kexec_elf_info elf_info; +- char *modified_cmdline = NULL; +- +- ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info); +- if (ret) +- return ERR_PTR(ret); +- +- ret = elf_find_pbase(image, kernel_len, &ehdr, &elf_info, +- &old_kernel_pbase, &new_kernel_pbase); +- if (ret) +- goto out; +- kernel_start = image->start; +- pr_notice("The entry point of kernel at 0x%lx\n", image->start); +- +- /* Add the kernel binary to the 
image */ +- ret = riscv_kexec_elf_load(image, &ehdr, &elf_info, +- old_kernel_pbase, new_kernel_pbase); +- if (ret) +- goto out; +- +- kbuf.image = image; +- kbuf.buf_min = new_kernel_pbase + kernel_len; +- kbuf.buf_max = ULONG_MAX; +- +- /* Add elfcorehdr */ +- if (image->type == KEXEC_TYPE_CRASH) { +- ret = prepare_elf_headers(&headers, &headers_sz); +- if (ret) { +- pr_err("Preparing elf core header failed\n"); +- goto out; +- } +- +- kbuf.buffer = headers; +- kbuf.bufsz = headers_sz; +- kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; +- kbuf.memsz = headers_sz; +- kbuf.buf_align = ELF_CORE_HEADER_ALIGN; +- kbuf.top_down = true; +- +- ret = kexec_add_buffer(&kbuf); +- if (ret) { +- vfree(headers); +- goto out; +- } +- image->elf_headers = headers; +- image->elf_load_addr = kbuf.mem; +- image->elf_headers_sz = headers_sz; +- +- pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", +- image->elf_load_addr, kbuf.bufsz, kbuf.memsz); +- +- /* Setup cmdline for kdump kernel case */ +- modified_cmdline = setup_kdump_cmdline(image, cmdline, +- cmdline_len); +- if (!modified_cmdline) { +- pr_err("Setting up cmdline for kdump kernel failed\n"); +- ret = -EINVAL; +- goto out; +- } +- cmdline = modified_cmdline; +- } +- +-#ifdef CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY +- /* Add purgatory to the image */ +- kbuf.top_down = true; +- kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; +- ret = kexec_load_purgatory(image, &kbuf); +- if (ret) { +- pr_err("Error loading purgatory ret=%d\n", ret); +- goto out; +- } +- ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry", +- &kernel_start, +- sizeof(kernel_start), 0); +- if (ret) +- pr_err("Error update purgatory ret=%d\n", ret); +-#endif /* CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY */ +- +- /* Add the initrd to the image */ +- if (initrd != NULL) { +- kbuf.buffer = initrd; +- kbuf.bufsz = kbuf.memsz = initrd_len; +- kbuf.buf_align = PAGE_SIZE; +- kbuf.top_down = true; +- kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; +- ret = kexec_add_buffer(&kbuf); +- 
if (ret) +- goto out; +- initrd_pbase = kbuf.mem; +- pr_notice("Loaded initrd at 0x%lx\n", initrd_pbase); +- } +- +- /* Add the DTB to the image */ +- fdt = of_kexec_alloc_and_setup_fdt(image, initrd_pbase, +- initrd_len, cmdline, 0); +- if (!fdt) { +- pr_err("Error setting up the new device tree.\n"); +- ret = -EINVAL; +- goto out; +- } +- +- fdt_pack(fdt); +- kbuf.buffer = fdt; +- kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt); +- kbuf.buf_align = PAGE_SIZE; +- kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; +- kbuf.top_down = true; +- ret = kexec_add_buffer(&kbuf); +- if (ret) { +- pr_err("Error add DTB kbuf ret=%d\n", ret); +- goto out_free_fdt; +- } +- /* Cache the fdt buffer address for memory cleanup */ +- image->arch.fdt = fdt; +- pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem); +- goto out; +- +-out_free_fdt: +- kvfree(fdt); +-out: +- kfree(modified_cmdline); +- kexec_free_elf_info(&elf_info); +- return ret ? ERR_PTR(ret) : NULL; +-} +- +-#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1)) +-#define RISCV_IMM_BITS 12 +-#define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS) +-#define RISCV_CONST_HIGH_PART(x) \ +- (((x) + (RISCV_IMM_REACH >> 1)) & ~(RISCV_IMM_REACH - 1)) +-#define RISCV_CONST_LOW_PART(x) ((x) - RISCV_CONST_HIGH_PART(x)) +- +-#define ENCODE_ITYPE_IMM(x) \ +- (RV_X(x, 0, 12) << 20) +-#define ENCODE_BTYPE_IMM(x) \ +- ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | \ +- (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31)) +-#define ENCODE_UTYPE_IMM(x) \ +- (RV_X(x, 12, 20) << 12) +-#define ENCODE_JTYPE_IMM(x) \ +- ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | \ +- (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31)) +-#define ENCODE_CBTYPE_IMM(x) \ +- ((RV_X(x, 1, 2) << 3) | (RV_X(x, 3, 2) << 10) | (RV_X(x, 5, 1) << 2) | \ +- (RV_X(x, 6, 2) << 5) | (RV_X(x, 8, 1) << 12)) +-#define ENCODE_CJTYPE_IMM(x) \ +- ((RV_X(x, 1, 3) << 3) | (RV_X(x, 4, 1) << 11) | (RV_X(x, 5, 1) << 2) | \ +- (RV_X(x, 6, 1) << 7) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 8, 2) << 9) | \ +- (RV_X(x, 
10, 1) << 8) | (RV_X(x, 11, 1) << 12)) +-#define ENCODE_UJTYPE_IMM(x) \ +- (ENCODE_UTYPE_IMM(RISCV_CONST_HIGH_PART(x)) | \ +- (ENCODE_ITYPE_IMM(RISCV_CONST_LOW_PART(x)) << 32)) +-#define ENCODE_UITYPE_IMM(x) \ +- (ENCODE_UTYPE_IMM(x) | (ENCODE_ITYPE_IMM(x) << 32)) +- +-#define CLEAN_IMM(type, x) \ +- ((~ENCODE_##type##_IMM((uint64_t)(-1))) & (x)) +- +-int arch_kexec_apply_relocations_add(struct purgatory_info *pi, +- Elf_Shdr *section, +- const Elf_Shdr *relsec, +- const Elf_Shdr *symtab) +-{ +- const char *strtab, *name, *shstrtab; +- const Elf_Shdr *sechdrs; +- Elf64_Rela *relas; +- int i, r_type; +- +- /* String & section header string table */ +- sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff; +- strtab = (char *)pi->ehdr + sechdrs[symtab->sh_link].sh_offset; +- shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset; +- +- relas = (void *)pi->ehdr + relsec->sh_offset; +- +- for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { +- const Elf_Sym *sym; /* symbol to relocate */ +- unsigned long addr; /* final location after relocation */ +- unsigned long val; /* relocated symbol value */ +- unsigned long sec_base; /* relocated symbol value */ +- void *loc; /* tmp location to modify */ +- +- sym = (void *)pi->ehdr + symtab->sh_offset; +- sym += ELF64_R_SYM(relas[i].r_info); +- +- if (sym->st_name) +- name = strtab + sym->st_name; +- else +- name = shstrtab + sechdrs[sym->st_shndx].sh_name; +- +- loc = pi->purgatory_buf; +- loc += section->sh_offset; +- loc += relas[i].r_offset; +- +- if (sym->st_shndx == SHN_ABS) +- sec_base = 0; +- else if (sym->st_shndx >= pi->ehdr->e_shnum) { +- pr_err("Invalid section %d for symbol %s\n", +- sym->st_shndx, name); +- return -ENOEXEC; +- } else +- sec_base = pi->sechdrs[sym->st_shndx].sh_addr; +- +- val = sym->st_value; +- val += sec_base; +- val += relas[i].r_addend; +- +- addr = section->sh_addr + relas[i].r_offset; +- +- r_type = ELF64_R_TYPE(relas[i].r_info); +- +- switch (r_type) { +- case R_RISCV_BRANCH: +- 
*(u32 *)loc = CLEAN_IMM(BTYPE, *(u32 *)loc) | +- ENCODE_BTYPE_IMM(val - addr); +- break; +- case R_RISCV_JAL: +- *(u32 *)loc = CLEAN_IMM(JTYPE, *(u32 *)loc) | +- ENCODE_JTYPE_IMM(val - addr); +- break; +- /* +- * With no R_RISCV_PCREL_LO12_S, R_RISCV_PCREL_LO12_I +- * sym is expected to be next to R_RISCV_PCREL_HI20 +- * in purgatory relsec. Handle it like R_RISCV_CALL +- * sym, instead of searching the whole relsec. +- */ +- case R_RISCV_PCREL_HI20: +- case R_RISCV_CALL_PLT: +- case R_RISCV_CALL: +- *(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) | +- ENCODE_UJTYPE_IMM(val - addr); +- break; +- case R_RISCV_RVC_BRANCH: +- *(u32 *)loc = CLEAN_IMM(CBTYPE, *(u32 *)loc) | +- ENCODE_CBTYPE_IMM(val - addr); +- break; +- case R_RISCV_RVC_JUMP: +- *(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) | +- ENCODE_CJTYPE_IMM(val - addr); +- break; +- case R_RISCV_ADD16: +- *(u16 *)loc += val; +- break; +- case R_RISCV_SUB16: +- *(u16 *)loc -= val; +- break; +- case R_RISCV_ADD32: +- *(u32 *)loc += val; +- break; +- case R_RISCV_SUB32: +- *(u32 *)loc -= val; +- break; +- /* It has been applied by R_RISCV_PCREL_HI20 sym */ +- case R_RISCV_PCREL_LO12_I: +- case R_RISCV_ALIGN: +- case R_RISCV_RELAX: +- break; +- default: +- pr_err("Unknown rela relocation: %d\n", r_type); +- return -ENOEXEC; +- } +- } +- return 0; +-} +- +-const struct kexec_file_ops elf_kexec_ops = { +- .probe = kexec_elf_probe, +- .load = elf_kexec_load, +-}; diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S -index 1f90fee24a8b..6d239ba461ad 100644 +index 1f90fee24a8b..0382fce9d8bd 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S -@@ -25,9 +25,9 @@ SYM_CODE_START(handle_exception) +@@ -18,6 +18,79 @@ + + .section .irqentry.text, "ax" + ++.macro new_vmalloc_check ++ REG_S a0, TASK_TI_A0(tp) ++ csrr a0, CSR_CAUSE ++ /* Exclude IRQs */ ++ blt a0, zero, _new_vmalloc_restore_context_a0 ++ ++ REG_S a1, TASK_TI_A1(tp) ++ /* Only check new_vmalloc if we are in page/protection fault */ ++ li 
a1, EXC_LOAD_PAGE_FAULT ++ beq a0, a1, _new_vmalloc_kernel_address ++ li a1, EXC_STORE_PAGE_FAULT ++ beq a0, a1, _new_vmalloc_kernel_address ++ li a1, EXC_INST_PAGE_FAULT ++ bne a0, a1, _new_vmalloc_restore_context_a1 ++ ++_new_vmalloc_kernel_address: ++ /* Is it a kernel address? */ ++ csrr a0, CSR_TVAL ++ bge a0, zero, _new_vmalloc_restore_context_a1 ++ ++ /* Check if a new vmalloc mapping appeared that could explain the trap */ ++ REG_S a2, TASK_TI_A2(tp) ++ /* ++ * Computes: ++ * a0 = &new_vmalloc[BIT_WORD(cpu)] ++ * a1 = BIT_MASK(cpu) ++ */ ++ REG_L a2, TASK_TI_CPU(tp) ++ /* ++ * Compute the new_vmalloc element position: ++ * (cpu / 64) * 8 = (cpu >> 6) << 3 ++ */ ++ srli a1, a2, 6 ++ slli a1, a1, 3 ++ la a0, new_vmalloc ++ add a0, a0, a1 ++ /* ++ * Compute the bit position in the new_vmalloc element: ++ * bit_pos = cpu % 64 = cpu - (cpu / 64) * 64 = cpu - (cpu >> 6) << 6 ++ * = cpu - ((cpu >> 6) << 3) << 3 ++ */ ++ slli a1, a1, 3 ++ sub a1, a2, a1 ++ /* Compute the "get mask": 1 << bit_pos */ ++ li a2, 1 ++ sll a1, a2, a1 ++ ++ /* Check the value of new_vmalloc for this cpu */ ++ REG_L a2, 0(a0) ++ and a2, a2, a1 ++ beq a2, zero, _new_vmalloc_restore_context ++ ++ /* Atomically reset the current cpu bit in new_vmalloc */ ++ amoxor.d a0, a1, (a0) ++ ++ /* Only emit a sfence.vma if the uarch caches invalid entries */ ++ ALTERNATIVE("sfence.vma", "nop", 0, RISCV_ISA_EXT_SVVPTC, 1) ++ ++ REG_L a0, TASK_TI_A0(tp) ++ REG_L a1, TASK_TI_A1(tp) ++ REG_L a2, TASK_TI_A2(tp) ++ csrw CSR_SCRATCH, x0 ++ sret ++ ++_new_vmalloc_restore_context: ++ REG_L a2, TASK_TI_A2(tp) ++_new_vmalloc_restore_context_a1: ++ REG_L a1, TASK_TI_A1(tp) ++_new_vmalloc_restore_context_a0: ++ REG_L a0, TASK_TI_A0(tp) ++.endm ++ ++ + SYM_CODE_START(handle_exception) + /* + * If coming from userspace, preserve the user thread pointer and load +@@ -25,10 +98,24 @@ SYM_CODE_START(handle_exception) * register will contain 0, and we should continue on the current TP. 
*/ csrrw tp, CSR_SCRATCH, tp @@ -37699,9 +40563,24 @@ index 1f90fee24a8b..6d239ba461ad 100644 -_restore_kernel_tpsp: +.Lrestore_kernel_tpsp: csrr tp, CSR_SCRATCH ++ ++#ifdef CONFIG_64BIT ++ /* ++ * The RISC-V kernel does not eagerly emit a sfence.vma after each ++ * new vmalloc mapping, which may result in exceptions: ++ * - if the uarch caches invalid entries, the new mapping would not be ++ * observed by the page table walker and an invalidation is needed. ++ * - if the uarch does not cache invalid entries, a reordered access ++ * could "miss" the new mapping and traps: in that case, we only need ++ * to retry the access, no sfence.vma is required. ++ */ ++ new_vmalloc_check ++#endif ++ REG_S sp, TASK_TI_KERNEL_SP(tp) -@@ -39,7 +39,7 @@ _restore_kernel_tpsp: + #ifdef CONFIG_VMAP_STACK +@@ -39,7 +126,7 @@ _restore_kernel_tpsp: REG_L sp, TASK_TI_KERNEL_SP(tp) #endif @@ -37710,7 +40589,7 @@ index 1f90fee24a8b..6d239ba461ad 100644 REG_S sp, TASK_TI_USER_SP(tp) REG_L sp, TASK_TI_KERNEL_SP(tp) addi sp, sp, -(PT_SIZE_ON_STACK) -@@ -81,6 +81,11 @@ _save_context: +@@ -81,6 +168,11 @@ _save_context: .option norelax la gp, __global_pointer$ .option pop @@ -37722,7 +40601,7 @@ index 1f90fee24a8b..6d239ba461ad 100644 move a0, sp /* pt_regs */ la ra, ret_from_exception -@@ -134,6 +139,10 @@ SYM_CODE_START_NOALIGN(ret_from_exception) +@@ -134,6 +226,10 @@ SYM_CODE_START_NOALIGN(ret_from_exception) */ csrw CSR_SCRATCH, tp 1: @@ -37733,7 +40612,7 @@ index 1f90fee24a8b..6d239ba461ad 100644 REG_L a0, PT_STATUS(sp) /* * The current load reservation is effectively part of the processor's -@@ -279,7 +288,7 @@ SYM_FUNC_END(__switch_to) +@@ -279,7 +375,7 @@ SYM_FUNC_END(__switch_to) .section ".rodata" .align LGREG /* Exception vector table */ @@ -37742,7 +40621,7 @@ index 1f90fee24a8b..6d239ba461ad 100644 RISCV_PTR do_trap_insn_misaligned ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault) RISCV_PTR do_trap_insn_illegal -@@ -297,12 +306,11 @@ SYM_CODE_START(excp_vect_table) +@@ -297,12 +393,11 
@@ SYM_CODE_START(excp_vect_table) RISCV_PTR do_page_fault /* load page fault */ RISCV_PTR do_trap_unknown RISCV_PTR do_page_fault /* store page fault */ @@ -38074,6 +40953,676 @@ index 000000000000..6afe80c7f03a + put_cpu_vector_context(); +} +EXPORT_SYMBOL_GPL(kernel_vector_end); +diff --git a/arch/riscv/kernel/kexec_elf.c b/arch/riscv/kernel/kexec_elf.c +new file mode 100644 +index 000000000000..f4755d49b89e +--- /dev/null ++++ b/arch/riscv/kernel/kexec_elf.c +@@ -0,0 +1,144 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Load ELF vmlinux file for the kexec_file_load syscall. ++ * ++ * Copyright (C) 2021 Huawei Technologies Co, Ltd. ++ * ++ * Author: Liao Chang (liaochang1@huawei.com) ++ * ++ * Based on kexec-tools' kexec-elf-riscv.c, heavily modified ++ * for kernel. ++ */ ++ ++#define pr_fmt(fmt) "kexec_image: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static int riscv_kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, ++ struct kexec_elf_info *elf_info, unsigned long old_pbase, ++ unsigned long new_pbase) ++{ ++ int i; ++ int ret = 0; ++ size_t size; ++ struct kexec_buf kbuf; ++ const struct elf_phdr *phdr; ++ ++ kbuf.image = image; ++ ++ for (i = 0; i < ehdr->e_phnum; i++) { ++ phdr = &elf_info->proghdrs[i]; ++ if (phdr->p_type != PT_LOAD) ++ continue; ++ ++ size = phdr->p_filesz; ++ if (size > phdr->p_memsz) ++ size = phdr->p_memsz; ++ ++ kbuf.buffer = (void *) elf_info->buffer + phdr->p_offset; ++ kbuf.bufsz = size; ++ kbuf.buf_align = phdr->p_align; ++ kbuf.mem = phdr->p_paddr - old_pbase + new_pbase; ++ kbuf.memsz = phdr->p_memsz; ++ kbuf.top_down = false; ++ ret = kexec_add_buffer(&kbuf); ++ if (ret) ++ break; ++ } ++ ++ return ret; ++} ++ ++/* ++ * Go through the available phsyical memory regions and find one that hold ++ * an image of the specified size. 
++ */ ++static int elf_find_pbase(struct kimage *image, unsigned long kernel_len, ++ struct elfhdr *ehdr, struct kexec_elf_info *elf_info, ++ unsigned long *old_pbase, unsigned long *new_pbase) ++{ ++ int i; ++ int ret; ++ struct kexec_buf kbuf; ++ const struct elf_phdr *phdr; ++ unsigned long lowest_paddr = ULONG_MAX; ++ unsigned long lowest_vaddr = ULONG_MAX; ++ ++ for (i = 0; i < ehdr->e_phnum; i++) { ++ phdr = &elf_info->proghdrs[i]; ++ if (phdr->p_type != PT_LOAD) ++ continue; ++ ++ if (lowest_paddr > phdr->p_paddr) ++ lowest_paddr = phdr->p_paddr; ++ ++ if (lowest_vaddr > phdr->p_vaddr) ++ lowest_vaddr = phdr->p_vaddr; ++ } ++ ++ kbuf.image = image; ++ kbuf.buf_min = lowest_paddr; ++ kbuf.buf_max = ULONG_MAX; ++ ++ /* ++ * Current riscv boot protocol requires 2MB alignment for ++ * RV64 and 4MB alignment for RV32 ++ * ++ */ ++ kbuf.buf_align = PMD_SIZE; ++ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ++ kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE); ++ kbuf.top_down = false; ++ ret = arch_kexec_locate_mem_hole(&kbuf); ++ if (!ret) { ++ *old_pbase = lowest_paddr; ++ *new_pbase = kbuf.mem; ++ image->start = ehdr->e_entry - lowest_vaddr + kbuf.mem; ++ } ++ return ret; ++} ++ ++static void *elf_kexec_load(struct kimage *image, char *kernel_buf, ++ unsigned long kernel_len, char *initrd, ++ unsigned long initrd_len, char *cmdline, ++ unsigned long cmdline_len) ++{ ++ int ret; ++ unsigned long old_kernel_pbase = ULONG_MAX; ++ unsigned long new_kernel_pbase = 0UL; ++ struct elfhdr ehdr; ++ struct kexec_elf_info elf_info; ++ ++ ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info); ++ if (ret) ++ return ERR_PTR(ret); ++ ++ ret = elf_find_pbase(image, kernel_len, &ehdr, &elf_info, ++ &old_kernel_pbase, &new_kernel_pbase); ++ if (ret) ++ goto out; ++ ++ /* Add the kernel binary to the image */ ++ ret = riscv_kexec_elf_load(image, &ehdr, &elf_info, ++ old_kernel_pbase, new_kernel_pbase); ++ if (ret) ++ goto out; ++ ++ ret = load_extra_segments(image, image->start, 
kernel_len, ++ initrd, initrd_len, cmdline, cmdline_len); ++out: ++ kexec_free_elf_info(&elf_info); ++ return ret ? ERR_PTR(ret) : NULL; ++} ++ ++const struct kexec_file_ops elf_kexec_ops = { ++ .probe = kexec_elf_probe, ++ .load = elf_kexec_load, ++}; +diff --git a/arch/riscv/kernel/kexec_image.c b/arch/riscv/kernel/kexec_image.c +new file mode 100644 +index 000000000000..26a81774a78a +--- /dev/null ++++ b/arch/riscv/kernel/kexec_image.c +@@ -0,0 +1,96 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * RISC-V Kexec image loader ++ * ++ */ ++ ++#define pr_fmt(fmt) "kexec_file(Image): " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static int image_probe(const char *kernel_buf, unsigned long kernel_len) ++{ ++ const struct riscv_image_header *h = (const struct riscv_image_header *)kernel_buf; ++ ++ if (!h || kernel_len < sizeof(*h)) ++ return -EINVAL; ++ ++ /* According to Documentation/riscv/boot-image-header.rst, ++ * use "magic2" field to check when version >= 0.2. 
++ */ ++ ++ if (h->version >= RISCV_HEADER_VERSION && ++ memcmp(&h->magic2, RISCV_IMAGE_MAGIC2, sizeof(h->magic2))) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static void *image_load(struct kimage *image, ++ char *kernel, unsigned long kernel_len, ++ char *initrd, unsigned long initrd_len, ++ char *cmdline, unsigned long cmdline_len) ++{ ++ struct riscv_image_header *h; ++ u64 flags; ++ bool be_image, be_kernel; ++ struct kexec_buf kbuf; ++ int ret; ++ ++ /* Check Image header */ ++ h = (struct riscv_image_header *)kernel; ++ if (!h->image_size) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* Check endianness */ ++ flags = le64_to_cpu(h->flags); ++ be_image = riscv_image_flag_field(flags, RISCV_IMAGE_FLAG_BE); ++ be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); ++ if (be_image != be_kernel) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* Load the kernel image */ ++ kbuf.image = image; ++ kbuf.buf_min = 0; ++ kbuf.buf_max = ULONG_MAX; ++ kbuf.top_down = false; ++ ++ kbuf.buffer = kernel; ++ kbuf.bufsz = kernel_len; ++ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ++ kbuf.memsz = le64_to_cpu(h->image_size); ++ kbuf.buf_align = le64_to_cpu(h->text_offset); ++ ++ ret = kexec_add_buffer(&kbuf); ++ if (ret) { ++ pr_err("Error add kernel image ret=%d\n", ret); ++ goto out; ++ } ++ ++ image->start = kbuf.mem; ++ ++ pr_info("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ kbuf.mem, kbuf.bufsz, kbuf.memsz); ++ ++ ret = load_extra_segments(image, kbuf.mem, kbuf.memsz, ++ initrd, initrd_len, cmdline, cmdline_len); ++ ++out: ++ return ret ? 
ERR_PTR(ret) : NULL; ++} ++ ++const struct kexec_file_ops image_kexec_ops = { ++ .probe = image_probe, ++ .load = image_load, ++}; +diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c +index ccb0c5d5c63c..3c830a6f7ef4 100644 +--- a/arch/riscv/kernel/machine_kexec.c ++++ b/arch/riscv/kernel/machine_kexec.c +@@ -18,30 +18,6 @@ + #include + #include + +-/* +- * kexec_image_info - Print received image details +- */ +-static void +-kexec_image_info(const struct kimage *image) +-{ +- unsigned long i; +- +- pr_debug("Kexec image info:\n"); +- pr_debug("\ttype: %d\n", image->type); +- pr_debug("\tstart: %lx\n", image->start); +- pr_debug("\thead: %lx\n", image->head); +- pr_debug("\tnr_segments: %lu\n", image->nr_segments); +- +- for (i = 0; i < image->nr_segments; i++) { +- pr_debug("\t segment[%lu]: %016lx - %016lx", i, +- image->segment[i].mem, +- image->segment[i].mem + image->segment[i].memsz); +- pr_debug("\t\t0x%lx bytes, %lu pages\n", +- (unsigned long) image->segment[i].memsz, +- (unsigned long) image->segment[i].memsz / PAGE_SIZE); +- } +-} +- + /* + * machine_kexec_prepare - Initialize kexec + * +@@ -60,8 +36,6 @@ machine_kexec_prepare(struct kimage *image) + unsigned int control_code_buffer_sz = 0; + int i = 0; + +- kexec_image_info(image); +- + /* Find the Flattened Device Tree and save its physical address */ + for (i = 0; i < image->nr_segments; i++) { + if (image->segment[i].memsz <= sizeof(fdt)) +diff --git a/arch/riscv/kernel/machine_kexec_file.c b/arch/riscv/kernel/machine_kexec_file.c +index b0bf8c1722c0..e36104af2e24 100644 +--- a/arch/riscv/kernel/machine_kexec_file.c ++++ b/arch/riscv/kernel/machine_kexec_file.c +@@ -7,8 +7,369 @@ + * Author: Liao Chang (liaochang1@huawei.com) + */ + #include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + + const struct kexec_file_ops * const kexec_file_loaders[] = { + &elf_kexec_ops, ++ &image_kexec_ops, + NULL + }; ++ ++int 
arch_kimage_file_post_load_cleanup(struct kimage *image) ++{ ++ kvfree(image->arch.fdt); ++ image->arch.fdt = NULL; ++ ++ vfree(image->elf_headers); ++ image->elf_headers = NULL; ++ image->elf_headers_sz = 0; ++ ++ return kexec_image_post_load_cleanup_default(image); ++} ++ ++#ifdef CONFIG_CRASH_DUMP ++static int get_nr_ram_ranges_callback(struct resource *res, void *arg) ++{ ++ unsigned int *nr_ranges = arg; ++ ++ (*nr_ranges)++; ++ return 0; ++} ++ ++static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg) ++{ ++ struct crash_mem *cmem = arg; ++ ++ cmem->ranges[cmem->nr_ranges].start = res->start; ++ cmem->ranges[cmem->nr_ranges].end = res->end; ++ cmem->nr_ranges++; ++ ++ return 0; ++} ++ ++static int prepare_elf_headers(void **addr, unsigned long *sz) ++{ ++ struct crash_mem *cmem; ++ unsigned int nr_ranges; ++ int ret; ++ ++ nr_ranges = 1; /* For exclusion of crashkernel region */ ++ walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); ++ ++ cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL); ++ if (!cmem) ++ return -ENOMEM; ++ ++ cmem->max_nr_ranges = nr_ranges; ++ cmem->nr_ranges = 0; ++ ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback); ++ if (ret) ++ goto out; ++ ++ /* Exclude crashkernel region */ ++ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end); ++ if (!ret) ++ ret = crash_prepare_elf64_headers(cmem, true, addr, sz); ++ ++out: ++ kfree(cmem); ++ return ret; ++} ++ ++static char *setup_kdump_cmdline(struct kimage *image, char *cmdline, ++ unsigned long cmdline_len) ++{ ++ int elfcorehdr_strlen; ++ char *cmdline_ptr; ++ ++ cmdline_ptr = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL); ++ if (!cmdline_ptr) ++ return NULL; ++ ++ elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ", ++ image->elf_load_addr); ++ ++ if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) { ++ pr_err("Appending elfcorehdr= exceeds cmdline size\n"); ++ kfree(cmdline_ptr); ++ return 
NULL; ++ } ++ ++ memcpy(cmdline_ptr + elfcorehdr_strlen, cmdline, cmdline_len); ++ /* Ensure it's nul terminated */ ++ cmdline_ptr[COMMAND_LINE_SIZE - 1] = '\0'; ++ return cmdline_ptr; ++} ++#endif ++ ++#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1)) ++#define RISCV_IMM_BITS 12 ++#define RISCV_IMM_REACH (1LL << RISCV_IMM_BITS) ++#define RISCV_CONST_HIGH_PART(x) \ ++ (((x) + (RISCV_IMM_REACH >> 1)) & ~(RISCV_IMM_REACH - 1)) ++#define RISCV_CONST_LOW_PART(x) ((x) - RISCV_CONST_HIGH_PART(x)) ++ ++#define ENCODE_ITYPE_IMM(x) \ ++ (RV_X(x, 0, 12) << 20) ++#define ENCODE_BTYPE_IMM(x) \ ++ ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | \ ++ (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31)) ++#define ENCODE_UTYPE_IMM(x) \ ++ (RV_X(x, 12, 20) << 12) ++#define ENCODE_JTYPE_IMM(x) \ ++ ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | \ ++ (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31)) ++#define ENCODE_CBTYPE_IMM(x) \ ++ ((RV_X(x, 1, 2) << 3) | (RV_X(x, 3, 2) << 10) | (RV_X(x, 5, 1) << 2) | \ ++ (RV_X(x, 6, 2) << 5) | (RV_X(x, 8, 1) << 12)) ++#define ENCODE_CJTYPE_IMM(x) \ ++ ((RV_X(x, 1, 3) << 3) | (RV_X(x, 4, 1) << 11) | (RV_X(x, 5, 1) << 2) | \ ++ (RV_X(x, 6, 1) << 7) | (RV_X(x, 7, 1) << 6) | (RV_X(x, 8, 2) << 9) | \ ++ (RV_X(x, 10, 1) << 8) | (RV_X(x, 11, 1) << 12)) ++#define ENCODE_UJTYPE_IMM(x) \ ++ (ENCODE_UTYPE_IMM(RISCV_CONST_HIGH_PART(x)) | \ ++ (ENCODE_ITYPE_IMM(RISCV_CONST_LOW_PART(x)) << 32)) ++#define ENCODE_UITYPE_IMM(x) \ ++ (ENCODE_UTYPE_IMM(x) | (ENCODE_ITYPE_IMM(x) << 32)) ++ ++#define CLEAN_IMM(type, x) \ ++ ((~ENCODE_##type##_IMM((uint64_t)(-1))) & (x)) ++ ++int arch_kexec_apply_relocations_add(struct purgatory_info *pi, ++ Elf_Shdr *section, ++ const Elf_Shdr *relsec, ++ const Elf_Shdr *symtab) ++{ ++ const char *strtab, *name, *shstrtab; ++ const Elf_Shdr *sechdrs; ++ Elf64_Rela *relas; ++ int i, r_type; ++ ++ /* String & section header string table */ ++ sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff; ++ strtab = (char *)pi->ehdr + 
sechdrs[symtab->sh_link].sh_offset; ++ shstrtab = (char *)pi->ehdr + sechdrs[pi->ehdr->e_shstrndx].sh_offset; ++ ++ relas = (void *)pi->ehdr + relsec->sh_offset; ++ ++ for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) { ++ const Elf_Sym *sym; /* symbol to relocate */ ++ unsigned long addr; /* final location after relocation */ ++ unsigned long val; /* relocated symbol value */ ++ unsigned long sec_base; /* relocated symbol value */ ++ void *loc; /* tmp location to modify */ ++ ++ sym = (void *)pi->ehdr + symtab->sh_offset; ++ sym += ELF64_R_SYM(relas[i].r_info); ++ ++ if (sym->st_name) ++ name = strtab + sym->st_name; ++ else ++ name = shstrtab + sechdrs[sym->st_shndx].sh_name; ++ ++ loc = pi->purgatory_buf; ++ loc += section->sh_offset; ++ loc += relas[i].r_offset; ++ ++ if (sym->st_shndx == SHN_ABS) ++ sec_base = 0; ++ else if (sym->st_shndx >= pi->ehdr->e_shnum) { ++ pr_err("Invalid section %d for symbol %s\n", ++ sym->st_shndx, name); ++ return -ENOEXEC; ++ } else ++ sec_base = pi->sechdrs[sym->st_shndx].sh_addr; ++ ++ val = sym->st_value; ++ val += sec_base; ++ val += relas[i].r_addend; ++ ++ addr = section->sh_addr + relas[i].r_offset; ++ ++ r_type = ELF64_R_TYPE(relas[i].r_info); ++ ++ switch (r_type) { ++ case R_RISCV_BRANCH: ++ *(u32 *)loc = CLEAN_IMM(BTYPE, *(u32 *)loc) | ++ ENCODE_BTYPE_IMM(val - addr); ++ break; ++ case R_RISCV_JAL: ++ *(u32 *)loc = CLEAN_IMM(JTYPE, *(u32 *)loc) | ++ ENCODE_JTYPE_IMM(val - addr); ++ break; ++ /* ++ * With no R_RISCV_PCREL_LO12_S, R_RISCV_PCREL_LO12_I ++ * sym is expected to be next to R_RISCV_PCREL_HI20 ++ * in purgatory relsec. Handle it like R_RISCV_CALL ++ * sym, instead of searching the whole relsec. 
++ */ ++ case R_RISCV_PCREL_HI20: ++ case R_RISCV_CALL_PLT: ++ case R_RISCV_CALL: ++ *(u64 *)loc = CLEAN_IMM(UITYPE, *(u64 *)loc) | ++ ENCODE_UJTYPE_IMM(val - addr); ++ break; ++ case R_RISCV_RVC_BRANCH: ++ *(u32 *)loc = CLEAN_IMM(CBTYPE, *(u32 *)loc) | ++ ENCODE_CBTYPE_IMM(val - addr); ++ break; ++ case R_RISCV_RVC_JUMP: ++ *(u32 *)loc = CLEAN_IMM(CJTYPE, *(u32 *)loc) | ++ ENCODE_CJTYPE_IMM(val - addr); ++ break; ++ case R_RISCV_ADD16: ++ *(u16 *)loc += val; ++ break; ++ case R_RISCV_SUB16: ++ *(u16 *)loc -= val; ++ break; ++ case R_RISCV_ADD32: ++ *(u32 *)loc += val; ++ break; ++ case R_RISCV_SUB32: ++ *(u32 *)loc -= val; ++ break; ++ /* It has been applied by R_RISCV_PCREL_HI20 sym */ ++ case R_RISCV_PCREL_LO12_I: ++ case R_RISCV_ALIGN: ++ case R_RISCV_RELAX: ++ break; ++ case R_RISCV_64: ++ *(u64 *)loc = val; ++ break; ++ default: ++ pr_err("Unknown rela relocation: %d\n", r_type); ++ return -ENOEXEC; ++ } ++ } ++ return 0; ++} ++ ++ ++int load_extra_segments(struct kimage *image, unsigned long kernel_start, ++ unsigned long kernel_len, char *initrd, ++ unsigned long initrd_len, char *cmdline, ++ unsigned long cmdline_len) ++{ ++ int ret; ++ void *fdt; ++ unsigned long initrd_pbase = 0UL; ++ struct kexec_buf kbuf; ++ char *modified_cmdline = NULL; ++ ++ kbuf.image = image; ++ kbuf.buf_min = kernel_start + kernel_len; ++ kbuf.buf_max = ULONG_MAX; ++ ++#ifdef CONFIG_CRASH_DUMP ++ /* Add elfcorehdr */ ++ if (image->type == KEXEC_TYPE_CRASH) { ++ void *headers; ++ unsigned long headers_sz; ++ ret = prepare_elf_headers(&headers, &headers_sz); ++ if (ret) { ++ pr_err("Preparing elf core header failed\n"); ++ goto out; ++ } ++ ++ kbuf.buffer = headers; ++ kbuf.bufsz = headers_sz; ++ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ++ kbuf.memsz = headers_sz; ++ kbuf.buf_align = ELF_CORE_HEADER_ALIGN; ++ kbuf.top_down = true; ++ ++ ret = kexec_add_buffer(&kbuf); ++ if (ret) { ++ vfree(headers); ++ goto out; ++ } ++ image->elf_headers = headers; ++ image->elf_load_addr = kbuf.mem; ++ 
image->elf_headers_sz = headers_sz; ++ ++ kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ image->elf_load_addr, kbuf.bufsz, kbuf.memsz); ++ ++ /* Setup cmdline for kdump kernel case */ ++ modified_cmdline = setup_kdump_cmdline(image, cmdline, ++ cmdline_len); ++ if (!modified_cmdline) { ++ pr_err("Setting up cmdline for kdump kernel failed\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ cmdline = modified_cmdline; ++ } ++#endif ++ ++#ifdef CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY ++ /* Add purgatory to the image */ ++ kbuf.top_down = true; ++ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ++ ret = kexec_load_purgatory(image, &kbuf); ++ if (ret) { ++ pr_err("Error loading purgatory ret=%d\n", ret); ++ goto out; ++ } ++ kexec_dprintk("Loaded purgatory at 0x%lx\n", kbuf.mem); ++ ++ ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry", ++ &kernel_start, ++ sizeof(kernel_start), 0); ++ if (ret) ++ pr_err("Error update purgatory ret=%d\n", ret); ++#endif /* CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY */ ++ ++ /* Add the initrd to the image */ ++ if (initrd != NULL) { ++ kbuf.buffer = initrd; ++ kbuf.bufsz = kbuf.memsz = initrd_len; ++ kbuf.buf_align = PAGE_SIZE; ++ kbuf.top_down = true; ++ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ++ ret = kexec_add_buffer(&kbuf); ++ if (ret) ++ goto out; ++ initrd_pbase = kbuf.mem; ++ kexec_dprintk("Loaded initrd at 0x%lx\n", initrd_pbase); ++ } ++ ++ /* Add the DTB to the image */ ++ fdt = of_kexec_alloc_and_setup_fdt(image, initrd_pbase, ++ initrd_len, cmdline, 0); ++ if (!fdt) { ++ pr_err("Error setting up the new device tree.\n"); ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ fdt_pack(fdt); ++ kbuf.buffer = fdt; ++ kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt); ++ kbuf.buf_align = PAGE_SIZE; ++ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ++ kbuf.top_down = true; ++ ret = kexec_add_buffer(&kbuf); ++ if (ret) { ++ pr_err("Error add DTB kbuf ret=%d\n", ret); ++ goto out_free_fdt; ++ } ++ /* Cache the fdt buffer address for memory cleanup */ ++ 
image->arch.fdt = fdt; ++ kexec_dprintk("Loaded device tree at 0x%lx\n", kbuf.mem); ++ goto out; ++ ++out_free_fdt: ++ kvfree(fdt); ++out: ++ kfree(modified_cmdline); ++ return ret; ++} diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index 4d8b2819920f..b4dd9ed6849e 100644 --- a/arch/riscv/kernel/mcount.S @@ -38612,7 +42161,7 @@ index 5a62ed1da453..e66e0999a800 100644 __sbi_set_timer = __sbi_set_timer_v01; __sbi_send_ipi = __sbi_send_ipi_v01; diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c -index c2cdf812ebd0..d949fd3c0884 100644 +index eb312af5eb09..b035b651fdf2 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -26,6 +26,7 @@ @@ -38623,7 +42172,50 @@ index c2cdf812ebd0..d949fd3c0884 100644 #include #include #include -@@ -299,17 +300,22 @@ void __init setup_arch(char **cmdline_p) +@@ -296,6 +297,42 @@ static void __init parse_dtb(void) + #endif + } + ++#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) ++DEFINE_STATIC_KEY_TRUE(qspinlock_key); ++EXPORT_SYMBOL(qspinlock_key); ++#endif ++ ++static void __init riscv_spinlock_init(void) ++{ ++ char *using_ext = NULL; ++ ++ if (IS_ENABLED(CONFIG_RISCV_TICKET_SPINLOCKS)) { ++ pr_info("Ticket spinlock: enabled\n"); ++ return; ++ } ++ ++ if (IS_ENABLED(CONFIG_RISCV_ISA_ZABHA) && ++ IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) && ++ riscv_isa_extension_available(NULL, ZABHA) && ++ riscv_isa_extension_available(NULL, ZACAS)) { ++ using_ext = "using Zabha"; ++ } else if (riscv_isa_extension_available(NULL, ZICCRSE)) { ++ using_ext = "using Ziccrse"; ++ } ++#if defined(CONFIG_RISCV_COMBO_SPINLOCKS) ++ else { ++ static_branch_disable(&qspinlock_key); ++ pr_info("Ticket spinlock: enabled\n"); ++ return; ++ } ++#endif ++ ++ if (!using_ext) ++ pr_err("Queued spinlock without Zabha or Ziccrse"); ++ else ++ pr_info("Queued spinlock %s: enabled\n", using_ext); ++} ++ + extern void __init init_rt_signal_env(void); + + void __init setup_arch(char **cmdline_p) +@@ -333,17 +370,23 @@ void 
__init setup_arch(char **cmdline_p) setup_smp(); #endif @@ -38644,6 +42236,7 @@ index c2cdf812ebd0..d949fd3c0884 100644 riscv_set_dma_cache_alignment(); + + riscv_user_isa_enable(); ++ riscv_spinlock_init(); } bool arch_cpu_is_hotpluggable(int cpu) @@ -38774,6 +42367,376 @@ index 1b8da4e40a4d..3f37eec7a790 100644 cpu_count++; return 0; +diff --git a/arch/riscv/kernel/sse.c b/arch/riscv/kernel/sse.c +new file mode 100644 +index 000000000000..d2da7e23a74a +--- /dev/null ++++ b/arch/riscv/kernel/sse.c +@@ -0,0 +1,154 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later ++/* ++ * Copyright (C) 2024 Rivos Inc. ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++DEFINE_PER_CPU(struct task_struct *, __sse_entry_task); ++ ++void __weak sse_handle_event(struct sse_event_arch_data *arch_evt, struct pt_regs *regs) ++{ ++} ++ ++void do_sse(struct sse_event_arch_data *arch_evt, struct pt_regs *regs) ++{ ++ nmi_enter(); ++ ++ /* Retrieve missing GPRs from SBI */ ++ sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_READ, arch_evt->evt_id, ++ SBI_SSE_ATTR_INTERRUPTED_A6, ++ (SBI_SSE_ATTR_INTERRUPTED_A7 - SBI_SSE_ATTR_INTERRUPTED_A6) + 1, ++ arch_evt->interrupted_phys, 0, 0); ++ ++ memcpy(®s->a6, &arch_evt->interrupted, sizeof(arch_evt->interrupted)); ++ ++ sse_handle_event(arch_evt, regs); ++ ++ /* ++ * The SSE delivery path does not uses the "standard" exception path ++ * (see sse_entry.S) and does not process any pending signal/softirqs ++ * due to being similar to a NMI. ++ * Some drivers (PMU, RAS) enqueue pending work that needs to be handled ++ * as soon as possible by bottom halves. For that purpose, set the SIP ++ * software interrupt pending bit which will force a software interrupt ++ * to be serviced once interrupts are reenabled in the interrupted ++ * context if they were masked or directly if unmasked. 
++ */ ++ csr_set(CSR_IP, IE_SIE); ++ ++ nmi_exit(); ++} ++ ++static void *alloc_to_stack_pointer(void *alloc) ++{ ++ return alloc ? alloc + SSE_STACK_SIZE : NULL; ++} ++ ++static void *stack_pointer_to_alloc(void *stack) ++{ ++ return stack - SSE_STACK_SIZE; ++} ++ ++#ifdef CONFIG_VMAP_STACK ++static void *sse_stack_alloc(unsigned int cpu) ++{ ++ void *stack = arch_alloc_vmap_stack(SSE_STACK_SIZE, cpu_to_node(cpu)); ++ ++ return alloc_to_stack_pointer(stack); ++} ++ ++static void sse_stack_free(void *stack) ++{ ++ vfree(stack_pointer_to_alloc(stack)); ++} ++#else /* CONFIG_VMAP_STACK */ ++static void *sse_stack_alloc(unsigned int cpu) ++{ ++ void *stack = kmalloc(SSE_STACK_SIZE, GFP_KERNEL); ++ ++ return alloc_to_stack_pointer(stack); ++} ++ ++static void sse_stack_free(void *stack) ++{ ++ kfree(stack_pointer_to_alloc(stack)); ++} ++#endif /* CONFIG_VMAP_STACK */ ++ ++static int sse_init_scs(int cpu, struct sse_event_arch_data *arch_evt) ++{ ++ void *stack; ++ ++ if (!scs_is_enabled()) ++ return 0; ++ ++ stack = scs_alloc(cpu_to_node(cpu)); ++ if (!stack) ++ return -ENOMEM; ++ ++ arch_evt->shadow_stack = stack; ++ ++ return 0; ++} ++ ++void arch_sse_event_update_cpu(struct sse_event_arch_data *arch_evt, int cpu) ++{ ++ arch_evt->cpu_id = cpu; ++ arch_evt->hart_id = cpuid_to_hartid_map(cpu); ++} ++ ++int arch_sse_init_event(struct sse_event_arch_data *arch_evt, u32 evt_id, int cpu) ++{ ++ void *stack; ++ ++ arch_evt->evt_id = evt_id; ++ stack = sse_stack_alloc(cpu); ++ if (!stack) ++ return -ENOMEM; ++ ++ arch_evt->stack = stack; ++ ++ if (sse_init_scs(cpu, arch_evt)) { ++ sse_stack_free(arch_evt->stack); ++ return -ENOMEM; ++ } ++ ++ if (sse_event_is_global(evt_id)) { ++ arch_evt->interrupted_phys = ++ virt_to_phys(&arch_evt->interrupted); ++ } else { ++ arch_evt->interrupted_phys = ++ per_cpu_ptr_to_phys(&arch_evt->interrupted); ++ } ++ ++ arch_sse_event_update_cpu(arch_evt, cpu); ++ ++ return 0; ++} ++ ++void arch_sse_free_event(struct sse_event_arch_data 
*arch_evt) ++{ ++ scs_free(arch_evt->shadow_stack); ++ sse_stack_free(arch_evt->stack); ++} ++ ++int arch_sse_register_event(struct sse_event_arch_data *arch_evt) ++{ ++ struct sbiret sret; ++ ++ sret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_REGISTER, arch_evt->evt_id, ++ (unsigned long)handle_sse, (unsigned long)arch_evt, 0, ++ 0, 0); ++ ++ return sbi_err_map_linux_errno(sret.error); ++} +diff --git a/arch/riscv/kernel/sse_entry.S b/arch/riscv/kernel/sse_entry.S +new file mode 100644 +index 000000000000..3837a22b6e4f +--- /dev/null ++++ b/arch/riscv/kernel/sse_entry.S +@@ -0,0 +1,204 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2024 Rivos Inc. ++ */ ++ ++#include ++#include ++ ++#include ++#include ++#include ++ ++/* When entering handle_sse, the following registers are set: ++ * a6: contains the hartid ++ * a7: contains a sse_event_arch_data struct pointer ++ */ ++ ++#ifdef CONFIG_SHADOW_CALL_STACK ++/* gp is used as the shadow call stack pointer instead */ ++.macro load_global_pointer ++.endm ++ ++/* Load the per-CPU SSE shadow call stack to gp. 
*/ ++.macro scs_load_sse_stack reg_evt ++ REG_L gp, SSE_REG_EVT_SHADOW_STACK(\reg_evt) ++.endm ++#else ++/* load __global_pointer to gp */ ++.macro load_global_pointer ++.option push ++.option norelax ++ la gp, __global_pointer$ ++.option pop ++.endm ++ ++/* Don't load shadow stack if SCS is disabled */ ++.macro scs_load_sse_stack reg_evt ++.endm ++#endif /* CONFIG_SHADOW_CALL_STACK */ ++ ++SYM_CODE_START(handle_sse) ++ /* Save stack temporarily */ ++ REG_S sp, SSE_REG_EVT_TMP(a7) ++ /* Set entry stack */ ++ REG_L sp, SSE_REG_EVT_STACK(a7) ++ ++ addi sp, sp, -(PT_SIZE_ON_STACK) ++ REG_S ra, PT_RA(sp) ++ REG_S s0, PT_S0(sp) ++ REG_S s1, PT_S1(sp) ++ REG_S s2, PT_S2(sp) ++ REG_S s3, PT_S3(sp) ++ REG_S s4, PT_S4(sp) ++ REG_S s5, PT_S5(sp) ++ REG_S s6, PT_S6(sp) ++ REG_S s7, PT_S7(sp) ++ REG_S s8, PT_S8(sp) ++ REG_S s9, PT_S9(sp) ++ REG_S s10, PT_S10(sp) ++ REG_S s11, PT_S11(sp) ++ REG_S tp, PT_TP(sp) ++ REG_S t0, PT_T0(sp) ++ REG_S t1, PT_T1(sp) ++ REG_S t2, PT_T2(sp) ++ REG_S t3, PT_T3(sp) ++ REG_S t4, PT_T4(sp) ++ REG_S t5, PT_T5(sp) ++ REG_S t6, PT_T6(sp) ++ REG_S gp, PT_GP(sp) ++ REG_S a0, PT_A0(sp) ++ REG_S a1, PT_A1(sp) ++ REG_S a2, PT_A2(sp) ++ REG_S a3, PT_A3(sp) ++ REG_S a4, PT_A4(sp) ++ REG_S a5, PT_A5(sp) ++ ++ /* Retrieve entry sp */ ++ REG_L a4, SSE_REG_EVT_TMP(a7) ++ /* Save CSRs */ ++ csrr a0, CSR_EPC ++ csrr a1, CSR_SSTATUS ++ csrr a2, CSR_STVAL ++ csrr a3, CSR_SCAUSE ++ ++ REG_S a0, PT_EPC(sp) ++ REG_S a1, PT_STATUS(sp) ++ REG_S a2, PT_BADADDR(sp) ++ REG_S a3, PT_CAUSE(sp) ++ REG_S a4, PT_SP(sp) ++ ++ /* Disable user memory access and floating/vector computing */ ++ li t0, SR_SUM | SR_FS_VS ++ csrc CSR_STATUS, t0 ++ ++ load_global_pointer ++ scs_load_sse_stack a7 ++ ++ /* Restore current task struct from __sse_entry_task */ ++ li t1, ASM_NR_CPUS ++ mv t3, zero ++ ++#ifdef CONFIG_SMP ++ REG_L t4, SSE_REG_HART_ID(a7) ++ REG_L t3, SSE_REG_CPU_ID(a7) ++ ++ bne t4, a6, .Lfind_hart_id_slowpath ++ ++.Lcpu_id_found: ++#endif ++ asm_per_cpu_with_cpu t2 
__sse_entry_task t1 t3 ++ REG_L tp, 0(t2) ++ ++ mv a1, sp /* pt_regs on stack */ ++ ++ /* ++ * Save sscratch for restoration since we might have interrupted the ++ * kernel in early exception path and thus, we don't know the content of ++ * sscratch. ++ */ ++ csrr s4, CSR_SSCRATCH ++ /* In-kernel scratch is 0 */ ++ csrw CSR_SCRATCH, x0 ++ ++ mv a0, a7 ++ ++ call do_sse ++ ++ csrw CSR_SSCRATCH, s4 ++ ++ REG_L a0, PT_STATUS(sp) ++ REG_L a1, PT_EPC(sp) ++ REG_L a2, PT_BADADDR(sp) ++ REG_L a3, PT_CAUSE(sp) ++ csrw CSR_SSTATUS, a0 ++ csrw CSR_EPC, a1 ++ csrw CSR_STVAL, a2 ++ csrw CSR_SCAUSE, a3 ++ ++ REG_L ra, PT_RA(sp) ++ REG_L s0, PT_S0(sp) ++ REG_L s1, PT_S1(sp) ++ REG_L s2, PT_S2(sp) ++ REG_L s3, PT_S3(sp) ++ REG_L s4, PT_S4(sp) ++ REG_L s5, PT_S5(sp) ++ REG_L s6, PT_S6(sp) ++ REG_L s7, PT_S7(sp) ++ REG_L s8, PT_S8(sp) ++ REG_L s9, PT_S9(sp) ++ REG_L s10, PT_S10(sp) ++ REG_L s11, PT_S11(sp) ++ REG_L tp, PT_TP(sp) ++ REG_L t0, PT_T0(sp) ++ REG_L t1, PT_T1(sp) ++ REG_L t2, PT_T2(sp) ++ REG_L t3, PT_T3(sp) ++ REG_L t4, PT_T4(sp) ++ REG_L t5, PT_T5(sp) ++ REG_L t6, PT_T6(sp) ++ REG_L gp, PT_GP(sp) ++ REG_L a0, PT_A0(sp) ++ REG_L a1, PT_A1(sp) ++ REG_L a2, PT_A2(sp) ++ REG_L a3, PT_A3(sp) ++ REG_L a4, PT_A4(sp) ++ REG_L a5, PT_A5(sp) ++ ++ REG_L sp, PT_SP(sp) ++ ++ li a7, SBI_EXT_SSE ++ li a6, SBI_SSE_EVENT_COMPLETE ++ ecall ++ ++#ifdef CONFIG_SMP ++.Lfind_hart_id_slowpath: ++ ++/* Slowpath to find the CPU id associated to the hart id */ ++la t0, __cpuid_to_hartid_map ++ ++.Lhart_id_loop: ++ REG_L t2, 0(t0) ++ beq t2, a6, .Lcpu_id_found ++ ++ /* Increment pointer and CPU number */ ++ addi t3, t3, 1 ++ addi t0, t0, RISCV_SZPTR ++ bltu t3, t1, .Lhart_id_loop ++ ++ /* ++ * This should never happen since we expect the hart_id to match one ++ * of our CPU, but better be safe than sorry ++ */ ++ la tp, init_task ++ la a0, sse_hart_id_panic_string ++ la t0, panic ++ jalr t0 ++ ++#endif ++ ++SYM_CODE_END(handle_sse) ++ ++SYM_DATA_START_LOCAL(sse_hart_id_panic_string) ++ .ascii 
"Unable to match hart_id with cpu\0" ++SYM_DATA_END(sse_hart_id_panic_string) diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c index 3c89b8ec69c4..9a8a0dc035b2 100644 --- a/arch/riscv/kernel/suspend.c @@ -38904,10 +42867,10 @@ index 3c89b8ec69c4..9a8a0dc035b2 100644 +#endif /* CONFIG_RISCV_SBI */ diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c new file mode 100644 -index 000000000000..052a41f53dc2 +index 000000000000..ed452a3cab03 --- /dev/null +++ b/arch/riscv/kernel/sys_hwprobe.c -@@ -0,0 +1,349 @@ +@@ -0,0 +1,362 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The hwprobe interface, for allowing userspace to probe to see which features @@ -39004,7 +42967,9 @@ index 000000000000..052a41f53dc2 + * regardless of the kernel's configuration, as no other checks, besides + * presence in the hart_isa bitmap, are made. + */ ++ EXT_KEY(ZAAMO); + EXT_KEY(ZACAS); ++ EXT_KEY(ZALRSC); + EXT_KEY(ZAWRS); + EXT_KEY(ZBA); + EXT_KEY(ZBB); @@ -39016,10 +42981,13 @@ index 000000000000..052a41f53dc2 + EXT_KEY(ZCA); + EXT_KEY(ZCB); + EXT_KEY(ZCMOP); ++ EXT_KEY(ZICBOM); + EXT_KEY(ZICBOZ); ++ EXT_KEY(ZICNTR); + EXT_KEY(ZICOND); + EXT_KEY(ZIHINTNTL); + EXT_KEY(ZIHINTPAUSE); ++ EXT_KEY(ZIHPM); + EXT_KEY(ZIMOP); + EXT_KEY(ZKND); + EXT_KEY(ZKNE); @@ -39037,6 +43005,8 @@ index 000000000000..052a41f53dc2 + EXT_KEY(ZVE64D); + EXT_KEY(ZVE64F); + EXT_KEY(ZVE64X); ++ EXT_KEY(ZVFBFMIN); ++ EXT_KEY(ZVFBFWMA); + EXT_KEY(ZVFH); + EXT_KEY(ZVFHMIN); + EXT_KEY(ZVKB); @@ -39053,6 +43023,7 @@ index 000000000000..052a41f53dc2 + EXT_KEY(ZCD); + EXT_KEY(ZCF); + EXT_KEY(ZFA); ++ EXT_KEY(ZFBFMIN); + EXT_KEY(ZFH); + EXT_KEY(ZFHMIN); + } @@ -39066,7 +43037,7 @@ index 000000000000..052a41f53dc2 + pair->value &= ~missing; +} + -+static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext) ++static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext) +{ + struct riscv_hwprobe pair; + @@ -39129,6 +43100,11 @@ index 
000000000000..052a41f53dc2 + if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) + pair->value = riscv_cboz_block_size; + break; ++ case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE: ++ pair->value = 0; ++ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOM)) ++ pair->value = riscv_cbom_block_size; ++ break; + case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS: + pair->value = user_max_virt_addr(); + break; @@ -39896,14 +43872,26 @@ index 000000000000..4d8dfc974f00 + .ext_data = riscv_isa_vendor_ext_andes, +}; diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig -index dfc237d7875b..148e52b516cf 100644 +index dfc237d7875b..83798ee0065f 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig -@@ -32,6 +32,7 @@ config KVM +@@ -23,15 +23,19 @@ config KVM + select HAVE_KVM_EVENTFD + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQFD ++ select HAVE_KVM_IRQ_BYPASS + select HAVE_KVM_IRQ_ROUTING + select HAVE_KVM_MSI + select HAVE_KVM_VCPU_ASYNC_IOCTL + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select KVM_GENERIC_HARDWARE_ENABLING + select KVM_MMIO ++ select KVM_VFIO select KVM_XFER_TO_GUEST_WORK select MMU_NOTIFIER select PREEMPT_NOTIFIERS + select SCHED_INFO ++ select SRCU help Support hosting virtualized guest machines. 
@@ -40039,24 +44027,194 @@ index 5cd407c6a8e4..39cd26af5a69 100644 static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx) { diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c -index c1585444f856..a8085cd8215e 100644 +index c1585444f856..c39ccd7583ee 100644 --- a/arch/riscv/kvm/aia_imsic.c +++ b/arch/riscv/kvm/aia_imsic.c -@@ -9,13 +9,13 @@ +@@ -9,13 +9,15 @@ #include #include +#include #include ++#include #include #include #include #include #include -#include ++#include #define IMSIC_MAX_EIX (IMSIC_MAX_ID / BITS_PER_TYPE(u64)) +@@ -676,6 +678,14 @@ static void imsic_swfile_update(struct kvm_vcpu *vcpu, + imsic_swfile_extirq_update(vcpu); + } + ++static u64 kvm_riscv_aia_msi_addr_mask(struct kvm_aia *aia) ++{ ++ u64 group_mask = BIT(aia->nr_group_bits) - 1; ++ ++ return (group_mask << (aia->nr_group_shift - IMSIC_MMIO_PAGE_SHIFT)) | ++ (BIT(aia->nr_hart_bits + aia->nr_guest_bits) - 1); ++} ++ + void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu) + { + unsigned long flags; +@@ -727,6 +737,132 @@ void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu) + kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei); + } + ++void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, ++ struct kvm_kernel_irq_routing_entry *old, ++ struct kvm_kernel_irq_routing_entry *new) ++{ ++ struct riscv_iommu_ir_vcpu_info vcpu_info; ++ struct kvm *kvm = irqfd->kvm; ++ struct kvm_aia *aia = &kvm->arch.aia; ++ int host_irq = irqfd->producer->irq; ++ struct irq_data *irqdata = irq_get_irq_data(host_irq); ++ unsigned long tmp, flags; ++ struct kvm_vcpu *vcpu; ++ struct imsic *imsic; ++ struct msi_msg msg; ++ u64 msi_addr_mask; ++ gpa_t target; ++ int ret; ++ ++ if (old && old->type == KVM_IRQ_ROUTING_MSI && ++ new && new->type == KVM_IRQ_ROUTING_MSI && ++ !memcmp(&old->msi, &new->msi, sizeof(new->msi))) ++ return; ++ ++ if (!new) { ++ if (!WARN_ON_ONCE(!old) && old->type == KVM_IRQ_ROUTING_MSI) { ++ ret = irq_set_vcpu_affinity(host_irq, NULL); ++ 
WARN_ON_ONCE(ret && ret != -EOPNOTSUPP); ++ } ++ return; ++ } ++ ++ if (new->type != KVM_IRQ_ROUTING_MSI) ++ return; ++ ++ target = ((gpa_t)new->msi.address_hi << 32) | new->msi.address_lo; ++ if (WARN_ON_ONCE(target & (IMSIC_MMIO_PAGE_SZ - 1))) ++ return; ++ ++ msg = (struct msi_msg){ ++ .address_hi = new->msi.address_hi, ++ .address_lo = new->msi.address_lo, ++ .data = new->msi.data, ++ }; ++ ++ kvm_for_each_vcpu(tmp, vcpu, kvm) { ++ if (target == vcpu->arch.aia_context.imsic_addr) ++ break; ++ } ++ if (!vcpu) ++ return; ++ ++ msi_addr_mask = kvm_riscv_aia_msi_addr_mask(aia); ++ vcpu_info = (struct riscv_iommu_ir_vcpu_info){ ++ .gpa = target, ++ .msi_addr_mask = msi_addr_mask, ++ .msi_addr_pattern = (target >> IMSIC_MMIO_PAGE_SHIFT) & ~msi_addr_mask, ++ .group_index_bits = aia->nr_group_bits, ++ .group_index_shift = aia->nr_group_shift, ++ }; ++ ++ imsic = vcpu->arch.aia_context.imsic_state; ++ ++ read_lock_irqsave(&imsic->vsfile_lock, flags); ++ ++ if (WARN_ON_ONCE(imsic->vsfile_cpu < 0)) ++ goto out; ++ ++ vcpu_info.hpa = imsic->vsfile_pa; ++ ++ ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); ++ WARN_ON_ONCE(ret && ret != -EOPNOTSUPP); ++ if (ret) ++ goto out; ++ ++ irq_data_get_irq_chip(irqdata)->irq_write_msi_msg(irqdata, &msg); ++ ++out: ++ read_unlock_irqrestore(&imsic->vsfile_lock, flags); ++} ++ ++static void kvm_riscv_vcpu_irq_update(struct kvm_vcpu *vcpu) ++{ ++ struct kvm *kvm = vcpu->kvm; ++ struct imsic *imsic = vcpu->arch.aia_context.imsic_state; ++ gpa_t gpa = vcpu->arch.aia_context.imsic_addr; ++ struct kvm_aia *aia = &kvm->arch.aia; ++ u64 msi_addr_mask = kvm_riscv_aia_msi_addr_mask(aia); ++ struct riscv_iommu_ir_vcpu_info vcpu_info = { ++ .gpa = gpa, ++ .hpa = imsic->vsfile_pa, ++ .msi_addr_mask = msi_addr_mask, ++ .msi_addr_pattern = (gpa >> IMSIC_MMIO_PAGE_SHIFT) & ~msi_addr_mask, ++ .group_index_bits = aia->nr_group_bits, ++ .group_index_shift = aia->nr_group_shift, ++ }; ++ struct kvm_kernel_irq_routing_entry *irq_entry; ++ struct 
kvm_kernel_irqfd *irqfd; ++ gpa_t target; ++ int host_irq, ret; ++ ++ spin_lock_irq(&kvm->irqfds.lock); ++ ++ list_for_each_entry(irqfd, &kvm->irqfds.items, list) { ++ if (!irqfd->producer) ++ continue; ++ ++ irq_entry = &irqfd->irq_entry; ++ if (irq_entry->type != KVM_IRQ_ROUTING_MSI) ++ continue; ++ ++ target = ((gpa_t)irq_entry->msi.address_hi << 32) | irq_entry->msi.address_lo; ++ if (WARN_ON_ONCE(target & (IMSIC_MMIO_PAGE_SZ - 1))) ++ continue; ++ ++ if (target != gpa) ++ continue; ++ ++ host_irq = irqfd->producer->irq; ++ ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); ++ WARN_ON_ONCE(ret && ret != -EOPNOTSUPP); ++ if (ret == -EOPNOTSUPP) ++ break; ++ } ++ ++ spin_unlock_irq(&kvm->irqfds.lock); ++} ++ + int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu) + { + unsigned long flags; +@@ -791,14 +927,17 @@ int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu) + if (ret) + goto fail_free_vsfile_hgei; + +- /* TODO: Update the IOMMU mapping ??? */ +- + /* Update new IMSIC VS-file details in IMSIC context */ + write_lock_irqsave(&imsic->vsfile_lock, flags); ++ + imsic->vsfile_hgei = new_vsfile_hgei; + imsic->vsfile_cpu = vcpu->cpu; + imsic->vsfile_va = new_vsfile_va; + imsic->vsfile_pa = new_vsfile_pa; ++ ++ /* Update the IOMMU mapping */ ++ kvm_riscv_vcpu_irq_update(vcpu); ++ + write_unlock_irqrestore(&imsic->vsfile_lock, flags); + + /* diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index 48ae0d4b3932..225a435d9c9a 100644 --- a/arch/riscv/kvm/main.c @@ -40083,11 +44241,120 @@ index 44bc324aeeb0..23c0e82b5103 100644 #include #define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL) +diff --git a/arch/riscv/kvm/trace.h b/arch/riscv/kvm/trace.h +new file mode 100644 +index 000000000000..3d54175d805c +--- /dev/null ++++ b/arch/riscv/kvm/trace.h +@@ -0,0 +1,67 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Tracepoints for RISC-V KVM ++ * ++ * Copyright 2024 Beijing ESWIN Computing Technology Co., Ltd. 
++ * ++ */ ++#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_KVM_H ++ ++#include ++ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM kvm ++ ++TRACE_EVENT(kvm_entry, ++ TP_PROTO(struct kvm_vcpu *vcpu), ++ TP_ARGS(vcpu), ++ ++ TP_STRUCT__entry( ++ __field(unsigned long, pc) ++ ), ++ ++ TP_fast_assign( ++ __entry->pc = vcpu->arch.guest_context.sepc; ++ ), ++ ++ TP_printk("PC: 0x016%lx", __entry->pc) ++); ++ ++TRACE_EVENT(kvm_exit, ++ TP_PROTO(struct kvm_cpu_trap *trap), ++ TP_ARGS(trap), ++ ++ TP_STRUCT__entry( ++ __field(unsigned long, sepc) ++ __field(unsigned long, scause) ++ __field(unsigned long, stval) ++ __field(unsigned long, htval) ++ __field(unsigned long, htinst) ++ ), ++ ++ TP_fast_assign( ++ __entry->sepc = trap->sepc; ++ __entry->scause = trap->scause; ++ __entry->stval = trap->stval; ++ __entry->htval = trap->htval; ++ __entry->htinst = trap->htinst; ++ ), ++ ++ TP_printk("SEPC:0x%lx, SCAUSE:0x%lx, STVAL:0x%lx, HTVAL:0x%lx, HTINST:0x%lx", ++ __entry->sepc, ++ __entry->scause, ++ __entry->stval, ++ __entry->htval, ++ __entry->htinst) ++); ++ ++#endif /* _TRACE_RSICV_KVM_H */ ++ ++#undef TRACE_INCLUDE_PATH ++#define TRACE_INCLUDE_PATH . 
++#undef TRACE_INCLUDE_FILE ++#define TRACE_INCLUDE_FILE trace ++ ++/* This part must be outside protection */ ++#include diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c -index 9584d62c96ee..ea0b599608ef 100644 +index 9584d62c96ee..c1267fb71ff8 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c -@@ -83,6 +83,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) +@@ -21,16 +21,25 @@ + #include + #include + ++#define CREATE_TRACE_POINTS ++#include "trace.h" ++ + const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { + KVM_GENERIC_VCPU_STATS(), + STATS_DESC_COUNTER(VCPU, ecall_exit_stat), + STATS_DESC_COUNTER(VCPU, wfi_exit_stat), ++ STATS_DESC_COUNTER(VCPU, wrs_exit_stat), + STATS_DESC_COUNTER(VCPU, mmio_exit_user), + STATS_DESC_COUNTER(VCPU, mmio_exit_kernel), + STATS_DESC_COUNTER(VCPU, csr_exit_user), + STATS_DESC_COUNTER(VCPU, csr_exit_kernel), + STATS_DESC_COUNTER(VCPU, signal_exits), +- STATS_DESC_COUNTER(VCPU, exits) ++ STATS_DESC_COUNTER(VCPU, exits), ++ STATS_DESC_COUNTER(VCPU, instr_illegal_exits), ++ STATS_DESC_COUNTER(VCPU, load_misaligned_exits), ++ STATS_DESC_COUNTER(VCPU, store_misaligned_exits), ++ STATS_DESC_COUNTER(VCPU, load_access_exits), ++ STATS_DESC_COUNTER(VCPU, store_access_exits), + }; + + const struct kvm_stats_header kvm_vcpu_stats_header = { +@@ -66,6 +75,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) + + memcpy(cntx, reset_cntx, sizeof(*cntx)); + ++ memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr)); ++ + kvm_riscv_vcpu_fp_reset(vcpu); + + kvm_riscv_vcpu_vector_reset(vcpu); +@@ -83,6 +94,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.hfence_tail = 0; memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); @@ -40096,7 +44363,7 @@ index 9584d62c96ee..ea0b599608ef 100644 /* Reset the guest CSRs for hotplug usecase */ if (loaded) kvm_arch_vcpu_load(vcpu, smp_processor_id()); -@@ -143,6 +145,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu 
*vcpu) +@@ -143,6 +156,12 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (rc) return rc; @@ -40109,7 +44376,78 @@ index 9584d62c96ee..ea0b599608ef 100644 /* Reset VCPU */ kvm_riscv_reset_vcpu(vcpu); -@@ -544,6 +552,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +@@ -493,31 +512,42 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + return -EINVAL; + } + +-static void kvm_riscv_vcpu_update_config(const unsigned long *isa) ++static void kvm_riscv_vcpu_setup_config(struct kvm_vcpu *vcpu) + { +- u64 henvcfg = 0; ++ const unsigned long *isa = vcpu->arch.isa; ++ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; + + if (riscv_isa_extension_available(isa, SVPBMT)) +- henvcfg |= ENVCFG_PBMTE; ++ cfg->henvcfg |= ENVCFG_PBMTE; + + if (riscv_isa_extension_available(isa, SSTC)) +- henvcfg |= ENVCFG_STCE; ++ cfg->henvcfg |= ENVCFG_STCE; + + if (riscv_isa_extension_available(isa, ZICBOM)) +- henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE); ++ cfg->henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE); + + if (riscv_isa_extension_available(isa, ZICBOZ)) +- henvcfg |= ENVCFG_CBZE; +- +- csr_write(CSR_HENVCFG, henvcfg); +-#ifdef CONFIG_32BIT +- csr_write(CSR_HENVCFGH, henvcfg >> 32); +-#endif ++ cfg->henvcfg |= ENVCFG_CBZE; ++ ++ if (riscv_isa_extension_available(isa, SVADU) && ++ !riscv_isa_extension_available(isa, SVADE)) ++ cfg->henvcfg |= ENVCFG_ADUE; ++ ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) { ++ cfg->hstateen0 |= SMSTATEEN0_HSENVCFG; ++ if (riscv_isa_extension_available(isa, SSAIA)) ++ cfg->hstateen0 |= SMSTATEEN0_AIA_IMSIC | ++ SMSTATEEN0_AIA | ++ SMSTATEEN0_AIA_ISEL; ++ if (riscv_isa_extension_available(isa, SMSTATEEN)) ++ cfg->hstateen0 |= SMSTATEEN0_SSTATEEN0; ++ } + } + + void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + { + struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; ++ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; + + csr_write(CSR_VSSTATUS, csr->vsstatus); + csr_write(CSR_VSIE, csr->vsie); +@@ -528,8 +558,14 @@ void 
kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) + csr_write(CSR_VSTVAL, csr->vstval); + csr_write(CSR_HVIP, csr->hvip); + csr_write(CSR_VSATP, csr->vsatp); +- +- kvm_riscv_vcpu_update_config(vcpu->arch.isa); ++ csr_write(CSR_HENVCFG, cfg->henvcfg); ++ if (IS_ENABLED(CONFIG_32BIT)) ++ csr_write(CSR_HENVCFGH, cfg->henvcfg >> 32); ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) { ++ csr_write(CSR_HSTATEEN0, cfg->hstateen0); ++ if (IS_ENABLED(CONFIG_32BIT)) ++ csr_write(CSR_HSTATEEN0H, cfg->hstateen0 >> 32); ++ } + + kvm_riscv_gstage_update_hgatp(vcpu); + +@@ -544,6 +580,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_riscv_vcpu_aia_load(vcpu, cpu); @@ -40118,7 +44456,7 @@ index 9584d62c96ee..ea0b599608ef 100644 vcpu->cpu = cpu; } -@@ -617,6 +627,9 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu) +@@ -617,6 +655,9 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_HFENCE, vcpu)) kvm_riscv_hfence_process(vcpu); @@ -40128,6 +44466,139 @@ index 9584d62c96ee..ea0b599608ef 100644 } } +@@ -628,6 +669,32 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu) + kvm_riscv_vcpu_aia_update_hvip(vcpu); + } + ++static __always_inline void kvm_riscv_vcpu_swap_in_guest_state(struct kvm_vcpu *vcpu) ++{ ++ struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr; ++ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; ++ struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; ++ ++ vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg); ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) && ++ (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0)) ++ vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0, ++ smcsr->sstateen0); ++} ++ ++static __always_inline void kvm_riscv_vcpu_swap_in_host_state(struct kvm_vcpu *vcpu) ++{ ++ struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr; ++ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; ++ struct kvm_vcpu_config *cfg = 
&vcpu->arch.cfg; ++ ++ csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg); ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN) && ++ (cfg->hstateen0 & SMSTATEEN0_SSTATEEN0)) ++ smcsr->sstateen0 = csr_swap(CSR_SSTATEEN0, ++ vcpu->arch.host_sstateen0); ++} ++ + /* + * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while + * the vCPU is running. +@@ -637,10 +704,12 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu) + */ + static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu) + { ++ kvm_riscv_vcpu_swap_in_guest_state(vcpu); + guest_state_enter_irqoff(); + __kvm_riscv_switch_to(&vcpu->arch); + vcpu->arch.last_exit_cpu = vcpu->cpu; + guest_state_exit_irqoff(); ++ kvm_riscv_vcpu_swap_in_host_state(vcpu); + } + + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +@@ -649,6 +718,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) + struct kvm_cpu_trap trap; + struct kvm_run *run = vcpu->run; + ++ if (!vcpu->arch.ran_atleast_once) ++ kvm_riscv_vcpu_setup_config(vcpu); ++ + /* Mark this VCPU ran at least once */ + vcpu->arch.ran_atleast_once = true; + +@@ -748,6 +820,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) + */ + kvm_riscv_local_tlb_sanitize(vcpu); + ++ trace_kvm_entry(vcpu); ++ + guest_timing_enter_irqoff(); + + kvm_riscv_vcpu_enter_exit(vcpu); +@@ -786,6 +860,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) + + local_irq_enable(); + ++ trace_kvm_exit(&trap); ++ + preempt_enable(); + + kvm_vcpu_srcu_read_lock(vcpu); +diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c +index 2415722c01b8..e992b2870cc0 100644 +--- a/arch/riscv/kvm/vcpu_exit.c ++++ b/arch/riscv/kvm/vcpu_exit.c +@@ -165,6 +165,17 @@ void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu, + vcpu->arch.guest_context.sstatus |= SR_SPP; + } + ++static inline int vcpu_redirect(struct kvm_vcpu *vcpu, struct kvm_cpu_trap *trap) ++{ ++ int ret = -EFAULT; ++ ++ if (vcpu->arch.guest_context.hstatus & 
HSTATUS_SPV) { ++ kvm_riscv_vcpu_trap_redirect(vcpu, trap); ++ ret = 1; ++ } ++ return ret; ++} ++ + /* + * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on + * proper exit to userspace. +@@ -183,12 +194,32 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + run->exit_reason = KVM_EXIT_UNKNOWN; + switch (trap->scause) { + case EXC_INST_ILLEGAL: ++ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ILLEGAL_INSN); ++ vcpu->stat.instr_illegal_exits++; ++ ret = vcpu_redirect(vcpu, trap); ++ break; + case EXC_LOAD_MISALIGNED: ++ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_LOAD); ++ vcpu->stat.load_misaligned_exits++; ++ ret = vcpu_redirect(vcpu, trap); ++ break; + case EXC_STORE_MISALIGNED: +- if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) { +- kvm_riscv_vcpu_trap_redirect(vcpu, trap); +- ret = 1; +- } ++ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_MISALIGNED_STORE); ++ vcpu->stat.store_misaligned_exits++; ++ ret = vcpu_redirect(vcpu, trap); ++ break; ++ case EXC_LOAD_ACCESS: ++ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_LOAD); ++ vcpu->stat.load_access_exits++; ++ ret = vcpu_redirect(vcpu, trap); ++ break; ++ case EXC_STORE_ACCESS: ++ kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_ACCESS_STORE); ++ vcpu->stat.store_access_exits++; ++ ret = vcpu_redirect(vcpu, trap); ++ break; ++ case EXC_INST_ACCESS: ++ ret = vcpu_redirect(vcpu, trap); + break; + case EXC_VIRTUAL_INST_FAULT: + if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV) diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c index 08ba48a395aa..030904d82b58 100644 --- a/arch/riscv/kvm/vcpu_fp.c @@ -40141,26 +44612,363 @@ index 08ba48a395aa..030904d82b58 100644 #ifdef CONFIG_FPU void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) +diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c +index 7a6abed41bc1..97dec18e6989 100644 +--- a/arch/riscv/kvm/vcpu_insn.c ++++ b/arch/riscv/kvm/vcpu_insn.c +@@ -7,6 +7,8 @@ + #include + #include + 
++#include ++ + #define INSN_OPCODE_MASK 0x007c + #define INSN_OPCODE_SHIFT 2 + #define INSN_OPCODE_SYSTEM 28 +@@ -14,6 +16,9 @@ + #define INSN_MASK_WFI 0xffffffff + #define INSN_MATCH_WFI 0x10500073 + ++#define INSN_MASK_WRS 0xffffffff ++#define INSN_MATCH_WRS 0x00d00073 ++ + #define INSN_MATCH_CSRRW 0x1073 + #define INSN_MASK_CSRRW 0x707f + #define INSN_MATCH_CSRRS 0x2073 +@@ -201,6 +206,13 @@ static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn) + return KVM_INSN_CONTINUE_NEXT_SEPC; + } + ++static int wrs_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn) ++{ ++ vcpu->stat.wrs_exit_stat++; ++ kvm_vcpu_on_spin(vcpu, vcpu->arch.guest_context.sstatus & SR_SPP); ++ return KVM_INSN_CONTINUE_NEXT_SEPC; ++} ++ + struct csr_func { + unsigned int base; + unsigned int count; +@@ -213,9 +225,20 @@ struct csr_func { + unsigned long wr_mask); + }; + ++static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num, ++ unsigned long *val, unsigned long new_val, ++ unsigned long wr_mask) ++{ ++ if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR)) ++ return KVM_INSN_ILLEGAL_TRAP; ++ ++ return KVM_INSN_EXIT_TO_USER_SPACE; ++} ++ + static const struct csr_func csr_funcs[] = { + KVM_RISCV_VCPU_AIA_CSR_FUNCS + KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS ++ { .base = CSR_SEED, .count = 1, .func = seed_csr_rmw }, + }; + + /** +@@ -365,6 +388,11 @@ static const struct insn_func system_opcode_funcs[] = { + .match = INSN_MATCH_WFI, + .func = wfi_insn, + }, ++ { ++ .mask = INSN_MASK_WRS, ++ .match = INSN_MATCH_WRS, ++ .func = wrs_insn, ++ }, + }; + + static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c -index d520b25d8561..6fe35616ad43 100644 +index d520b25d8561..06a989dcd2d5 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c -@@ -13,7 +13,7 @@ +@@ -13,8 +13,9 @@ #include #include #include -#include +#include #include ++#include #include 
-@@ -865,59 +865,66 @@ static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu) + #define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0) +@@ -34,21 +35,68 @@ static const unsigned long kvm_isa_ext_arr[] = { + [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m, + [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v, + /* Multi letter extensions (alphabetically sorted) */ ++ [KVM_RISCV_ISA_EXT_SMNPM] = RISCV_ISA_EXT_SSNPM, ++ KVM_ISA_EXT_ARR(SMSTATEEN), + KVM_ISA_EXT_ARR(SSAIA), ++ KVM_ISA_EXT_ARR(SSNPM), + KVM_ISA_EXT_ARR(SSTC), ++ KVM_ISA_EXT_ARR(SVADE), ++ KVM_ISA_EXT_ARR(SVADU), + KVM_ISA_EXT_ARR(SVINVAL), + KVM_ISA_EXT_ARR(SVNAPOT), + KVM_ISA_EXT_ARR(SVPBMT), ++ KVM_ISA_EXT_ARR(SVVPTC), ++ KVM_ISA_EXT_ARR(ZAAMO), ++ KVM_ISA_EXT_ARR(ZABHA), ++ KVM_ISA_EXT_ARR(ZACAS), ++ KVM_ISA_EXT_ARR(ZALRSC), ++ KVM_ISA_EXT_ARR(ZAWRS), + KVM_ISA_EXT_ARR(ZBA), + KVM_ISA_EXT_ARR(ZBB), ++ KVM_ISA_EXT_ARR(ZBC), ++ KVM_ISA_EXT_ARR(ZBKB), ++ KVM_ISA_EXT_ARR(ZBKC), ++ KVM_ISA_EXT_ARR(ZBKX), + KVM_ISA_EXT_ARR(ZBS), ++ KVM_ISA_EXT_ARR(ZCA), ++ KVM_ISA_EXT_ARR(ZCB), ++ KVM_ISA_EXT_ARR(ZCD), ++ KVM_ISA_EXT_ARR(ZCF), ++ KVM_ISA_EXT_ARR(ZCMOP), ++ KVM_ISA_EXT_ARR(ZFA), ++ KVM_ISA_EXT_ARR(ZFH), ++ KVM_ISA_EXT_ARR(ZFHMIN), + KVM_ISA_EXT_ARR(ZICBOM), + KVM_ISA_EXT_ARR(ZICBOZ), ++ KVM_ISA_EXT_ARR(ZICCRSE), + KVM_ISA_EXT_ARR(ZICNTR), ++ KVM_ISA_EXT_ARR(ZICOND), + KVM_ISA_EXT_ARR(ZICSR), + KVM_ISA_EXT_ARR(ZIFENCEI), ++ KVM_ISA_EXT_ARR(ZIHINTNTL), + KVM_ISA_EXT_ARR(ZIHINTPAUSE), + KVM_ISA_EXT_ARR(ZIHPM), ++ KVM_ISA_EXT_ARR(ZIMOP), ++ KVM_ISA_EXT_ARR(ZKND), ++ KVM_ISA_EXT_ARR(ZKNE), ++ KVM_ISA_EXT_ARR(ZKNH), ++ KVM_ISA_EXT_ARR(ZKR), ++ KVM_ISA_EXT_ARR(ZKSED), ++ KVM_ISA_EXT_ARR(ZKSH), ++ KVM_ISA_EXT_ARR(ZKT), ++ KVM_ISA_EXT_ARR(ZTSO), ++ KVM_ISA_EXT_ARR(ZVBB), ++ KVM_ISA_EXT_ARR(ZVBC), ++ KVM_ISA_EXT_ARR(ZVFH), ++ KVM_ISA_EXT_ARR(ZVFHMIN), ++ KVM_ISA_EXT_ARR(ZVKB), ++ KVM_ISA_EXT_ARR(ZVKG), ++ KVM_ISA_EXT_ARR(ZVKNED), ++ KVM_ISA_EXT_ARR(ZVKNHA), ++ KVM_ISA_EXT_ARR(ZVKNHB), ++ KVM_ISA_EXT_ARR(ZVKSED), ++ 
KVM_ISA_EXT_ARR(ZVKSH), ++ KVM_ISA_EXT_ARR(ZVKT), + }; + + static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) +@@ -68,6 +116,12 @@ static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext) + switch (ext) { + case KVM_RISCV_ISA_EXT_H: + return false; ++ case KVM_RISCV_ISA_EXT_SVADU: ++ /* ++ * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero. ++ * Guest OS can use Svadu only when host OS enable Svadu. ++ */ ++ return arch_has_hw_pte_young(); + case KVM_RISCV_ISA_EXT_V: + return riscv_v_vstate_ctrl_user_allowed(); + default: +@@ -80,23 +134,76 @@ static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext) + static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) + { + switch (ext) { ++ /* Extensions which don't have any mechanism to disable */ + case KVM_RISCV_ISA_EXT_A: + case KVM_RISCV_ISA_EXT_C: + case KVM_RISCV_ISA_EXT_I: + case KVM_RISCV_ISA_EXT_M: +- case KVM_RISCV_ISA_EXT_SSAIA: ++ case KVM_RISCV_ISA_EXT_SMNPM: ++ case KVM_RISCV_ISA_EXT_SSNPM: + case KVM_RISCV_ISA_EXT_SSTC: + case KVM_RISCV_ISA_EXT_SVINVAL: + case KVM_RISCV_ISA_EXT_SVNAPOT: ++ case KVM_RISCV_ISA_EXT_SVVPTC: ++ case KVM_RISCV_ISA_EXT_ZAAMO: ++ case KVM_RISCV_ISA_EXT_ZABHA: ++ case KVM_RISCV_ISA_EXT_ZACAS: ++ case KVM_RISCV_ISA_EXT_ZALRSC: ++ case KVM_RISCV_ISA_EXT_ZAWRS: + case KVM_RISCV_ISA_EXT_ZBA: + case KVM_RISCV_ISA_EXT_ZBB: ++ case KVM_RISCV_ISA_EXT_ZBC: ++ case KVM_RISCV_ISA_EXT_ZBKB: ++ case KVM_RISCV_ISA_EXT_ZBKC: ++ case KVM_RISCV_ISA_EXT_ZBKX: + case KVM_RISCV_ISA_EXT_ZBS: ++ case KVM_RISCV_ISA_EXT_ZCA: ++ case KVM_RISCV_ISA_EXT_ZCB: ++ case KVM_RISCV_ISA_EXT_ZCD: ++ case KVM_RISCV_ISA_EXT_ZCF: ++ case KVM_RISCV_ISA_EXT_ZCMOP: ++ case KVM_RISCV_ISA_EXT_ZFA: ++ case KVM_RISCV_ISA_EXT_ZFH: ++ case KVM_RISCV_ISA_EXT_ZFHMIN: ++ case KVM_RISCV_ISA_EXT_ZICCRSE: + case KVM_RISCV_ISA_EXT_ZICNTR: ++ case KVM_RISCV_ISA_EXT_ZICOND: + case KVM_RISCV_ISA_EXT_ZICSR: + case KVM_RISCV_ISA_EXT_ZIFENCEI: ++ case KVM_RISCV_ISA_EXT_ZIHINTNTL: + case 
KVM_RISCV_ISA_EXT_ZIHINTPAUSE: + case KVM_RISCV_ISA_EXT_ZIHPM: ++ case KVM_RISCV_ISA_EXT_ZIMOP: ++ case KVM_RISCV_ISA_EXT_ZKND: ++ case KVM_RISCV_ISA_EXT_ZKNE: ++ case KVM_RISCV_ISA_EXT_ZKNH: ++ case KVM_RISCV_ISA_EXT_ZKR: ++ case KVM_RISCV_ISA_EXT_ZKSED: ++ case KVM_RISCV_ISA_EXT_ZKSH: ++ case KVM_RISCV_ISA_EXT_ZKT: ++ case KVM_RISCV_ISA_EXT_ZTSO: ++ case KVM_RISCV_ISA_EXT_ZVBB: ++ case KVM_RISCV_ISA_EXT_ZVBC: ++ case KVM_RISCV_ISA_EXT_ZVFH: ++ case KVM_RISCV_ISA_EXT_ZVFHMIN: ++ case KVM_RISCV_ISA_EXT_ZVKB: ++ case KVM_RISCV_ISA_EXT_ZVKG: ++ case KVM_RISCV_ISA_EXT_ZVKNED: ++ case KVM_RISCV_ISA_EXT_ZVKNHA: ++ case KVM_RISCV_ISA_EXT_ZVKNHB: ++ case KVM_RISCV_ISA_EXT_ZVKSED: ++ case KVM_RISCV_ISA_EXT_ZVKSH: ++ case KVM_RISCV_ISA_EXT_ZVKT: + return false; ++ /* Extensions which can be disabled using Smstateen */ ++ case KVM_RISCV_ISA_EXT_SSAIA: ++ return riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN); ++ case KVM_RISCV_ISA_EXT_SVADE: ++ /* ++ * The henvcfg.ADUE is read-only zero if menvcfg.ADUE is zero. ++ * Svade is not allowed to disable when the platform use Svade. 
++ */ ++ return arch_has_hw_pte_young(); + default: + break; + } +@@ -378,6 +485,34 @@ static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu, + return 0; + } + ++static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu, ++ unsigned long reg_num, ++ unsigned long reg_val) ++{ ++ struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr; ++ ++ if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) / ++ sizeof(unsigned long)) ++ return -EINVAL; ++ ++ ((unsigned long *)csr)[reg_num] = reg_val; ++ return 0; ++} ++ ++static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu, ++ unsigned long reg_num, ++ unsigned long *out_val) ++{ ++ struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr; ++ ++ if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) / ++ sizeof(unsigned long)) ++ return -EINVAL; ++ ++ *out_val = ((unsigned long *)csr)[reg_num]; ++ return 0; ++} ++ + static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) + { +@@ -401,6 +536,12 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu, + case KVM_REG_RISCV_CSR_AIA: + rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, ®_val); + break; ++ case KVM_REG_RISCV_CSR_SMSTATEEN: ++ rc = -EINVAL; ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) ++ rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, ++ ®_val); ++ break; + default: + rc = -ENOENT; + break; +@@ -440,6 +581,12 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, + case KVM_REG_RISCV_CSR_AIA: + rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val); + break; ++ case KVM_REG_RISCV_CSR_SMSTATEEN: ++ rc = -EINVAL; ++ if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) ++ rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, ++ reg_val); ++ break; + default: + rc = -ENOENT; + break; +@@ -696,6 +843,8 @@ static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu) + + if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) + n += 
sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long); ++ if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) ++ n += sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long); + + return n; + } +@@ -704,7 +853,7 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu, + u64 __user *uindices) + { + int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long); +- int n2 = 0; ++ int n2 = 0, n3 = 0; + + /* copy general csr regs */ + for (int i = 0; i < n1; i++) { +@@ -738,7 +887,25 @@ static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu, + } + } + +- return n1 + n2; ++ /* copy Smstateen csr regs */ ++ if (riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN)) { ++ n3 = sizeof(struct kvm_riscv_smstateen_csr) / sizeof(unsigned long); ++ ++ for (int i = 0; i < n3; i++) { ++ u64 size = IS_ENABLED(CONFIG_32BIT) ? ++ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; ++ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR | ++ KVM_REG_RISCV_CSR_SMSTATEEN | i; ++ ++ if (uindices) { ++ if (put_user(reg, uindices)) ++ return -EFAULT; ++ uindices++; ++ } ++ } ++ } ++ ++ return n1 + n2 + n3; + } + + static inline unsigned long num_timer_regs(void) +@@ -865,59 +1032,66 @@ static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu) return copy_isa_ext_reg_indices(vcpu, NULL);; } -static inline unsigned long num_sbi_ext_regs(void) -+static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) - { +-{ - /* - * number of KVM_REG_RISCV_SBI_SINGLE + - * 2 x (number of KVM_REG_RISCV_SBI_MULTI) @@ -40169,7 +44977,8 @@ index d520b25d8561..6fe35616ad43 100644 -} - -static int copy_sbi_ext_reg_indices(u64 __user *uindices) --{ ++static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) + { - int n; + unsigned int n = 0; @@ -40254,7 +45063,7 @@ index d520b25d8561..6fe35616ad43 100644 } /* -@@ -936,7 +943,8 @@ unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu) +@@ -936,7 +1110,8 @@ unsigned long 
kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu) res += num_fp_f_regs(vcpu); res += num_fp_d_regs(vcpu); res += num_isa_ext_regs(vcpu); @@ -40264,7 +45073,7 @@ index d520b25d8561..6fe35616ad43 100644 return res; } -@@ -984,9 +992,15 @@ int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu, +@@ -984,9 +1159,15 @@ int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu, return ret; uindices += ret; @@ -40281,7 +45090,7 @@ index d520b25d8561..6fe35616ad43 100644 return 0; } -@@ -1009,12 +1023,14 @@ int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, +@@ -1009,12 +1190,14 @@ int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_FP_D: return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, KVM_REG_RISCV_FP_D); @@ -40298,7 +45107,7 @@ index d520b25d8561..6fe35616ad43 100644 default: break; } -@@ -1040,12 +1056,14 @@ int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, +@@ -1040,12 +1223,14 @@ int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_FP_D: return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, KVM_REG_RISCV_FP_D); @@ -40834,6 +45643,55 @@ index b430cbb69521..b339a2682f25 100644 #include #include +diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c +index 7e2b50c692c1..e90e91dfe3c3 100644 +--- a/arch/riscv/kvm/vm.c ++++ b/arch/riscv/kvm/vm.c +@@ -11,6 +11,8 @@ + #include + #include + #include ++#include ++#include + + const struct _kvm_stats_desc kvm_vm_stats_desc[] = { + KVM_GENERIC_VM_STATS() +@@ -55,6 +57,35 @@ void kvm_arch_destroy_vm(struct kvm *kvm) + kvm_riscv_aia_destroy_vm(kvm); + } + ++bool kvm_arch_has_irq_bypass(void) ++{ ++ return true; ++} ++ ++int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, ++ struct irq_bypass_producer *prod) ++{ ++ struct kvm_kernel_irqfd *irqfd = ++ container_of(cons, struct kvm_kernel_irqfd, consumer); ++ ++ irqfd->producer = prod; ++ kvm_arch_update_irqfd_routing(irqfd, NULL, &irqfd->irq_entry); ++ ++ return 0; ++} ++ ++void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, 
++ struct irq_bypass_producer *prod) ++{ ++ struct kvm_kernel_irqfd *irqfd = ++ container_of(cons, struct kvm_kernel_irqfd, consumer); ++ ++ WARN_ON(irqfd->producer != prod); ++ ++ kvm_arch_update_irqfd_routing(irqfd, &irqfd->irq_entry, NULL); ++ irqfd->producer = NULL; ++} ++ + int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irql, + bool line_status) + { diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index 26cb2502ecf8..4157f446d5bc 100644 --- a/arch/riscv/lib/Makefile @@ -41361,7 +46219,7 @@ index 000000000000..be38a93cedae +} +#endif diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S -index 3ab438f30d13..a1e4a3c42925 100644 +index 3ab438f30d13..743f08fea4ac 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -3,6 +3,8 @@ @@ -41387,6 +46245,24 @@ index 3ab438f30d13..a1e4a3c42925 100644 /* Enable access to user memory */ li t6, SR_SUM +@@ -35,7 +44,7 @@ SYM_FUNC_START(__asm_copy_to_user) + * Use byte copy only if too small. 
+ * SZREG holds 4 for RV32 and 8 for RV64 + */ +- li a3, 9*SZREG /* size must be larger than size in word_copy */ ++ li a3, 9*SZREG-1 /* size must >= (word_copy stride + SZREG-1) */ + bltu a2, a3, .Lbyte_copy_tail + + /* +@@ -94,7 +103,7 @@ SYM_FUNC_START(__asm_copy_to_user) + fixup REG_S t4, 7*SZREG(a0), 10f + addi a0, a0, 8*SZREG + addi a1, a1, 8*SZREG +- bltu a0, t0, 2b ++ bleu a0, t0, 2b + + addi t0, t0, 8*SZREG /* revert to original value */ + j .Lbyte_copy_tail @@ -181,6 +190,7 @@ SYM_FUNC_START(__asm_copy_to_user) sub a0, t5, a0 ret @@ -41542,7 +46418,7 @@ index 000000000000..b28f2430e52f +SYM_FUNC_END(xor_regs_5_) +EXPORT_SYMBOL(xor_regs_5_) diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c -index f1387272a551..55a34f2020a8 100644 +index f1387272a551..a349355d472e 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -3,7 +3,9 @@ @@ -41555,9 +46431,39 @@ index f1387272a551..55a34f2020a8 100644 #include #ifdef CONFIG_SMP -@@ -124,13 +126,24 @@ void __init riscv_init_cbo_blocksizes(void) - unsigned long cbom_hartid, cboz_hartid; - u32 cbom_block_size = 0, cboz_block_size = 0; +@@ -80,12 +82,12 @@ void flush_icache_mm(struct mm_struct *mm, bool local) + #endif /* CONFIG_SMP */ + + #ifdef CONFIG_MMU +-void flush_icache_pte(pte_t pte) ++void flush_icache_pte(struct mm_struct *mm, pte_t pte) + { + struct folio *folio = page_folio(pte_page(pte)); + + if (!test_bit(PG_dcache_clean, &folio->flags)) { +- flush_icache_all(); ++ flush_icache_mm(mm, false); + set_bit(PG_dcache_clean, &folio->flags); + } + } +@@ -97,6 +99,9 @@ EXPORT_SYMBOL_GPL(riscv_cbom_block_size); + unsigned int riscv_cboz_block_size; + EXPORT_SYMBOL_GPL(riscv_cboz_block_size); + ++unsigned int riscv_cbop_block_size; ++EXPORT_SYMBOL_GPL(riscv_cbop_block_size); ++ + static void __init cbo_get_block_size(struct device_node *node, + const char *name, u32 *block_size, + unsigned long *first_hartid) +@@ -121,16 +126,29 @@ static void __init 
cbo_get_block_size(struct device_node *node, + + void __init riscv_init_cbo_blocksizes(void) + { +- unsigned long cbom_hartid, cboz_hartid; +- u32 cbom_block_size = 0, cboz_block_size = 0; ++ unsigned long cbom_hartid, cboz_hartid, cbop_hartid; ++ u32 cbom_block_size = 0, cboz_block_size = 0, cbop_block_size = 0; struct device_node *node; + struct acpi_table_header *rhct; + acpi_status status; @@ -41569,6 +46475,8 @@ index f1387272a551..55a34f2020a8 100644 + &cbom_block_size, &cbom_hartid); + cbo_get_block_size(node, "riscv,cboz-block-size", + &cboz_block_size, &cboz_hartid); ++ cbo_get_block_size(node, "riscv,cbop-block-size", ++ &cbop_block_size, &cbop_hartid); + } + } else { + status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct); @@ -41581,11 +46489,19 @@ index f1387272a551..55a34f2020a8 100644 - &cbom_block_size, &cbom_hartid); - cbo_get_block_size(node, "riscv,cboz-block-size", - &cboz_block_size, &cboz_hartid); -+ acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL); ++ acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, &cbop_block_size); + acpi_put_table((struct acpi_table_header *)rhct); } if (cbom_block_size) +@@ -138,4 +156,7 @@ void __init riscv_init_cbo_blocksizes(void) + + if (cboz_block_size) + riscv_cboz_block_size = cboz_block_size; ++ ++ if (cbop_block_size) ++ riscv_cbop_block_size = cbop_block_size; + } diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index a77342eb3489..32031a7d96d4 100644 --- a/arch/riscv/mm/dma-noncoherent.c @@ -41634,11 +46550,52 @@ index a77342eb3489..32031a7d96d4 100644 } void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, +diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c +index bdf8ac6c7e30..07fa26cb9529 100644 +--- a/arch/riscv/mm/init.c ++++ b/arch/riscv/mm/init.c +@@ -37,6 +37,8 @@ + + #include "../kernel/head.h" + ++u64 new_vmalloc[NR_CPUS / sizeof(u64) + 1]; ++ + struct kernel_mapping kernel_map __ro_after_init; + 
EXPORT_SYMBOL(kernel_map); + #ifdef CONFIG_XIP_KERNEL diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c -index ef887efcb679..21ea6ed76470 100644 +index ef887efcb679..7787a59a6cc1 100644 --- a/arch/riscv/mm/pgtable.c +++ b/arch/riscv/mm/pgtable.c -@@ -36,6 +36,7 @@ pud_t *pud_offset(p4d_t *p4d, unsigned long address) +@@ -9,13 +9,26 @@ int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty) + { ++ asm goto(ALTERNATIVE("nop", "j %l[svvptc]", 0, RISCV_ISA_EXT_SVVPTC, 1) ++ : : : : svvptc); ++ + if (!pte_same(ptep_get(ptep), entry)) +- __set_pte_at(ptep, entry); ++ __set_pte_at(vma->vm_mm, ptep, entry); + /* + * update_mmu_cache will unconditionally execute, handling both + * the case that the PTE changed and the spurious fault case. + */ + return true; ++ ++svvptc: ++ if (!pte_same(ptep_get(ptep), entry)) { ++ __set_pte_at(vma->vm_mm, ptep, entry); ++ /* Here only not svadu is impacted */ ++ flush_tlb_page(vma, address); ++ return true; ++ } ++ ++ return false; + } + + int ptep_test_and_clear_young(struct vm_area_struct *vma, +@@ -36,6 +49,7 @@ pud_t *pud_offset(p4d_t *p4d, unsigned long address) return (pud_t *)p4d; } @@ -41646,7 +46603,7 @@ index ef887efcb679..21ea6ed76470 100644 p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) { -@@ -44,6 +45,7 @@ p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +@@ -44,6 +58,7 @@ p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) return (p4d_t *)pgd; } @@ -41710,6 +46667,744 @@ index 324e8cd9b502..a9f4af9f7f3f 100644 for (i = 0; i < nr_ptes_in_range; ++i) { local_flush_tlb_page_asid(start, asid); start += stride; +diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h +index a5ce1ab76ece..06f17ddc02a9 100644 +--- a/arch/riscv/net/bpf_jit.h ++++ b/arch/riscv/net/bpf_jit.h +@@ -18,6 +18,16 @@ static inline bool rvc_enabled(void) + return IS_ENABLED(CONFIG_RISCV_ISA_C); + } + ++static inline bool rvzba_enabled(void) ++{ ++ return 
IS_ENABLED(CONFIG_RISCV_ISA_ZBA) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBA); ++} ++ ++static inline bool rvzbb_enabled(void) ++{ ++ return IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBB); ++} ++ + enum { + RV_REG_ZERO = 0, /* The constant value 0 */ + RV_REG_RA = 1, /* Return address */ +@@ -730,6 +740,44 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2) + return rv_css_insn(0x6, imm, rs2, 0x2); + } + ++/* RVZBA instructions. */ ++static inline u32 rvzba_sh2add(u8 rd, u8 rs1, u8 rs2) ++{ ++ return rv_r_insn(0x10, rs2, rs1, 0x4, rd, 0x33); ++} ++ ++static inline u32 rvzba_sh3add(u8 rd, u8 rs1, u8 rs2) ++{ ++ return rv_r_insn(0x10, rs2, rs1, 0x6, rd, 0x33); ++} ++ ++/* RVZBB instrutions. */ ++static inline u32 rvzbb_sextb(u8 rd, u8 rs1) ++{ ++ return rv_i_insn(0x604, rs1, 1, rd, 0x13); ++} ++ ++static inline u32 rvzbb_sexth(u8 rd, u8 rs1) ++{ ++ return rv_i_insn(0x605, rs1, 1, rd, 0x13); ++} ++ ++static inline u32 rvzbb_zexth(u8 rd, u8 rs) ++{ ++ if (IS_ENABLED(CONFIG_64BIT)) ++ return rv_i_insn(0x80, rs, 4, rd, 0x3b); ++ ++ return rv_i_insn(0x80, rs, 4, rd, 0x33); ++} ++ ++static inline u32 rvzbb_rev8(u8 rd, u8 rs) ++{ ++ if (IS_ENABLED(CONFIG_64BIT)) ++ return rv_i_insn(0x6b8, rs, 5, rd, 0x13); ++ ++ return rv_i_insn(0x698, rs, 5, rd, 0x13); ++} ++ + /* + * RV64-only instructions. + * +@@ -905,6 +953,14 @@ static inline u16 rvc_sdsp(u32 imm9, u8 rs2) + return rv_css_insn(0x7, imm, rs2, 0x2); + } + ++/* RV64-only ZBA instructions. */ ++ ++static inline u32 rvzba_zextw(u8 rd, u8 rs1) ++{ ++ /* add.uw rd, rs1, ZERO */ ++ return rv_r_insn(0x04, RV_REG_ZERO, rs1, 0, rd, 0x3b); ++} ++ + #endif /* __riscv_xlen == 64 */ + + /* Helper functions that emit RVC instructions when possible. 
*/ +@@ -1048,6 +1104,28 @@ static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx) + emit(rv_sw(rs1, off, rs2), ctx); + } + ++static inline void emit_sh2add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) ++{ ++ if (rvzba_enabled()) { ++ emit(rvzba_sh2add(rd, rs1, rs2), ctx); ++ return; ++ } ++ ++ emit_slli(rd, rs1, 2, ctx); ++ emit_add(rd, rd, rs2, ctx); ++} ++ ++static inline void emit_sh3add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) ++{ ++ if (rvzba_enabled()) { ++ emit(rvzba_sh3add(rd, rs1, rs2), ctx); ++ return; ++ } ++ ++ emit_slli(rd, rs1, 3, ctx); ++ emit_add(rd, rd, rs2, ctx); ++} ++ + /* RV64-only helper functions. */ + #if __riscv_xlen == 64 + +@@ -1087,6 +1165,113 @@ static inline void emit_subw(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) + emit(rv_subw(rd, rs1, rs2), ctx); + } + ++static inline void emit_sextb(u8 rd, u8 rs, struct rv_jit_context *ctx) ++{ ++ if (rvzbb_enabled()) { ++ emit(rvzbb_sextb(rd, rs), ctx); ++ return; ++ } ++ ++ emit_slli(rd, rs, 56, ctx); ++ emit_srai(rd, rd, 56, ctx); ++} ++ ++static inline void emit_sexth(u8 rd, u8 rs, struct rv_jit_context *ctx) ++{ ++ if (rvzbb_enabled()) { ++ emit(rvzbb_sexth(rd, rs), ctx); ++ return; ++ } ++ ++ emit_slli(rd, rs, 48, ctx); ++ emit_srai(rd, rd, 48, ctx); ++} ++ ++static inline void emit_sextw(u8 rd, u8 rs, struct rv_jit_context *ctx) ++{ ++ emit_addiw(rd, rs, 0, ctx); ++} ++ ++static inline void emit_zexth(u8 rd, u8 rs, struct rv_jit_context *ctx) ++{ ++ if (rvzbb_enabled()) { ++ emit(rvzbb_zexth(rd, rs), ctx); ++ return; ++ } ++ ++ emit_slli(rd, rs, 48, ctx); ++ emit_srli(rd, rd, 48, ctx); ++} ++ ++static inline void emit_zextw(u8 rd, u8 rs, struct rv_jit_context *ctx) ++{ ++ if (rvzba_enabled()) { ++ emit(rvzba_zextw(rd, rs), ctx); ++ return; ++ } ++ ++ emit_slli(rd, rs, 32, ctx); ++ emit_srli(rd, rd, 32, ctx); ++} ++ ++static inline void emit_bswap(u8 rd, s32 imm, struct rv_jit_context *ctx) ++{ ++ if (rvzbb_enabled()) { ++ int bits = 64 - imm; 
++ ++ emit(rvzbb_rev8(rd, rd), ctx); ++ if (bits) ++ emit_srli(rd, rd, bits, ctx); ++ return; ++ } ++ ++ emit_li(RV_REG_T2, 0, ctx); ++ ++ emit_andi(RV_REG_T1, rd, 0xff, ctx); ++ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); ++ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); ++ emit_srli(rd, rd, 8, ctx); ++ if (imm == 16) ++ goto out_be; ++ ++ emit_andi(RV_REG_T1, rd, 0xff, ctx); ++ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); ++ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); ++ emit_srli(rd, rd, 8, ctx); ++ ++ emit_andi(RV_REG_T1, rd, 0xff, ctx); ++ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); ++ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); ++ emit_srli(rd, rd, 8, ctx); ++ if (imm == 32) ++ goto out_be; ++ ++ emit_andi(RV_REG_T1, rd, 0xff, ctx); ++ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); ++ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); ++ emit_srli(rd, rd, 8, ctx); ++ ++ emit_andi(RV_REG_T1, rd, 0xff, ctx); ++ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); ++ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); ++ emit_srli(rd, rd, 8, ctx); ++ ++ emit_andi(RV_REG_T1, rd, 0xff, ctx); ++ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); ++ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); ++ emit_srli(rd, rd, 8, ctx); ++ ++ emit_andi(RV_REG_T1, rd, 0xff, ctx); ++ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); ++ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); ++ emit_srli(rd, rd, 8, ctx); ++out_be: ++ emit_andi(RV_REG_T1, rd, 0xff, ctx); ++ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); ++ ++ emit_mv(rd, RV_REG_T2, ctx); ++} ++ + #endif /* __riscv_xlen == 64 */ + + void bpf_jit_build_prologue(struct rv_jit_context *ctx); +diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c +index 529a83b85c1c..90b42a9fbc58 100644 +--- a/arch/riscv/net/bpf_jit_comp32.c ++++ b/arch/riscv/net/bpf_jit_comp32.c +@@ -811,8 +811,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) + * if (!prog) + * goto out; + */ +- emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx); +- 
emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx); ++ emit_sh2add(RV_REG_T0, lo(idx_reg), lo(arr_reg), ctx); + off = offsetof(struct bpf_array, ptrs); + if (is_12b_check(off, insn)) + return -1; +diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c +index 16eb4cd11cbd..1462291807b7 100644 +--- a/arch/riscv/net/bpf_jit_comp64.c ++++ b/arch/riscv/net/bpf_jit_comp64.c +@@ -142,6 +142,19 @@ static bool in_auipc_jalr_range(s64 val) + val < ((1L << 31) - (1L << 11)); + } + ++/* Modify rd pointer to alternate reg to avoid corrupting original reg */ ++static void emit_sextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx) ++{ ++ emit_sextw(ra, *rd, ctx); ++ *rd = ra; ++} ++ ++static void emit_zextw_alt(u8 *rd, u8 ra, struct rv_jit_context *ctx) ++{ ++ emit_zextw(ra, *rd, ctx); ++ *rd = ra; ++} ++ + /* Emit fixed-length instructions for address */ + static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx) + { +@@ -327,12 +340,6 @@ static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff, + emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx); + } + +-static void emit_zext_32(u8 reg, struct rv_jit_context *ctx) +-{ +- emit_slli(reg, reg, 32, ctx); +- emit_srli(reg, reg, 32, ctx); +-} +- + static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) + { + int tc_ninsn, off, start_insn = ctx->ninsns; +@@ -347,7 +354,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) + */ + tc_ninsn = insn ? 
ctx->offset[insn] - ctx->offset[insn - 1] : + ctx->offset[0]; +- emit_zext_32(RV_REG_A2, ctx); ++ emit_zextw(RV_REG_A2, RV_REG_A2, ctx); + + off = offsetof(struct bpf_array, map.max_entries); + if (is_12b_check(off, insn)) +@@ -367,8 +374,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) + * if (!prog) + * goto out; + */ +- emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx); +- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx); ++ emit_sh3add(RV_REG_T2, RV_REG_A2, RV_REG_A1, ctx); + off = offsetof(struct bpf_array, ptrs); + if (is_12b_check(off, insn)) + return -1; +@@ -406,38 +412,6 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn, + *rs = bpf_to_rv_reg(insn->src_reg, ctx); + } + +-static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) +-{ +- emit_mv(RV_REG_T2, *rd, ctx); +- emit_zext_32(RV_REG_T2, ctx); +- emit_mv(RV_REG_T1, *rs, ctx); +- emit_zext_32(RV_REG_T1, ctx); +- *rd = RV_REG_T2; +- *rs = RV_REG_T1; +-} +- +-static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) +-{ +- emit_addiw(RV_REG_T2, *rd, 0, ctx); +- emit_addiw(RV_REG_T1, *rs, 0, ctx); +- *rd = RV_REG_T2; +- *rs = RV_REG_T1; +-} +- +-static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx) +-{ +- emit_mv(RV_REG_T2, *rd, ctx); +- emit_zext_32(RV_REG_T2, ctx); +- emit_zext_32(RV_REG_T1, ctx); +- *rd = RV_REG_T2; +-} +- +-static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx) +-{ +- emit_addiw(RV_REG_T2, *rd, 0, ctx); +- *rd = RV_REG_T2; +-} +- + static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr, + struct rv_jit_context *ctx) + { +@@ -520,32 +494,32 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, + emit(is64 ? rv_amoadd_d(rs, rs, rd, 1, 1) : + rv_amoadd_w(rs, rs, rd, 1, 1), ctx); + if (!is64) +- emit_zext_32(rs, ctx); ++ emit_zextw(rs, rs, ctx); + break; + case BPF_AND | BPF_FETCH: + emit(is64 ? 
rv_amoand_d(rs, rs, rd, 1, 1) : + rv_amoand_w(rs, rs, rd, 1, 1), ctx); + if (!is64) +- emit_zext_32(rs, ctx); ++ emit_zextw(rs, rs, ctx); + break; + case BPF_OR | BPF_FETCH: + emit(is64 ? rv_amoor_d(rs, rs, rd, 1, 1) : + rv_amoor_w(rs, rs, rd, 1, 1), ctx); + if (!is64) +- emit_zext_32(rs, ctx); ++ emit_zextw(rs, rs, ctx); + break; + case BPF_XOR | BPF_FETCH: + emit(is64 ? rv_amoxor_d(rs, rs, rd, 1, 1) : + rv_amoxor_w(rs, rs, rd, 1, 1), ctx); + if (!is64) +- emit_zext_32(rs, ctx); ++ emit_zextw(rs, rs, ctx); + break; + /* src_reg = atomic_xchg(dst_reg + off16, src_reg); */ + case BPF_XCHG: + emit(is64 ? rv_amoswap_d(rs, rs, rd, 1, 1) : + rv_amoswap_w(rs, rs, rd, 1, 1), ctx); + if (!is64) +- emit_zext_32(rs, ctx); ++ emit_zextw(rs, rs, ctx); + break; + /* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */ + case BPF_CMPXCHG: +@@ -1086,7 +1060,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + case BPF_ALU64 | BPF_MOV | BPF_X: + if (imm == 1) { + /* Special mov32 for zext */ +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + } + switch (insn->off) { +@@ -1094,16 +1068,17 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit_mv(rd, rs, ctx); + break; + case 8: ++ emit_sextb(rd, rs, ctx); ++ break; + case 16: +- emit_slli(RV_REG_T1, rs, 64 - insn->off, ctx); +- emit_srai(rd, RV_REG_T1, 64 - insn->off, ctx); ++ emit_sexth(rd, rs, ctx); + break; + case 32: +- emit_addiw(rd, rs, 0, ctx); ++ emit_sextw(rd, rs, ctx); + break; + } + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + + /* dst = dst OP src */ +@@ -1111,7 +1086,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + case BPF_ALU64 | BPF_ADD | BPF_X: + emit_add(rd, rd, rs, ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: +@@ 
-1121,31 +1096,31 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit_subw(rd, rd, rs, ctx); + + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + emit_and(rd, rd, rs, ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + emit_or(rd, rd, rs, ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + emit_xor(rd, rd, rs, ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_X: + emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: +@@ -1154,7 +1129,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + else + emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_X: +@@ -1163,25 +1138,25 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + else + emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU64 | BPF_LSH | BPF_X: + emit(is64 ? 
rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU64 | BPF_RSH | BPF_X: + emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + + /* dst = -dst */ +@@ -1189,73 +1164,27 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + case BPF_ALU64 | BPF_NEG: + emit_sub(rd, RV_REG_ZERO, rd, ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + + /* dst = BSWAP##imm(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + switch (imm) { + case 16: +- emit_slli(rd, rd, 48, ctx); +- emit_srli(rd, rd, 48, ctx); ++ emit_zexth(rd, rd, ctx); + break; + case 32: + if (!aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case 64: + /* Do nothing */ + break; + } + break; +- + case BPF_ALU | BPF_END | BPF_FROM_BE: + case BPF_ALU64 | BPF_END | BPF_FROM_LE: +- emit_li(RV_REG_T2, 0, ctx); +- +- emit_andi(RV_REG_T1, rd, 0xff, ctx); +- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); +- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); +- emit_srli(rd, rd, 8, ctx); +- if (imm == 16) +- goto out_be; +- +- emit_andi(RV_REG_T1, rd, 0xff, ctx); +- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); +- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); +- emit_srli(rd, rd, 8, ctx); +- +- emit_andi(RV_REG_T1, rd, 0xff, ctx); +- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); +- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); +- emit_srli(rd, rd, 8, ctx); +- if (imm == 32) +- goto out_be; +- +- emit_andi(RV_REG_T1, rd, 0xff, ctx); +- 
emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); +- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); +- emit_srli(rd, rd, 8, ctx); +- +- emit_andi(RV_REG_T1, rd, 0xff, ctx); +- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); +- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); +- emit_srli(rd, rd, 8, ctx); +- +- emit_andi(RV_REG_T1, rd, 0xff, ctx); +- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); +- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); +- emit_srli(rd, rd, 8, ctx); +- +- emit_andi(RV_REG_T1, rd, 0xff, ctx); +- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); +- emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx); +- emit_srli(rd, rd, 8, ctx); +-out_be: +- emit_andi(RV_REG_T1, rd, 0xff, ctx); +- emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx); +- +- emit_mv(rd, RV_REG_T2, ctx); ++ emit_bswap(rd, imm, ctx); + break; + + /* dst = imm */ +@@ -1263,7 +1192,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + case BPF_ALU64 | BPF_MOV | BPF_K: + emit_imm(rd, imm, ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + + /* dst = dst OP imm */ +@@ -1276,7 +1205,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit_add(rd, rd, RV_REG_T1, ctx); + } + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: +@@ -1287,7 +1216,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit_sub(rd, rd, RV_REG_T1, ctx); + } + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: +@@ -1298,7 +1227,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit_and(rd, rd, RV_REG_T1, ctx); + } + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_OR | 
BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: +@@ -1309,7 +1238,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit_or(rd, rd, RV_REG_T1, ctx); + } + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: +@@ -1320,7 +1249,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit_xor(rd, rd, RV_REG_T1, ctx); + } + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_K: +@@ -1328,7 +1257,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit(is64 ? rv_mul(rd, rd, RV_REG_T1) : + rv_mulw(rd, rd, RV_REG_T1), ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: +@@ -1340,7 +1269,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit(is64 ? rv_divu(rd, rd, RV_REG_T1) : + rv_divuw(rd, rd, RV_REG_T1), ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_K: +@@ -1352,14 +1281,14 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit(is64 ? 
rv_remu(rd, rd, RV_REG_T1) : + rv_remuw(rd, rd, RV_REG_T1), ctx); + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + emit_slli(rd, rd, imm, ctx); + + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: +@@ -1369,7 +1298,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit(rv_srliw(rd, rd, imm), ctx); + + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: +@@ -1379,7 +1308,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + emit(rv_sraiw(rd, rd, imm), ctx); + + if (!is64 && !aux->verifier_zext) +- emit_zext_32(rd, ctx); ++ emit_zextw(rd, rd, ctx); + break; + + /* JUMP off */ +@@ -1420,10 +1349,13 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + rvoff = rv_offset(i, off, ctx); + if (!is64) { + s = ctx->ninsns; +- if (is_signed_bpf_cond(BPF_OP(code))) +- emit_sext_32_rd_rs(&rd, &rs, ctx); +- else +- emit_zext_32_rd_rs(&rd, &rs, ctx); ++ if (is_signed_bpf_cond(BPF_OP(code))) { ++ emit_sextw_alt(&rs, RV_REG_T1, ctx); ++ emit_sextw_alt(&rd, RV_REG_T2, ctx); ++ } else { ++ emit_zextw_alt(&rs, RV_REG_T1, ctx); ++ emit_zextw_alt(&rd, RV_REG_T2, ctx); ++ } + e = ctx->ninsns; + + /* Adjust for extra insns */ +@@ -1434,8 +1366,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + /* Adjust for and */ + rvoff -= 4; + emit_and(RV_REG_T1, rd, rs, ctx); +- emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, +- ctx); ++ emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx); + } else { + emit_branch(BPF_OP(code), rd, rs, rvoff, ctx); + } +@@ -1464,18 +1395,18 @@ int bpf_jit_emit_insn(const struct 
bpf_insn *insn, struct rv_jit_context *ctx, + case BPF_JMP32 | BPF_JSLE | BPF_K: + rvoff = rv_offset(i, off, ctx); + s = ctx->ninsns; +- if (imm) { ++ if (imm) + emit_imm(RV_REG_T1, imm, ctx); +- rs = RV_REG_T1; +- } else { +- /* If imm is 0, simply use zero register. */ +- rs = RV_REG_ZERO; +- } ++ rs = imm ? RV_REG_T1 : RV_REG_ZERO; + if (!is64) { +- if (is_signed_bpf_cond(BPF_OP(code))) +- emit_sext_32_rd(&rd, ctx); +- else +- emit_zext_32_rd_t1(&rd, ctx); ++ if (is_signed_bpf_cond(BPF_OP(code))) { ++ emit_sextw_alt(&rd, RV_REG_T2, ctx); ++ /* rs has been sign extended */ ++ } else { ++ emit_zextw_alt(&rd, RV_REG_T2, ctx); ++ if (imm) ++ emit_zextw(rs, rs, ctx); ++ } + } + e = ctx->ninsns; + +@@ -1499,7 +1430,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, + * as t1 is used only in comparison against zero. + */ + if (!is64 && imm < 0) +- emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx); ++ emit_sextw(RV_REG_T1, RV_REG_T1, ctx); + e = ctx->ninsns; + rvoff -= ninsns_rvoff(e - s); + emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx); diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig index 75b3cf116dc8..7a67b60074fb 100644 --- a/arch/sw_64/Kconfig @@ -41735,6 +47430,118 @@ index 551829884734..dcfaa3812306 100644 #ifndef __ASSEMBLY__ #include +diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c +index c92d88680dbf..1715e5f06a59 100644 +--- a/arch/x86/kernel/crash.c ++++ b/arch/x86/kernel/crash.c +@@ -386,8 +386,8 @@ int crash_load_segments(struct kimage *image) + if (ret) + return ret; + image->elf_load_addr = kbuf.mem; +- pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n", +- image->elf_load_addr, kbuf.bufsz, kbuf.memsz); ++ kexec_dprintk("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ image->elf_load_addr, kbuf.bufsz, kbuf.memsz); + + return ret; + } +diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c +index a61c12c01270..e9ae0eac6bf9 100644 +--- 
a/arch/x86/kernel/kexec-bzimage64.c ++++ b/arch/x86/kernel/kexec-bzimage64.c +@@ -82,7 +82,7 @@ static int setup_cmdline(struct kimage *image, struct boot_params *params, + + cmdline_ptr[cmdline_len - 1] = '\0'; + +- pr_debug("Final command line is: %s\n", cmdline_ptr); ++ kexec_dprintk("Final command line is: %s\n", cmdline_ptr); + cmdline_ptr_phys = bootparams_load_addr + cmdline_offset; + cmdline_low_32 = cmdline_ptr_phys & 0xffffffffUL; + cmdline_ext_32 = cmdline_ptr_phys >> 32; +@@ -272,7 +272,12 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params, + + nr_e820_entries = params->e820_entries; + ++ kexec_dprintk("E820 memmap:\n"); + for (i = 0; i < nr_e820_entries; i++) { ++ kexec_dprintk("%016llx-%016llx (%d)\n", ++ params->e820_table[i].addr, ++ params->e820_table[i].addr + params->e820_table[i].size - 1, ++ params->e820_table[i].type); + if (params->e820_table[i].type != E820_TYPE_RAM) + continue; + start = params->e820_table[i].addr; +@@ -424,7 +429,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, + * command line. 
Make sure it does not overflow + */ + if (cmdline_len + MAX_ELFCOREHDR_STR_LEN > header->cmdline_size) { +- pr_debug("Appending elfcorehdr= to command line exceeds maximum allowed length\n"); ++ kexec_dprintk("Appending elfcorehdr= to command line exceeds maximum allowed length\n"); + return ERR_PTR(-EINVAL); + } + +@@ -445,7 +450,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, + return ERR_PTR(ret); + } + +- pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem); ++ kexec_dprintk("Loaded purgatory at 0x%lx\n", pbuf.mem); + + + /* +@@ -490,8 +495,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel, + if (ret) + goto out_free_params; + bootparam_load_addr = kbuf.mem; +- pr_debug("Loaded boot_param, command line and misc at 0x%lx bufsz=0x%lx memsz=0x%lx\n", +- bootparam_load_addr, kbuf.bufsz, kbuf.bufsz); ++ kexec_dprintk("Loaded boot_param, command line and misc at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ bootparam_load_addr, kbuf.bufsz, kbuf.bufsz); + + /* Load kernel */ + kbuf.buffer = kernel + kern16_size; +@@ -505,8 +510,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel, + goto out_free_params; + kernel_load_addr = kbuf.mem; + +- pr_debug("Loaded 64bit kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", +- kernel_load_addr, kbuf.bufsz, kbuf.memsz); ++ kexec_dprintk("Loaded 64bit kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ kernel_load_addr, kbuf.bufsz, kbuf.memsz); + + /* Load initrd high */ + if (initrd) { +@@ -520,8 +525,8 @@ static void *bzImage64_load(struct kimage *image, char *kernel, + goto out_free_params; + initrd_load_addr = kbuf.mem; + +- pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", +- initrd_load_addr, initrd_len, initrd_len); ++ kexec_dprintk("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n", ++ initrd_load_addr, initrd_len, initrd_len); + + setup_initrd(params, initrd_load_addr, initrd_len); + } +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 2139f728aecc..3b6fb233ded7 100644 
+--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -13494,10 +13494,12 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, + kvm_arch_end_assignment(irqfd->kvm); + } + +-int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, +- uint32_t guest_irq, bool set) ++int kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, ++ struct kvm_kernel_irq_routing_entry *old, ++ struct kvm_kernel_irq_routing_entry *new) + { +- return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set); ++ return static_call(kvm_x86_pi_update_irte)(irqfd->kvm, irqfd->producer->irq, ++ irqfd->gsi, 1); + } + + bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 8e1ef5345b7a..a67bb8f982bd 100644 --- a/arch/x86/mm/pgtable.c @@ -41750,10 +47557,10 @@ index 8e1ef5345b7a..a67bb8f982bd 100644 paravirt_tlb_remove_table(tlb, virt_to_page(pud)); } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig -index e66874d4c3e3..535df8bb14f4 100644 +index e74d1873bc3c..59feaff53901 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig -@@ -281,7 +281,7 @@ config ACPI_CPPC_LIB +@@ -288,7 +288,7 @@ config ACPI_CPPC_LIB config ACPI_PROCESSOR tristate "Processor" @@ -41762,6 +47569,17 @@ index e66874d4c3e3..535df8bb14f4 100644 select ACPI_PROCESSOR_IDLE select ACPI_CPU_FREQ_PSS if X86 || IA64 || LOONGARCH select THERMAL +@@ -559,6 +559,10 @@ if ARM64 + source "drivers/acpi/arm64/Kconfig" + endif + ++if RISCV ++source "drivers/acpi/riscv/Kconfig" ++endif ++ + config ACPI_PPTT + bool + diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index eaa09bf52f17..d367e649714f 100644 --- a/drivers/acpi/Makefile @@ -41859,8 +47677,183 @@ index 98a2ab3b6844..1a418424d250 100644 } static void acpi_lpss_link_consumer(struct device *dev1, +diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig +index 1dce3ad7c9bd..de7707cea07f 100644 +--- 
a/drivers/acpi/apei/Kconfig ++++ b/drivers/acpi/apei/Kconfig +@@ -52,6 +52,11 @@ config ACPI_APEI_SEA + depends on ARM64 && ACPI_APEI_GHES + default y + ++config ACPI_APEI_SSE ++ bool ++ depends on RISCV && RISCV_SSE && ACPI_APEI_GHES ++ default y ++ + config ACPI_APEI_MEMORY_FAILURE + bool "APEI memory error recovering support" + depends on ACPI_APEI && MEMORY_FAILURE +diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c +index 97a068d86396..629b07dde5be 100644 +--- a/drivers/acpi/apei/ghes.c ++++ b/drivers/acpi/apei/ghes.c +@@ -17,6 +17,7 @@ + * Author: Huang Ying + */ + ++#include + #include + #include + #include +@@ -94,6 +95,11 @@ + #define FIX_APEI_GHES_SDEI_CRITICAL __end_of_fixed_addresses + #endif + ++#ifndef CONFIG_RISCV_SSE ++#define FIX_APEI_GHES_SSE_LOW_PRIORITY __end_of_fixed_addresses ++#define FIX_APEI_GHES_SSE_HIGH_PRIORITY __end_of_fixed_addresses ++#endif ++ + static ATOMIC_NOTIFIER_HEAD(ghes_report_chain); + + static inline bool is_hest_type_generic_v2(struct ghes *ghes) +@@ -1188,6 +1194,7 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, + return rc; + } + ++#if defined(CONFIG_HAVE_ACPI_APEI_NMI) || defined(CONFIG_ACPI_APEI_SEA) + static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list, + enum fixed_addresses fixmap_idx) + { +@@ -1206,6 +1213,7 @@ static int ghes_in_nmi_spool_from_list(struct list_head *rcu_list, + + return ret; + } ++#endif + + #ifdef CONFIG_ACPI_APEI_SEA + static LIST_HEAD(ghes_sea); +@@ -1360,6 +1368,63 @@ static int apei_sdei_unregister_ghes(struct ghes *ghes) + return sdei_unregister_ghes(ghes); + } + ++#if defined(CONFIG_ACPI_APEI_SSE) ++/* SSE Handlers */ ++static int __ghes_sse_callback(struct ghes *ghes, ++ enum fixed_addresses fixmap_idx) ++{ ++ if (!ghes_in_nmi_queue_one_entry(ghes, fixmap_idx)) { ++ irq_work_queue(&ghes_proc_irq_work); ++ ++ return 0; ++ } ++ ++ return -ENOENT; ++} ++ ++/* Low priority */ ++static int ghes_sse_lo_callback(u32 event_num, void *arg, struct pt_regs 
*regs) ++{ ++ static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sse_lo); ++ struct ghes *ghes = arg; ++ int err; ++ ++ raw_spin_lock(&ghes_notify_lock_sse_lo); ++ err = __ghes_sse_callback(ghes, FIX_APEI_GHES_SSE_LOW_PRIORITY); ++ raw_spin_unlock(&ghes_notify_lock_sse_lo); ++ ++ return err; ++} ++ ++/* High priority */ ++static int ghes_sse_hi_callback(u32 event_num, void *arg, struct pt_regs *regs) ++{ ++ static DEFINE_RAW_SPINLOCK(ghes_notify_lock_sse_hi); ++ struct ghes *ghes = arg; ++ int err; ++ ++ raw_spin_lock(&ghes_notify_lock_sse_hi); ++ err = __ghes_sse_callback(ghes, FIX_APEI_GHES_SSE_HIGH_PRIORITY); ++ raw_spin_unlock(&ghes_notify_lock_sse_hi); ++ ++ return err; ++} ++ ++static int apei_sse_register_ghes(struct ghes *ghes) ++{ ++ return sse_register_ghes(ghes, ghes_sse_lo_callback, ++ ghes_sse_hi_callback); ++} ++ ++static int apei_sse_unregister_ghes(struct ghes *ghes) ++{ ++ return sse_unregister_ghes(ghes); ++} ++#else /* CONFIG_ACPI_APEI_SSE */ ++static int apei_sse_register_ghes(struct ghes *ghes) { return -EOPNOTSUPP; } ++static int apei_sse_unregister_ghes(struct ghes *ghes) { return -EOPNOTSUPP; } ++#endif ++ + static int ghes_probe(struct platform_device *ghes_dev) + { + struct acpi_hest_generic *generic; +@@ -1406,6 +1471,14 @@ static int ghes_probe(struct platform_device *ghes_dev) + pr_warn(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", + generic->header.source_id); + goto err; ++ case ACPI_HEST_NOTIFY_SSE: ++ if (!IS_ENABLED(CONFIG_ACPI_APEI_SSE)) { ++ pr_warn(GHES_PFX "Generic hardware error source: %d notified via SSE is not supported\n", ++ generic->header.source_id); ++ rc = -EOPNOTSUPP; ++ goto err; ++ } ++ break; + default: + pr_warn(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n", + generic->notify.type, generic->header.source_id); +@@ -1469,6 +1542,17 @@ static int ghes_probe(struct platform_device *ghes_dev) + if (rc) + goto err; + break; ++ ++ case 
ACPI_HEST_NOTIFY_SSE: ++ rc = apei_sse_register_ghes(ghes); ++ if (rc) { ++ pr_err(GHES_PFX "Failed to register for SSE notification on vector %d\n", ++ generic->notify.vector); ++ goto err; ++ } ++ pr_err(GHES_PFX "Registered SSE notification on vector %d\n", ++ generic->notify.vector); ++ break; + default: + BUG(); + } +@@ -1498,7 +1582,6 @@ static int ghes_probe(struct platform_device *ghes_dev) + + static int ghes_remove(struct platform_device *ghes_dev) + { +- int rc; + struct ghes *ghes; + struct acpi_hest_generic *generic; + +@@ -1532,9 +1615,10 @@ static int ghes_remove(struct platform_device *ghes_dev) + ghes_nmi_remove(ghes); + break; + case ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED: +- rc = apei_sdei_unregister_ghes(ghes); +- if (rc) +- return rc; ++ apei_sdei_unregister_ghes(ghes); ++ break; ++ case ACPI_HEST_NOTIFY_SSE: ++ apei_sse_unregister_ghes(ghes); + break; + default: + BUG(); diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c -index 93d796531af3..52b2abf88689 100644 +index 93d796531af3..f30f138352b7 100644 --- a/drivers/acpi/arm64/dma.c +++ b/drivers/acpi/arm64/dma.c @@ -8,7 +8,6 @@ void acpi_arch_dma_setup(struct device *dev) @@ -41871,7 +47864,7 @@ index 93d796531af3..52b2abf88689 100644 const struct bus_dma_region *map = NULL; /* -@@ -23,31 +22,23 @@ void acpi_arch_dma_setup(struct device *dev) +@@ -23,31 +22,28 @@ void acpi_arch_dma_setup(struct device *dev) } if (dev->coherent_dma_mask) @@ -41880,6 +47873,11 @@ index 93d796531af3..52b2abf88689 100644 else - size = 1ULL << 32; + end = (1ULL << 32) - 1; ++ ++ if (dev->dma_range_map) { ++ dev_dbg(dev, "dma_range_map already set\n"); ++ return; ++ } ret = acpi_dma_get_range(dev, &map); if (!ret && map) { @@ -41908,10 +47906,46 @@ index 93d796531af3..52b2abf88689 100644 dev->bus_dma_limit = end; dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c -index ebb52d2b22de..678431a862bc 100644 +index 
ebb52d2b22de..374d52ef4188 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c -@@ -1466,7 +1466,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id) +@@ -1318,10 +1318,10 @@ static bool iort_pci_rc_supports_canwbs(struct acpi_iort_node *node) + static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, + u32 streamid) + { +- const struct iommu_ops *ops; + struct fwnode_handle *iort_fwnode; + +- if (!node) ++ /* If there's no SMMU driver at all, give up now */ ++ if (!node || !iort_iommu_driver_enabled(node->type)) + return -ENODEV; + + iort_fwnode = iort_get_fwnode(node); +@@ -1329,19 +1329,10 @@ static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node, + return -ENODEV; + + /* +- * If the ops look-up fails, this means that either +- * the SMMU drivers have not been probed yet or that +- * the SMMU drivers are not built in the kernel; +- * Depending on whether the SMMU drivers are built-in +- * in the kernel or not, defer the IOMMU configuration +- * or just abort it. ++ * If the SMMU drivers are enabled but not loaded/probed ++ * yet, this will defer. + */ +- ops = iommu_ops_from_fwnode(iort_fwnode); +- if (!ops) +- return iort_iommu_driver_enabled(node->type) ? 
+- -EPROBE_DEFER : -ENODEV; +- +- return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode, ops); ++ return acpi_iommu_fwspec_init(dev, streamid, iort_fwnode); + } + + struct iort_pci_alias_info { +@@ -1466,7 +1457,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id) { return -ENODEV; } #endif @@ -41920,7 +47954,7 @@ index ebb52d2b22de..678431a862bc 100644 { struct acpi_iort_node *node; struct acpi_iort_named_component *ncomp; -@@ -1483,13 +1483,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size) +@@ -1483,13 +1474,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size) return -EINVAL; } @@ -41937,7 +47971,7 @@ index ebb52d2b22de..678431a862bc 100644 { struct acpi_iort_node *node; struct acpi_iort_root_complex *rc; -@@ -1507,8 +1507,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size) +@@ -1507,8 +1498,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size) return -EINVAL; } @@ -41948,7 +47982,7 @@ index ebb52d2b22de..678431a862bc 100644 return 0; } -@@ -1516,16 +1516,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size) +@@ -1516,16 +1507,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size) /** * iort_dma_get_ranges() - Look up DMA addressing limit for the device * @dev: device to lookup @@ -42435,16 +48469,30 @@ index f5cb96ff8768..82f049627306 100644 }; static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; +diff --git a/drivers/acpi/riscv/Kconfig b/drivers/acpi/riscv/Kconfig +new file mode 100644 +index 000000000000..046296a18d00 +--- /dev/null ++++ b/drivers/acpi/riscv/Kconfig +@@ -0,0 +1,7 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++# ++# ACPI Configuration for RISC-V ++# ++ ++config ACPI_RIMT ++ bool diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile -index 8b3b126e0b94..a96fdf1e2cb8 100644 +index 8b3b126e0b94..1284a076fa88 100644 --- a/drivers/acpi/riscv/Makefile +++ b/drivers/acpi/riscv/Makefile -@@ -1,2 +1,4 @@ +@@ -1,2 +1,5 @@ # SPDX-License-Identifier: 
GPL-2.0-only -obj-y += rhct.o +obj-y += rhct.o init.o irq.o +obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o +obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o ++obj-$(CONFIG_ACPI_RIMT) += rimt.o diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c new file mode 100644 index 000000000000..4cdff387deff @@ -42697,10 +48745,10 @@ index 000000000000..624f9bbdb58c +} diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c new file mode 100644 -index 000000000000..5ef97905a727 +index 000000000000..945e629c6eca --- /dev/null +++ b/drivers/acpi/riscv/init.c -@@ -0,0 +1,13 @@ +@@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023-2024, Ventana Micro Systems Inc @@ -42713,17 +48761,20 @@ index 000000000000..5ef97905a727 +void __init acpi_riscv_init(void) +{ + riscv_acpi_init_gsi_mapping(); ++ if (IS_ENABLED(CONFIG_ACPI_RIMT)) ++ riscv_acpi_rimt_init(); +} diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h new file mode 100644 -index 000000000000..0b9a07e4031f +index 000000000000..1680aa2aaf23 --- /dev/null +++ b/drivers/acpi/riscv/init.h -@@ -0,0 +1,4 @@ +@@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#include + +void __init riscv_acpi_init_gsi_mapping(void); ++void __init riscv_acpi_rimt_init(void); diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c new file mode 100644 index 000000000000..cced960c2aef @@ -43188,11 +49239,545 @@ index b280b3e9c7d9..caa2c16e1697 100644 + } + } +} +diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c +new file mode 100644 +index 000000000000..683fcfe35c31 +--- /dev/null ++++ b/drivers/acpi/riscv/rimt.c +@@ -0,0 +1,520 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2024-2025, Ventana Micro Systems Inc ++ * Author: Sunil V L ++ * ++ */ ++ ++#define pr_fmt(fmt) "ACPI: RIMT: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include "init.h" ++ ++struct rimt_fwnode { ++ struct list_head list; ++ 
struct acpi_rimt_node *rimt_node; ++ struct fwnode_handle *fwnode; ++}; ++ ++static LIST_HEAD(rimt_fwnode_list); ++static DEFINE_SPINLOCK(rimt_fwnode_lock); ++ ++#define RIMT_TYPE_MASK(type) (1 << (type)) ++#define RIMT_IOMMU_TYPE BIT(0) ++ ++/* Root pointer to the mapped RIMT table */ ++static struct acpi_table_header *rimt_table; ++ ++/** ++ * rimt_set_fwnode() - Create rimt_fwnode and use it to register ++ * iommu data in the rimt_fwnode_list ++ * ++ * @rimt_node: RIMT table node associated with the IOMMU ++ * @fwnode: fwnode associated with the RIMT node ++ * ++ * Returns: 0 on success ++ * <0 on failure ++ */ ++static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node, ++ struct fwnode_handle *fwnode) ++{ ++ struct rimt_fwnode *np; ++ ++ np = kzalloc(sizeof(*np), GFP_ATOMIC); ++ ++ if (WARN_ON(!np)) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(&np->list); ++ np->rimt_node = rimt_node; ++ np->fwnode = fwnode; ++ ++ spin_lock(&rimt_fwnode_lock); ++ list_add_tail(&np->list, &rimt_fwnode_list); ++ spin_unlock(&rimt_fwnode_lock); ++ ++ return 0; ++} ++ ++/** ++ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node ++ * ++ * @node: RIMT table node to be looked-up ++ * ++ * Returns: fwnode_handle pointer on success, NULL on failure ++ */ ++static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node) ++{ ++ struct fwnode_handle *fwnode = NULL; ++ struct rimt_fwnode *curr; ++ ++ spin_lock(&rimt_fwnode_lock); ++ list_for_each_entry(curr, &rimt_fwnode_list, list) { ++ if (curr->rimt_node == node) { ++ fwnode = curr->fwnode; ++ break; ++ } ++ } ++ spin_unlock(&rimt_fwnode_lock); ++ ++ return fwnode; ++} ++ ++static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node, ++ void *context) ++{ ++ acpi_status status = AE_NOT_FOUND; ++ struct device *dev = context; ++ ++ if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) { ++ struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data; ++ ++ if (dev_is_pci(dev)) { ++ struct 
pci_dev *pdev; ++ u16 bdf; ++ ++ pdev = to_pci_dev(dev); ++ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); ++ if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) && ++ bdf == iommu_node->pcie_bdf) { ++ status = AE_OK; ++ } else { ++ status = AE_NOT_FOUND; ++ } ++ } else { ++ struct platform_device *pdev = to_platform_device(dev); ++ struct resource *res; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (res && res->start == iommu_node->base_address) ++ status = AE_OK; ++ else ++ status = AE_NOT_FOUND; ++ } ++ } else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { ++ struct acpi_rimt_pcie_rc *pci_rc; ++ struct pci_bus *bus; ++ ++ bus = to_pci_bus(dev); ++ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data; ++ ++ /* ++ * It is assumed that PCI segment numbers maps one-to-one ++ * with root complexes. Each segment number can represent only ++ * one root complex. ++ */ ++ status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ? ++ AE_OK : AE_NOT_FOUND; ++ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) { ++ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; ++ struct acpi_rimt_platform_device *ncomp; ++ struct device *plat_dev = dev; ++ struct acpi_device *adev; ++ ++ /* ++ * Walk the device tree to find a device with an ++ * ACPI companion; there is no point in scanning ++ * RIMT for a device matching a platform device if ++ * the device does not have an ACPI companion to ++ * start with. ++ */ ++ do { ++ adev = ACPI_COMPANION(plat_dev); ++ if (adev) ++ break; ++ ++ plat_dev = plat_dev->parent; ++ } while (plat_dev); ++ ++ if (!adev) ++ return status; ++ ++ status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); ++ if (ACPI_FAILURE(status)) { ++ dev_warn(plat_dev, "Can't get device full path name\n"); ++ return status; ++ } ++ ++ ncomp = (struct acpi_rimt_platform_device *)node->node_data; ++ status = !strcmp(ncomp->device_name, buf.pointer) ? 
++ AE_OK : AE_NOT_FOUND; ++ acpi_os_free(buf.pointer); ++ } ++ ++ return status; ++} ++ ++static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type, ++ void *context) ++{ ++ struct acpi_rimt_node *rimt_node, *rimt_end; ++ struct acpi_table_rimt *rimt; ++ int i; ++ ++ if (!rimt_table) ++ return NULL; ++ ++ /* Get the first RIMT node */ ++ rimt = (struct acpi_table_rimt *)rimt_table; ++ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt, ++ rimt->node_offset); ++ rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, ++ rimt_table->length); ++ ++ for (i = 0; i < rimt->num_nodes; i++) { ++ if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND, ++ "RIMT node pointer overflows, bad table!\n")) ++ return NULL; ++ ++ if (rimt_node->type == type && ++ ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context))) ++ return rimt_node; ++ ++ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node, ++ rimt_node->length); ++ } ++ ++ return NULL; ++} ++ ++static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node) ++{ ++ struct acpi_rimt_pcie_rc *pci_rc; ++ ++ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data; ++ return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED; ++} ++ ++static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid) ++{ ++ struct fwnode_handle *rimt_fwnode; ++ ++ if (!node) ++ return -ENODEV; ++ ++ rimt_fwnode = rimt_get_fwnode(node); ++ ++ /* ++ * The IOMMU drivers may not be probed yet. 
++ * Defer the IOMMU configuration ++ */ ++ if (!rimt_fwnode) ++ return -EPROBE_DEFER; ++ ++ return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode); ++} ++ ++struct rimt_pci_alias_info { ++ struct device *dev; ++ struct acpi_rimt_node *node; ++ const struct iommu_ops *ops; ++}; ++ ++static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out) ++{ ++ if (rid_in < map->source_id_base || ++ (rid_in > map->source_id_base + map->num_ids)) ++ return -ENXIO; ++ ++ *rid_out = map->dest_id_base + (rid_in - map->source_id_base); ++ return 0; ++} ++ ++static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node, ++ u32 *id_out, int index) ++{ ++ struct acpi_rimt_platform_device *plat_node; ++ u32 id_mapping_offset, num_id_mapping; ++ struct acpi_rimt_pcie_rc *pci_node; ++ struct acpi_rimt_id_mapping *map; ++ struct acpi_rimt_node *parent; ++ ++ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { ++ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data; ++ id_mapping_offset = pci_node->id_mapping_offset; ++ num_id_mapping = pci_node->num_id_mappings; ++ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) { ++ plat_node = (struct acpi_rimt_platform_device *)&node->node_data; ++ id_mapping_offset = plat_node->id_mapping_offset; ++ num_id_mapping = plat_node->num_id_mappings; ++ } else { ++ return NULL; ++ } ++ ++ if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping) ++ return NULL; ++ ++ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node, ++ id_mapping_offset + index * sizeof(*map)); ++ ++ /* Firmware bug! 
*/ ++ if (!map->dest_offset) { ++ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", ++ node, node->type); ++ return NULL; ++ } ++ ++ parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset); ++ ++ if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE || ++ node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { ++ *id_out = map->dest_id_base; ++ return parent; ++ } ++ ++ return NULL; ++} ++ ++/* ++ * RISC-V supports IOMMU as a PCI device or a platform device. ++ * When it is a platform device, there should be a namespace device as ++ * well along with RIMT. To create the link between RIMT information and ++ * the platform device, the IOMMU driver should register itself with the ++ * RIMT module. This is true for PCI based IOMMU as well. ++ */ ++int rimt_iommu_register(struct device *dev) ++{ ++ struct fwnode_handle *rimt_fwnode; ++ struct acpi_rimt_node *node; ++ ++ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev); ++ if (!node) { ++ pr_err("Could not find IOMMU node in RIMT\n"); ++ return -ENODEV; ++ } ++ ++ if (dev_is_pci(dev)) { ++ rimt_fwnode = acpi_alloc_fwnode_static(); ++ if (!rimt_fwnode) ++ return -ENOMEM; ++ ++ rimt_fwnode->dev = dev; ++ if (!dev->fwnode) ++ dev->fwnode = rimt_fwnode; ++ ++ rimt_set_fwnode(node, rimt_fwnode); ++ } else { ++ rimt_set_fwnode(node, dev->fwnode); ++ } ++ ++ return 0; ++} ++ ++#ifdef CONFIG_IOMMU_API ++ ++static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node, ++ u32 id_in, u32 *id_out, ++ u8 type_mask) ++{ ++ struct acpi_rimt_platform_device *plat_node; ++ u32 id_mapping_offset, num_id_mapping; ++ struct acpi_rimt_pcie_rc *pci_node; ++ u32 id = id_in; ++ ++ /* Parse the ID mapping tree to find specified node type */ ++ while (node) { ++ struct acpi_rimt_id_mapping *map; ++ int i, rc = 0; ++ u32 map_id = id; ++ ++ if (RIMT_TYPE_MASK(node->type) & type_mask) { ++ if (id_out) ++ *id_out = id; ++ return node; ++ } ++ ++ if (node->type == 
ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) { ++ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data; ++ id_mapping_offset = pci_node->id_mapping_offset; ++ num_id_mapping = pci_node->num_id_mappings; ++ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) { ++ plat_node = (struct acpi_rimt_platform_device *)&node->node_data; ++ id_mapping_offset = plat_node->id_mapping_offset; ++ num_id_mapping = plat_node->num_id_mappings; ++ } else { ++ goto fail_map; ++ } ++ ++ if (!id_mapping_offset || !num_id_mapping) ++ goto fail_map; ++ ++ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node, ++ id_mapping_offset); ++ ++ /* Firmware bug! */ ++ if (!map->dest_offset) { ++ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n", ++ node, node->type); ++ goto fail_map; ++ } ++ ++ /* Do the ID translation */ ++ for (i = 0; i < num_id_mapping; i++, map++) { ++ rc = rimt_id_map(map, node->type, map_id, &id); ++ if (!rc) ++ break; ++ } ++ ++ if (i == num_id_mapping) ++ goto fail_map; ++ ++ node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, ++ rc ? 
0 : map->dest_offset); ++ } ++ ++fail_map: ++ /* Map input ID to output ID unchanged on mapping failure */ ++ if (id_out) ++ *id_out = id_in; ++ ++ return NULL; ++} ++ ++static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out, ++ u8 type_mask, int index) ++{ ++ struct acpi_rimt_node *parent; ++ u32 id; ++ ++ parent = rimt_node_get_id(node, &id, index); ++ if (!parent) ++ return NULL; ++ ++ if (!(RIMT_TYPE_MASK(parent->type) & type_mask)) ++ parent = rimt_node_map_id(parent, id, id_out, type_mask); ++ else ++ if (id_out) ++ *id_out = id; ++ ++ return parent; ++} ++ ++static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data) ++{ ++ struct rimt_pci_alias_info *info = data; ++ struct acpi_rimt_node *parent; ++ u32 deviceid; ++ ++ parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE); ++ return rimt_iommu_xlate(info->dev, parent, deviceid); ++} ++ ++static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node) ++{ ++ struct acpi_rimt_node *parent; ++ int err = -ENODEV, i = 0; ++ u32 deviceid = 0; ++ ++ do { ++ parent = rimt_node_map_platform_id(node, &deviceid, ++ RIMT_IOMMU_TYPE, ++ i++); ++ ++ if (parent) ++ err = rimt_iommu_xlate(dev, parent, deviceid); ++ } while (parent && !err); ++ ++ return err; ++} ++ ++static int rimt_plat_iommu_map_id(struct device *dev, ++ struct acpi_rimt_node *node, ++ const u32 *in_id) ++{ ++ struct acpi_rimt_node *parent; ++ u32 deviceid; ++ ++ parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE); ++ if (parent) ++ return rimt_iommu_xlate(dev, parent, deviceid); ++ ++ return -ENODEV; ++} ++ ++/** ++ * rimt_iommu_configure_id - Set-up IOMMU configuration for a device. 
++ * ++ * @dev: device to configure ++ * @id_in: optional input id const value pointer ++ * ++ * Returns: 0 on success, <0 on failure ++ */ ++int rimt_iommu_configure_id(struct device *dev, const u32 *id_in) ++{ ++ struct acpi_rimt_node *node; ++ int err = -ENODEV; ++ ++ if (dev_is_pci(dev)) { ++ struct iommu_fwspec *fwspec; ++ struct pci_bus *bus = to_pci_dev(dev)->bus; ++ struct rimt_pci_alias_info info = { .dev = dev }; ++ ++ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev); ++ if (!node) ++ return -ENODEV; ++ ++ info.node = node; ++ err = pci_for_each_dma_alias(to_pci_dev(dev), ++ rimt_pci_iommu_init, &info); ++ ++ fwspec = dev_iommu_fwspec_get(dev); ++ if (fwspec && rimt_pcie_rc_supports_ats(node)) ++ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; ++ } else { ++ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev); ++ if (!node) ++ return -ENODEV; ++ ++ err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) : ++ rimt_plat_iommu_map(dev, node); ++ } ++ ++ return err; ++} ++ ++#endif ++ ++void __init riscv_acpi_rimt_init(void) ++{ ++ acpi_status status; ++ ++ /* rimt_table will be used at runtime after the rimt init, ++ * so we don't need to call acpi_put_table() to release ++ * the RIMT table mapping. 
++ */ ++ status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table); ++ if (ACPI_FAILURE(status)) { ++ if (status != AE_NOT_FOUND) { ++ const char *msg = acpi_format_exception(status); ++ ++ pr_err("Failed to get table, %s\n", msg); ++ } ++ ++ return; ++ } ++} diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c -index 9a40052d31f3..84dcd3b5ce83 100644 +index 9a40052d31f3..95bcc37fdddf 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c -@@ -870,6 +870,9 @@ static const char * const acpi_honor_dep_ids[] = { +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -870,6 +871,9 @@ static const char * const acpi_honor_dep_ids[] = { "INTC1059", /* IVSC (TGL) driver must be loaded to allow i2c access to camera sensors */ "INTC1095", /* IVSC (ADL) driver must be loaded to allow i2c access to camera sensors */ "INTC100A", /* IVSC (RPL) driver must be loaded to allow i2c access to camera sensors */ @@ -43202,12 +49787,102 @@ index 9a40052d31f3..84dcd3b5ce83 100644 NULL }; -@@ -2034,54 +2037,18 @@ void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val) +@@ -1616,10 +1620,9 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map) + + #ifdef CONFIG_IOMMU_API + int acpi_iommu_fwspec_init(struct device *dev, u32 id, +- struct fwnode_handle *fwnode, +- const struct iommu_ops *ops) ++ struct fwnode_handle *fwnode) + { +- int ret = iommu_fwspec_init(dev, fwnode, ops); ++ int ret = iommu_fwspec_init(dev, fwnode); + + if (!ret) + ret = iommu_fwspec_add_ids(dev, &id, 1); +@@ -1627,59 +1630,33 @@ int acpi_iommu_fwspec_init(struct device *dev, u32 id, + return ret; + } + +-static inline const struct iommu_ops *acpi_iommu_fwspec_ops(struct device *dev) +-{ +- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); +- +- return fwspec ? 
fwspec->ops : NULL; +-} +- + static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in) + { + int err; +- const struct iommu_ops *ops; + + /* Serialise to make dev->iommu stable under our potential fwspec */ + mutex_lock(&iommu_probe_device_lock); +- /* +- * If we already translated the fwspec there is nothing left to do, +- * return the iommu_ops. +- */ +- ops = acpi_iommu_fwspec_ops(dev); +- if (ops) { ++ /* If we already translated the fwspec there is nothing left to do */ ++ if (dev_iommu_fwspec_get(dev)) { + mutex_unlock(&iommu_probe_device_lock); + return 0; + } + + err = iort_iommu_configure_id(dev, id_in); ++ if (err && err != -EPROBE_DEFER) ++ err = rimt_iommu_configure_id(dev, id_in); + if (err && err != -EPROBE_DEFER) + err = viot_iommu_configure(dev); +- mutex_unlock(&iommu_probe_device_lock); + +- /* +- * If we have reason to believe the IOMMU driver missed the initial +- * iommu_probe_device() call for dev, replay it to get things in order. +- */ +- if (!err && dev->bus) +- err = iommu_probe_device(dev); ++ mutex_unlock(&iommu_probe_device_lock); + +- /* Ignore all other errors apart from EPROBE_DEFER */ +- if (err == -EPROBE_DEFER) { +- return err; +- } else if (err) { +- dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); +- return -ENODEV; +- } +- if (!acpi_iommu_fwspec_ops(dev)) +- return -ENODEV; +- return 0; ++ return err; + } + + #else /* !CONFIG_IOMMU_API */ + + int acpi_iommu_fwspec_init(struct device *dev, u32 id, +- struct fwnode_handle *fwnode, +- const struct iommu_ops *ops) ++ struct fwnode_handle *fwnode) + { + return -ENODEV; + } +@@ -1712,6 +1689,8 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, + ret = acpi_iommu_configure_id(dev, input_id); + if (ret == -EPROBE_DEFER) + return -EPROBE_DEFER; ++ if (ret) ++ dev_dbg(dev, "Adding to IOMMU failed: %d\n", ret); + + /* + * Historically this routine doesn't fail driver probing due to errors +@@ -2034,54 +2013,18 @@ void acpi_scan_hotplug_enabled(struct 
acpi_hotplug_profile *hotplug, bool val) mutex_unlock(&acpi_scan_lock); } -static void acpi_scan_init_hotplug(struct acpi_device *adev) --{ ++int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices) + { - struct acpi_hardware_id *hwid; - - if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) { @@ -43226,8 +49901,7 @@ index 9a40052d31f3..84dcd3b5ce83 100644 -} - -static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) -+int acpi_scan_add_dep(acpi_handle handle, struct acpi_handle_list *dep_devices) - { +-{ - struct acpi_handle_list dep_devices; - acpi_status status; u32 count; @@ -43261,7 +49935,7 @@ index 9a40052d31f3..84dcd3b5ce83 100644 if (ACPI_FAILURE(status)) { acpi_handle_debug(handle, "Error reading _DEP device info\n"); continue; -@@ -2100,19 +2067,79 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) +@@ -2100,19 +2043,79 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep) count++; @@ -43344,7 +50018,7 @@ index 9a40052d31f3..84dcd3b5ce83 100644 struct acpi_device **adev_p) { struct acpi_device *device = acpi_fetch_acpi_dev(handle); -@@ -2130,9 +2157,25 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, +@@ -2130,9 +2133,25 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, if (acpi_device_should_be_hidden(handle)) return AE_OK; @@ -43373,7 +50047,7 @@ index 9a40052d31f3..84dcd3b5ce83 100644 fallthrough; case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */ -@@ -2155,10 +2198,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, +@@ -2155,10 +2174,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep, } /* @@ -43386,7 +50060,7 @@ index 9a40052d31f3..84dcd3b5ce83 100644 if (!device) return AE_CTRL_DEPTH; -@@ -2581,12 +2624,21 @@ int acpi_bus_scan(acpi_handle handle) +@@ -2581,12 +2600,21 @@ int acpi_bus_scan(acpi_handle handle) if (!device) return -ENODEV; @@ -43408,7 +50082,7 @@ index 
9a40052d31f3..84dcd3b5ce83 100644 return 0; } EXPORT_SYMBOL(acpi_bus_scan); -@@ -2735,6 +2787,8 @@ static int __init acpi_match_madt(union acpi_subtable_headers *header, +@@ -2735,6 +2763,8 @@ static int __init acpi_match_madt(union acpi_subtable_headers *header, return 0; } @@ -43417,7 +50091,7 @@ index 9a40052d31f3..84dcd3b5ce83 100644 int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) { int count = 0; -@@ -2743,6 +2797,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) +@@ -2743,6 +2773,7 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr) return 0; mutex_lock(&acpi_probe_mutex); @@ -43721,8 +50395,50 @@ index 2ea14648a661..e84106a4ef59 100644 acpi_status acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld) { +diff --git a/drivers/acpi/viot.c b/drivers/acpi/viot.c +index c8025921c129..2aa69a2fba73 100644 +--- a/drivers/acpi/viot.c ++++ b/drivers/acpi/viot.c +@@ -307,21 +307,14 @@ void __init acpi_viot_init(void) + static int viot_dev_iommu_init(struct device *dev, struct viot_iommu *viommu, + u32 epid) + { +- const struct iommu_ops *ops; +- +- if (!viommu) ++ if (!viommu || !IS_ENABLED(CONFIG_VIRTIO_IOMMU)) + return -ENODEV; + + /* We're not translating ourself */ + if (device_match_fwnode(dev, viommu->fwnode)) + return -EINVAL; + +- ops = iommu_ops_from_fwnode(viommu->fwnode); +- if (!ops) +- return IS_ENABLED(CONFIG_VIRTIO_IOMMU) ? 
+- -EPROBE_DEFER : -ENODEV; +- +- return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode, ops); ++ return acpi_iommu_fwspec_init(dev, epid, viommu->fwnode); + } + + static int viot_pci_dev_iommu_init(struct pci_dev *pdev, u16 dev_id, void *data) +diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c +index 09e72967b8ab..fe1b86fc3fc4 100644 +--- a/drivers/amba/bus.c ++++ b/drivers/amba/bus.c +@@ -363,7 +363,8 @@ static int amba_dma_configure(struct device *dev) + ret = acpi_dma_configure(dev, attr); + } + +- if (!ret && !drv->driver_managed_dma) { ++ /* @drv may not be valid when we're called from the IOMMU layer */ ++ if (!ret && dev->driver && !drv->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c -index 0e615ed23635..f1918652ddec 100644 +index a280af59a821..8f4982d095d2 100644 --- a/drivers/base/arch_numa.c +++ b/drivers/base/arch_numa.c @@ -535,7 +535,7 @@ static int __init arch_acpi_numa_init(void) @@ -43912,6 +50628,48 @@ index 3348d4db5f1b..0d01890160f3 100644 + platform_msi_free_priv_data(dev); +} +EXPORT_SYMBOL_GPL(platform_device_msi_free_irqs_all); +diff --git a/drivers/base/platform.c b/drivers/base/platform.c +index 76bfcba25003..b66dac644eae 100644 +--- a/drivers/base/platform.c ++++ b/drivers/base/platform.c +@@ -1457,7 +1457,8 @@ static int platform_dma_configure(struct device *dev) + ret = acpi_dma_configure(dev, attr); + } + +- if (!ret && !drv->driver_managed_dma) { ++ /* @drv may not be valid when we're called from the IOMMU layer */ ++ if (!ret && dev->driver && !drv->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); +diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c +index b405ee330af1..076de4f64c8a 100644 +--- a/drivers/bus/fsl-mc/fsl-mc-bus.c ++++ b/drivers/bus/fsl-mc/fsl-mc-bus.c +@@ -153,7 +153,8 @@ static int 
fsl_mc_dma_configure(struct device *dev) + else + ret = acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id); + +- if (!ret && !mc_drv->driver_managed_dma) { ++ /* @mc_drv may not be valid when we're called from the IOMMU layer */ ++ if (!ret && dev->driver && !mc_drv->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); +diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c +index a61834bc84a9..37c775350a79 100644 +--- a/drivers/cdx/cdx.c ++++ b/drivers/cdx/cdx.c +@@ -270,7 +270,8 @@ static int cdx_dma_configure(struct device *dev) + return ret; + } + +- if (!ret && !cdx_drv->driver_managed_dma) { ++ /* @cdx_drv may not be valid when we're called from the IOMMU layer */ ++ if (!ret && dev->driver && !cdx_drv->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); diff --git a/drivers/char/ipmi/ipmi_si_hardcode.c b/drivers/char/ipmi/ipmi_si_hardcode.c index 0c92fa3eee88..3cb4ceb53635 100644 --- a/drivers/char/ipmi/ipmi_si_hardcode.c @@ -46678,22 +53436,22 @@ index 000000000000..81e9f9eb1b20 +#endif diff --git a/drivers/clk/spacemit/Kconfig b/drivers/clk/spacemit/Kconfig new file mode 100644 -index 000000000000..fe905e7cf2d3 +index 000000000000..47cccef86793 --- /dev/null +++ b/drivers/clk/spacemit/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# common clock support for SPACEMIT SoC family. + -+config SPACEMIT_K1X_CCU -+ tristate "Clock support for Spacemit k1x SoCs" -+ depends on SOC_SPACEMIT_K1X ++config SPACEMIT_K1_CCU ++ tristate "Clock support for Spacemit k1 SoC" ++ depends on SOC_SPACEMIT_K1 + help -+ Build the driver for Spacemit K1x Clock Driver. ++ Build the driver for Spacemit K1 Clock Driver. 
+ diff --git a/drivers/clk/spacemit/Makefile b/drivers/clk/spacemit/Makefile new file mode 100644 -index 000000000000..6bfb749658d7 +index 000000000000..889a5c51bfe6 --- /dev/null +++ b/drivers/clk/spacemit/Makefile @@ -0,0 +1,11 @@ @@ -46702,21 +53460,21 @@ index 000000000000..6bfb749658d7 +# Spacemit Clock specific Makefile +# + -+obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu-spacemit-k1x.o -+obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_mix.o -+obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_pll.o -+obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_dpll.o -+obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_ddn.o -+obj-$(CONFIG_SPACEMIT_K1X_CCU) += ccu_ddr.o -diff --git a/drivers/clk/spacemit/ccu-spacemit-k1x.c b/drivers/clk/spacemit/ccu-spacemit-k1x.c ++obj-$(CONFIG_SPACEMIT_K1_CCU) += ccu-spacemit-k1.o ++obj-$(CONFIG_SPACEMIT_K1_CCU) += ccu_mix.o ++obj-$(CONFIG_SPACEMIT_K1_CCU) += ccu_pll.o ++obj-$(CONFIG_SPACEMIT_K1_CCU) += ccu_dpll.o ++obj-$(CONFIG_SPACEMIT_K1_CCU) += ccu_ddn.o ++obj-$(CONFIG_SPACEMIT_K1_CCU) += ccu_ddr.o +diff --git a/drivers/clk/spacemit/ccu-spacemit-k1.c b/drivers/clk/spacemit/ccu-spacemit-k1.c new file mode 100644 -index 000000000000..e4b176b39247 +index 000000000000..117ebb97ac27 --- /dev/null -+++ b/drivers/clk/spacemit/ccu-spacemit-k1x.c ++++ b/drivers/clk/spacemit/ccu-spacemit-k1.c @@ -0,0 +1,2123 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* -+ * Spacemit k1x clock controller driver ++ * Spacemit k1 clock controller driver + * + * Copyright (c) 2023, spacemit Corporation. 
+ * @@ -46728,8 +53486,8 @@ index 000000000000..e4b176b39247 +#include +#include +#include -+#include -+#include "ccu-spacemit-k1x.h" ++#include ++#include "ccu-spacemit-k1.h" +#include "ccu_mix.h" +#include "ccu_pll.h" +#include "ccu_ddn.h" @@ -46923,7 +53681,7 @@ index 000000000000..e4b176b39247 +#define RCPU2_PWM9_CLK_RST 0x24 +/* end of RCPU2 register offset */ + -+struct spacemit_k1x_clk k1x_clock_controller; ++struct spacemit_k1_clk k1_clock_controller; + +static const struct ccu_pll_rate_tbl pll2_rate_tbl[] = { + PLL_RATE(3000000000UL, 0x66, 0xdd, 0x50, 0x00, 0x3f, 0xe00000), @@ -48347,7 +55105,7 @@ index 000000000000..e4b176b39247 + 0x6, 0x6, 0x0, + 0); + -+static struct clk_hw_onecell_data spacemit_k1x_hw_clks = { ++static struct clk_hw_onecell_data spacemit_k1_hw_clks = { + .hws = { + [CLK_PLL2] = &pll2.common.hw, + [CLK_PLL3] = &pll3.common.hw, @@ -48592,7 +55350,7 @@ index 000000000000..e4b176b39247 + struct clk_hw *hw_clk; + + for (i = 0; i < tbl_size; i++) { -+ hw_clk = spacemit_k1x_hw_clks.hws[tbl[i].clk_hw_id]; ++ hw_clk = spacemit_k1_hw_clks.hws[tbl[i].clk_hw_id]; + clk = clk_hw_get_clk(hw_clk, tbl[i].name); + if (!IS_ERR_OR_NULL(clk)) + clk_prepare_enable(clk); @@ -48602,7 +55360,7 @@ index 000000000000..e4b176b39247 + } +} + -+unsigned long spacemit_k1x_ddr_freq_tbl[MAX_FREQ_LV + 1] = {0}; ++unsigned long spacemit_k1_ddr_freq_tbl[MAX_FREQ_LV + 1] = {0}; + +void spacemit_fill_ddr_freq_tbl(void) +{ @@ -48610,19 +55368,19 @@ index 000000000000..e4b176b39247 + struct clk *clk; + struct clk_hw *hw_clk; + -+ for (i = 0; i < ARRAY_SIZE(spacemit_k1x_ddr_freq_tbl); i++) { -+ hw_clk = spacemit_k1x_hw_clks.hws[CLK_DFC_LVL0 + i]; ++ for (i = 0; i < ARRAY_SIZE(spacemit_k1_ddr_freq_tbl); i++) { ++ hw_clk = spacemit_k1_hw_clks.hws[CLK_DFC_LVL0 + i]; + clk = clk_hw_get_clk(hw_clk, ddr_clk_parents[i]); + + if (!IS_ERR_OR_NULL(clk)) -+ spacemit_k1x_ddr_freq_tbl[i] = clk_get_rate(clk); ++ spacemit_k1_ddr_freq_tbl[i] = clk_get_rate(clk); + else + pr_err("%s : can't 
find clk %s\n", + __func__, ddr_clk_parents[i]); + } +} + -+int ccu_common_init(struct clk_hw *hw, struct spacemit_k1x_clk *clk_info) ++int ccu_common_init(struct clk_hw *hw, struct spacemit_k1_clk *clk_info) +{ + struct ccu_common *common = hw_to_ccu_common(hw); + struct ccu_pll *pll = hw_to_ccu_pll(hw); @@ -48678,7 +55436,7 @@ index 000000000000..e4b176b39247 +} + +int spacemit_ccu_probe(struct device_node *node, -+ struct spacemit_k1x_clk *clk_info, ++ struct spacemit_k1_clk *clk_info, + struct clk_hw_onecell_data *hw_clks) +{ + int i, ret; @@ -48723,14 +55481,14 @@ index 000000000000..e4b176b39247 + return ret; +} + -+static void spacemit_k1x_ccu_probe(struct device_node *np) ++static void spacemit_k1_ccu_probe(struct device_node *np) +{ + int ret; -+ struct spacemit_k1x_clk *clk_info; -+ struct clk_hw_onecell_data *hw_clks = &spacemit_k1x_hw_clks; ++ struct spacemit_k1_clk *clk_info; ++ struct clk_hw_onecell_data *hw_clks = &spacemit_k1_hw_clks; + -+ if (of_device_is_compatible(np, "spacemit,k1x-clock")) { -+ clk_info = &k1x_clock_controller; ++ if (of_device_is_compatible(np, "spacemit,k1-clock")) { ++ clk_info = &k1_clock_controller; + + clk_info->mpmu_base = of_iomap(np, 0); + if (!clk_info->mpmu_base) { @@ -48792,7 +55550,7 @@ index 000000000000..e4b176b39247 + goto out; + } + } else { -+ pr_err("not spacemit,k1x-clock\n"); ++ pr_err("not spacemit,k1-clock\n"); + goto out; + } + ret = spacemit_ccu_probe(np, clk_info, hw_clks); @@ -48804,14 +55562,14 @@ index 000000000000..e4b176b39247 + +void *spacemit_get_ddr_freq_tbl(void) +{ -+ return spacemit_k1x_ddr_freq_tbl; ++ return spacemit_k1_ddr_freq_tbl; +} +EXPORT_SYMBOL_GPL(spacemit_get_ddr_freq_tbl); + +u32 spacemit_get_ddr_freq_level(void) +{ + u32 ddr_freq_lvl = 0; -+ struct clk_hw *hw = spacemit_k1x_hw_clks.hws[CLK_DDR]; ++ struct clk_hw *hw = spacemit_k1_hw_clks.hws[CLK_DDR]; + + ddr_freq_lvl = clk_hw_get_parent_index(hw); + @@ -48822,7 +55580,7 @@ index 000000000000..e4b176b39247 +int 
spacemit_set_ddr_freq_level(u32 level) +{ + int ret = 0; -+ struct clk_hw *hw = spacemit_k1x_hw_clks.hws[CLK_DDR]; ++ struct clk_hw *hw = spacemit_k1_hw_clks.hws[CLK_DDR]; + + if (level < 0 || level > MAX_FREQ_LV) + return -EINVAL; @@ -48835,19 +55593,19 @@ index 000000000000..e4b176b39247 +} +EXPORT_SYMBOL_GPL(spacemit_set_ddr_freq_level); + -+CLK_OF_DECLARE(k1x_clock, "spacemit,k1x-clock", spacemit_k1x_ccu_probe); ++CLK_OF_DECLARE(k1_clock, "spacemit,k1-clock", spacemit_k1_ccu_probe); + -diff --git a/drivers/clk/spacemit/ccu-spacemit-k1x.h b/drivers/clk/spacemit/ccu-spacemit-k1x.h +diff --git a/drivers/clk/spacemit/ccu-spacemit-k1.h b/drivers/clk/spacemit/ccu-spacemit-k1.h new file mode 100644 -index 000000000000..2662b9e40400 +index 000000000000..f7da85ea3c31 --- /dev/null -+++ b/drivers/clk/spacemit/ccu-spacemit-k1x.h ++++ b/drivers/clk/spacemit/ccu-spacemit-k1.h @@ -0,0 +1,81 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023, spacemit Corporation. */ + -+#ifndef _CCU_SPACEMIT_K1X_H_ -+#define _CCU_SPACEMIT_K1X_H_ ++#ifndef _CCU_SPACEMIT_K1_H_ ++#define _CCU_SPACEMIT_K1_H_ + +#include +#include @@ -48893,7 +55651,7 @@ index 000000000000..2662b9e40400 + struct clk_hw hw; +}; + -+struct spacemit_k1x_clk { ++struct spacemit_k1_clk { + void __iomem *mpmu_base; + void __iomem *apmu_base; + void __iomem *apbc_base; @@ -48920,10 +55678,10 @@ index 000000000000..2662b9e40400 +} + +int spacemit_ccu_probe(struct device_node *node, -+ struct spacemit_k1x_clk *clk_info, ++ struct spacemit_k1_clk *clk_info, + struct clk_hw_onecell_data *desc); + -+#endif /* _CCU_SPACEMIT_K1X_H_ */ ++#endif /* _CCU_SPACEMIT_K1_H_ */ diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c new file mode 100644 index 000000000000..a23d9dad8e32 @@ -49093,7 +55851,7 @@ index 000000000000..a23d9dad8e32 + diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h new file mode 100644 -index 000000000000..577f25250a11 +index 
000000000000..92a4e3e46262 --- /dev/null +++ b/drivers/clk/spacemit/ccu_ddn.h @@ -0,0 +1,86 @@ @@ -49106,7 +55864,7 @@ index 000000000000..577f25250a11 +#include +#include + -+#include "ccu-spacemit-k1x.h" ++#include "ccu-spacemit-k1.h" + +struct ccu_ddn_tbl { + unsigned int num; @@ -49463,7 +56221,7 @@ index 000000000000..ffd8650a6e79 + diff --git a/drivers/clk/spacemit/ccu_ddr.h b/drivers/clk/spacemit/ccu_ddr.h new file mode 100644 -index 000000000000..960ca3456796 +index 000000000000..c8f648366e3c --- /dev/null +++ b/drivers/clk/spacemit/ccu_ddr.h @@ -0,0 +1,44 @@ @@ -49474,7 +56232,7 @@ index 000000000000..960ca3456796 +#define _CCU_DDR_H_ + +#include -+#include "ccu-spacemit-k1x.h" ++#include "ccu-spacemit-k1.h" +#include "ccu_mix.h" + +struct ccu_ddr { @@ -49643,7 +56401,7 @@ index 000000000000..ff8b699e1ba2 + diff --git a/drivers/clk/spacemit/ccu_dpll.h b/drivers/clk/spacemit/ccu_dpll.h new file mode 100644 -index 000000000000..d5632528dc1f +index 000000000000..6bbf62bb7e19 --- /dev/null +++ b/drivers/clk/spacemit/ccu_dpll.h @@ -0,0 +1,76 @@ @@ -49655,7 +56413,7 @@ index 000000000000..d5632528dc1f + +#include +#include -+#include "ccu-spacemit-k1x.h" ++#include "ccu-spacemit-k1.h" + +struct ccu_dpll_rate_tbl { + unsigned long long rate; @@ -50233,7 +56991,7 @@ index 000000000000..baa341090f53 + diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h new file mode 100644 -index 000000000000..4b7d67cb0225 +index 000000000000..cd087972d62b --- /dev/null +++ b/drivers/clk/spacemit/ccu_mix.h @@ -0,0 +1,380 @@ @@ -50244,7 +57002,7 @@ index 000000000000..4b7d67cb0225 +#define _CCU_MIX_H_ + +#include -+#include "ccu-spacemit-k1x.h" ++#include "ccu-spacemit-k1.h" + +#define SPACEMIT_CLK_GATE_NEED_DELAY BIT(0) + @@ -50911,7 +57669,7 @@ index 000000000000..9bc4d1de8b33 + diff --git a/drivers/clk/spacemit/ccu_pll.h b/drivers/clk/spacemit/ccu_pll.h new file mode 100644 -index 000000000000..0f6f2ed397da +index 000000000000..4b3796787d22 --- /dev/null 
+++ b/drivers/clk/spacemit/ccu_pll.h @@ -0,0 +1,79 @@ @@ -50923,7 +57681,7 @@ index 000000000000..0f6f2ed397da + +#include +#include -+#include "ccu-spacemit-k1x.h" ++#include "ccu-spacemit-k1.h" + +struct ccu_pll_rate_tbl { + unsigned long long rate; @@ -53621,6 +60379,43 @@ index 46c3aa314f97..63f81fbda8ba 100644 obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o obj-$(CONFIG_SW64_CPUFREQ) += sunway-cpufreq.o +obj-$(CONFIG_RISCV_XUANTIE_TH1520_CPUFREQ) += th1520-cpufreq.o +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c +index 7fb88e1b3922..9d2b1ecd6762 100644 +--- a/drivers/cpufreq/cppc_cpufreq.c ++++ b/drivers/cpufreq/cppc_cpufreq.c +@@ -26,6 +26,20 @@ + + #include + ++#ifdef CONFIG_ARM64 ++/* ++ * cpu_has_amu_feat is exported from arch/arm64/kernel/cpufeature.c ++ * Only declare it for ARM64 builds. ++ */ ++extern bool cpu_has_amu_feat(int cpu); ++#else ++/* For non-ARM64 architectures, AMU feature is not available. */ ++static inline bool cpu_has_amu_feat(int cpu) ++{ ++ return false; ++} ++#endif /* CONFIG_ARM64 */ ++ + static bool boost_supported; + + struct cppc_workaround_oem_info { +@@ -786,6 +800,11 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) + + cpufreq_cpu_put(policy); + ++ /* ++ * Use smp_call_on_cpu for ARM64 with AMU feature to ensure ++ * counters are active. For other architectures, use direct ++ * call. 
++ */ + if (cpu_has_amu_feat(cpu)) + ret = smp_call_on_cpu(cpu, cppc_get_perf_ctrs_pair, + &fb_ctrs, false); diff --git a/drivers/cpufreq/th1520-cpufreq.c b/drivers/cpufreq/th1520-cpufreq.c new file mode 100644 index 000000000000..ef157fd3cdf5 @@ -54313,7 +61108,7 @@ index 71d433bb0ce6..50d128a4b343 100644 ret = platform_driver_register(&sbi_cpuidle_driver); if (ret) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig -index e36506471a4f..83510ecf37c3 100644 +index e36506471a4f..28c1b1ce58ae 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -489,6 +489,13 @@ config OWL_DMA @@ -54322,7 +61117,7 @@ index e36506471a4f..83510ecf37c3 100644 +config SPACEMIT_K1_DMA + bool "Spacemit k1 SoCs DMA support" -+ depends on SOC_SPACEMIT_K1X ++ depends on SOC_SPACEMIT_K1 + depends on DMA_ENGINE + help + Enable support for the Spacemit k1 SoCs DMA controller. @@ -56199,7 +62994,7 @@ index 000000000000..d730ad085e0b +MODULE_DESCRIPTION("Spacemit K1 Peripheral DMA Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig -index 0f3cd1b05ae3..d6aaccfffb01 100644 +index ebab663702fc..1782a383ce03 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -155,7 +155,7 @@ config RASPBERRYPI_FIRMWARE @@ -56211,22 +63006,53 @@ index 0f3cd1b05ae3..d6aaccfffb01 100644 depends on HAS_IOPORT_MAP default n help -@@ -315,5 +315,6 @@ source "drivers/firmware/psci/Kconfig" +@@ -312,9 +312,11 @@ source "drivers/firmware/efi/Kconfig" + source "drivers/firmware/imx/Kconfig" + source "drivers/firmware/meson/Kconfig" + source "drivers/firmware/psci/Kconfig" ++source "drivers/firmware/riscv/Kconfig" source "drivers/firmware/smccc/Kconfig" source "drivers/firmware/tegra/Kconfig" source "drivers/firmware/xilinx/Kconfig" +source "drivers/firmware/xuantie/Kconfig" + source "drivers/firmware/ubios_uvb/Kconfig" endmenu diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile -index 28fcddcd688f..c549817a4b42 100644 
+index c60933fa11cb..be2fb4f48a23 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile -@@ -38,3 +38,4 @@ obj-y += psci/ +@@ -35,7 +35,9 @@ obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ + obj-y += efi/ + obj-y += imx/ + obj-y += psci/ ++obj-y += riscv/ obj-y += smccc/ obj-y += tegra/ obj-y += xilinx/ +obj-y += xuantie/ + obj-$(CONFIG_UDFI) += ubios_uvb/ +diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c +index 1a4c02b675f9..70a19af9e48e 100644 +--- a/drivers/firmware/efi/cper.c ++++ b/drivers/firmware/efi/cper.c +@@ -110,6 +110,7 @@ static const char * const proc_type_strs[] = { + "IA32/X64", + "IA64", + "ARM", ++ "RISCV", + }; + + static const char * const proc_isa_strs[] = { +@@ -118,6 +119,8 @@ static const char * const proc_isa_strs[] = { + "X64", + "ARM A32/T32", + "ARM A64", ++ "RISCV32", ++ "RISCV64", + }; + + const char * const cper_proc_error_type_strs[] = { diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index da8eac6dfc0f..08a44140087f 100644 --- a/drivers/firmware/efi/libstub/Makefile @@ -56274,6 +63100,882 @@ index f4fea1ec3201..24b302c9e212 100644 # define FW_CFG_CTRL_OFF 0x08 # define FW_CFG_DATA_OFF 0x00 # define FW_CFG_DMA_OFF 0x10 +diff --git a/drivers/firmware/riscv/Kconfig b/drivers/firmware/riscv/Kconfig +new file mode 100644 +index 000000000000..8056ed3262d9 +--- /dev/null ++++ b/drivers/firmware/riscv/Kconfig +@@ -0,0 +1,15 @@ ++# SPDX-License-Identifier: GPL-2.0-only ++menu "Risc-V Specific firmware drivers" ++depends on RISCV ++ ++config RISCV_SSE ++ bool "Enable SBI Supervisor Software Events support" ++ depends on RISCV_SBI ++ default y ++ help ++ The Supervisor Software Events support allow the SBI to deliver ++ NMI-like notifications to the supervisor mode software. When enable, ++ this option provides support to register callbacks on specific SSE ++ events. 
++ ++endmenu +diff --git a/drivers/firmware/riscv/Makefile b/drivers/firmware/riscv/Makefile +new file mode 100644 +index 000000000000..4ccfcbbc28ea +--- /dev/null ++++ b/drivers/firmware/riscv/Makefile +@@ -0,0 +1,3 @@ ++# SPDX-License-Identifier: GPL-2.0 ++ ++obj-$(CONFIG_RISCV_SSE) += riscv_sse.o +diff --git a/drivers/firmware/riscv/riscv_sse.c b/drivers/firmware/riscv/riscv_sse.c +new file mode 100644 +index 000000000000..672b9970ad5d +--- /dev/null ++++ b/drivers/firmware/riscv/riscv_sse.c +@@ -0,0 +1,840 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2024 Rivos Inc. ++ */ ++ ++#define pr_fmt(fmt) "sse: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++struct sse_event { ++ struct list_head list; ++ u32 evt_id; ++ u32 priority; ++ sse_event_handler *handler; ++ void *handler_arg; ++ /* Only valid for global events */ ++ unsigned int cpu; ++ ++ union { ++ struct sse_registered_event *global; ++ struct sse_registered_event __percpu *local; ++ }; ++}; ++ ++static int sse_hp_state; ++static bool sse_available __ro_after_init; ++static DEFINE_SPINLOCK(events_list_lock); ++static LIST_HEAD(events); ++static DEFINE_MUTEX(sse_mutex); ++ ++struct sse_registered_event { ++ struct sse_event_arch_data arch; ++ struct sse_event *event; ++ unsigned long attr; ++ bool is_enabled; ++}; ++ ++void sse_handle_event(struct sse_event_arch_data *arch_event, ++ struct pt_regs *regs) ++{ ++ int ret; ++ struct sse_registered_event *reg_evt = ++ container_of(arch_event, struct sse_registered_event, arch); ++ struct sse_event *evt = reg_evt->event; ++ ++ ret = evt->handler(evt->evt_id, evt->handler_arg, regs); ++ if (ret) ++ pr_warn("event %x handler failed with error %d\n", evt->evt_id, ret); ++} ++ ++static struct sse_event *sse_event_get(u32 evt) ++{ ++ struct sse_event *event = NULL; ++ ++ scoped_guard(spinlock, &events_list_lock) { ++ 
list_for_each_entry(event, &events, list) { ++ if (event->evt_id == evt) ++ return event; ++ } ++ } ++ ++ return NULL; ++} ++ ++static phys_addr_t sse_event_get_attr_phys(struct sse_registered_event *reg_evt) ++{ ++ phys_addr_t phys; ++ void *addr = ®_evt->attr; ++ ++ if (sse_event_is_global(reg_evt->event->evt_id)) ++ phys = virt_to_phys(addr); ++ else ++ phys = per_cpu_ptr_to_phys(addr); ++ ++ return phys; ++} ++ ++static struct sse_registered_event *sse_get_reg_evt(struct sse_event *event) ++{ ++ if (sse_event_is_global(event->evt_id)) ++ return event->global; ++ else ++ return per_cpu_ptr(event->local, smp_processor_id()); ++} ++ ++static int sse_sbi_event_func(struct sse_event *event, unsigned long func) ++{ ++ struct sbiret ret; ++ u32 evt = event->evt_id; ++ struct sse_registered_event *reg_evt = sse_get_reg_evt(event); ++ ++ ret = sbi_ecall(SBI_EXT_SSE, func, evt, 0, 0, 0, 0, 0); ++ if (ret.error) { ++ pr_warn("Failed to execute func %lx, event %x, error %ld\n", ++ func, evt, ret.error); ++ return sbi_err_map_linux_errno(ret.error); ++ } ++ ++ if (func == SBI_SSE_EVENT_DISABLE) ++ reg_evt->is_enabled = false; ++ else if (func == SBI_SSE_EVENT_ENABLE) ++ reg_evt->is_enabled = true; ++ ++ return 0; ++} ++ ++int sse_event_disable_local(struct sse_event *event) ++{ ++ return sse_sbi_event_func(event, SBI_SSE_EVENT_DISABLE); ++} ++EXPORT_SYMBOL_GPL(sse_event_disable_local); ++ ++int sse_event_enable_local(struct sse_event *event) ++{ ++ return sse_sbi_event_func(event, SBI_SSE_EVENT_ENABLE); ++} ++EXPORT_SYMBOL_GPL(sse_event_enable_local); ++ ++static int sse_event_attr_get_no_lock(struct sse_registered_event *reg_evt, ++ unsigned long attr_id, unsigned long *val) ++{ ++ struct sbiret sret; ++ u32 evt = reg_evt->event->evt_id; ++ unsigned long phys; ++ ++ phys = sse_event_get_attr_phys(reg_evt); ++ ++ sret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_READ, evt, attr_id, 1, ++ phys, 0, 0); ++ if (sret.error) { ++ pr_debug("Failed to get event %x attr %lx, error 
%ld\n", evt, ++ attr_id, sret.error); ++ return sbi_err_map_linux_errno(sret.error); ++ } ++ ++ *val = reg_evt->attr; ++ ++ return 0; ++} ++ ++static int sse_event_attr_set_nolock(struct sse_registered_event *reg_evt, ++ unsigned long attr_id, unsigned long val) ++{ ++ struct sbiret sret; ++ u32 evt = reg_evt->event->evt_id; ++ unsigned long phys; ++ ++ reg_evt->attr = val; ++ phys = sse_event_get_attr_phys(reg_evt); ++ ++ sret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_WRITE, evt, attr_id, 1, ++ phys, 0, 0); ++ if (sret.error) ++ pr_debug("Failed to set event %x attr %lx, error %ld\n", evt, ++ attr_id, sret.error); ++ ++ return sbi_err_map_linux_errno(sret.error); ++} ++ ++static void sse_global_event_update_cpu(struct sse_event *event, ++ unsigned int cpu) ++{ ++ struct sse_registered_event *reg_evt = event->global; ++ ++ event->cpu = cpu; ++ arch_sse_event_update_cpu(®_evt->arch, cpu); ++} ++ ++static int sse_event_set_target_cpu_nolock(struct sse_event *event, ++ unsigned int cpu) ++{ ++ unsigned int hart_id = cpuid_to_hartid_map(cpu); ++ struct sse_registered_event *reg_evt = event->global; ++ u32 evt = event->evt_id; ++ bool was_enabled; ++ int ret; ++ ++ if (!sse_event_is_global(evt)) ++ return -EINVAL; ++ ++ was_enabled = reg_evt->is_enabled; ++ if (was_enabled) ++ sse_event_disable_local(event); ++ ++ ret = sse_event_attr_set_nolock(reg_evt, SBI_SSE_ATTR_PREFERRED_HART, ++ hart_id); ++ if (ret == 0) ++ sse_global_event_update_cpu(event, cpu); ++ ++ if (was_enabled) ++ sse_event_enable_local(event); ++ ++ return 0; ++} ++ ++int sse_event_set_target_cpu(struct sse_event *event, unsigned int cpu) ++{ ++ int ret; ++ ++ scoped_guard(mutex, &sse_mutex) { ++ scoped_guard(cpus_read_lock) { ++ if (!cpu_online(cpu)) ++ return -EINVAL; ++ ++ ret = sse_event_set_target_cpu_nolock(event, cpu); ++ } ++ } ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(sse_event_set_target_cpu); ++ ++static int sse_event_init_registered(unsigned int cpu, ++ struct sse_registered_event 
*reg_evt, ++ struct sse_event *event) ++{ ++ reg_evt->event = event; ++ ++ return arch_sse_init_event(®_evt->arch, event->evt_id, cpu); ++} ++ ++static void sse_event_free_registered(struct sse_registered_event *reg_evt) ++{ ++ arch_sse_free_event(®_evt->arch); ++} ++ ++static int sse_event_alloc_global(struct sse_event *event) ++{ ++ int err; ++ struct sse_registered_event *reg_evt; ++ ++ reg_evt = kzalloc(sizeof(*reg_evt), GFP_KERNEL); ++ if (!reg_evt) ++ return -ENOMEM; ++ ++ event->global = reg_evt; ++ err = sse_event_init_registered(smp_processor_id(), reg_evt, event); ++ if (err) ++ kfree(reg_evt); ++ ++ return err; ++} ++ ++static int sse_event_alloc_local(struct sse_event *event) ++{ ++ int err; ++ unsigned int cpu, err_cpu; ++ struct sse_registered_event *reg_evt; ++ struct sse_registered_event __percpu *reg_evts; ++ ++ reg_evts = alloc_percpu(struct sse_registered_event); ++ if (!reg_evts) ++ return -ENOMEM; ++ ++ event->local = reg_evts; ++ ++ for_each_possible_cpu(cpu) { ++ reg_evt = per_cpu_ptr(reg_evts, cpu); ++ err = sse_event_init_registered(cpu, reg_evt, event); ++ if (err) { ++ err_cpu = cpu; ++ goto err_free_per_cpu; ++ } ++ } ++ ++ return 0; ++ ++err_free_per_cpu: ++ for_each_possible_cpu(cpu) { ++ if (cpu == err_cpu) ++ break; ++ reg_evt = per_cpu_ptr(reg_evts, cpu); ++ sse_event_free_registered(reg_evt); ++ } ++ ++ free_percpu(reg_evts); ++ ++ return err; ++} ++ ++static struct sse_event *sse_event_alloc(u32 evt, u32 priority, ++ sse_event_handler *handler, void *arg) ++{ ++ int err; ++ struct sse_event *event; ++ ++ event = kzalloc(sizeof(*event), GFP_KERNEL); ++ if (!event) ++ return ERR_PTR(-ENOMEM); ++ ++ event->evt_id = evt; ++ event->priority = priority; ++ event->handler_arg = arg; ++ event->handler = handler; ++ ++ if (sse_event_is_global(evt)) ++ err = sse_event_alloc_global(event); ++ else ++ err = sse_event_alloc_local(event); ++ ++ if (err) { ++ kfree(event); ++ return ERR_PTR(err); ++ } ++ ++ return event; ++} ++ ++static int 
sse_sbi_register_event(struct sse_event *event, ++ struct sse_registered_event *reg_evt) ++{ ++ int ret; ++ ++ ret = sse_event_attr_set_nolock(reg_evt, SBI_SSE_ATTR_PRIO, ++ event->priority); ++ if (ret) ++ return ret; ++ ++ return arch_sse_register_event(®_evt->arch); ++} ++ ++static int sse_event_register_local(struct sse_event *event) ++{ ++ int ret; ++ struct sse_registered_event *reg_evt; ++ ++ reg_evt = per_cpu_ptr(event->local, smp_processor_id()); ++ ret = sse_sbi_register_event(event, reg_evt); ++ if (ret) ++ pr_debug("Failed to register event %x: err %d\n", event->evt_id, ++ ret); ++ ++ return ret; ++} ++ ++static int sse_sbi_unregister_event(struct sse_event *event) ++{ ++ return sse_sbi_event_func(event, SBI_SSE_EVENT_UNREGISTER); ++} ++ ++struct sse_per_cpu_evt { ++ struct sse_event *event; ++ unsigned long func; ++ cpumask_t error; ++}; ++ ++static void sse_event_per_cpu_func(void *info) ++{ ++ int ret; ++ struct sse_per_cpu_evt *cpu_evt = info; ++ ++ if (cpu_evt->func == SBI_SSE_EVENT_REGISTER) ++ ret = sse_event_register_local(cpu_evt->event); ++ else ++ ret = sse_sbi_event_func(cpu_evt->event, cpu_evt->func); ++ ++ if (ret) ++ cpumask_set_cpu(smp_processor_id(), &cpu_evt->error); ++} ++ ++static void sse_event_free(struct sse_event *event) ++{ ++ unsigned int cpu; ++ struct sse_registered_event *reg_evt; ++ ++ if (sse_event_is_global(event->evt_id)) { ++ sse_event_free_registered(event->global); ++ kfree(event->global); ++ } else { ++ for_each_possible_cpu(cpu) { ++ reg_evt = per_cpu_ptr(event->local, cpu); ++ sse_event_free_registered(reg_evt); ++ } ++ free_percpu(event->local); ++ } ++ ++ kfree(event); ++} ++ ++static void sse_on_each_cpu(struct sse_event *event, unsigned long func, ++ unsigned long revert_func) ++{ ++ struct sse_per_cpu_evt cpu_evt; ++ ++ cpu_evt.event = event; ++ cpumask_clear(&cpu_evt.error); ++ cpu_evt.func = func; ++ on_each_cpu(sse_event_per_cpu_func, &cpu_evt, 1); ++ /* ++ * If there are some error reported by CPUs, revert 
event state on the ++ * other ones ++ */ ++ if (!cpumask_empty(&cpu_evt.error)) { ++ cpumask_t revert; ++ ++ cpumask_andnot(&revert, cpu_online_mask, &cpu_evt.error); ++ cpu_evt.func = revert_func; ++ on_each_cpu_mask(&revert, sse_event_per_cpu_func, &cpu_evt, 1); ++ } ++} ++ ++int sse_event_enable(struct sse_event *event) ++{ ++ int ret = 0; ++ ++ scoped_guard(mutex, &sse_mutex) { ++ scoped_guard(cpus_read_lock) { ++ if (sse_event_is_global(event->evt_id)) { ++ ret = sse_event_enable_local(event); ++ } else { ++ sse_on_each_cpu(event, SBI_SSE_EVENT_ENABLE, ++ SBI_SSE_EVENT_DISABLE); ++ } ++ } ++ } ++ return ret; ++} ++EXPORT_SYMBOL_GPL(sse_event_enable); ++ ++static int sse_events_mask(void) ++{ ++ struct sbiret ret; ++ ++ ret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_HART_MASK, 0, 0, 0, 0, 0, 0); ++ ++ return sbi_err_map_linux_errno(ret.error); ++} ++ ++static int sse_events_unmask(void) ++{ ++ struct sbiret ret; ++ ++ ret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_HART_UNMASK, 0, 0, 0, 0, 0, 0); ++ ++ return sbi_err_map_linux_errno(ret.error); ++} ++ ++static void sse_event_disable_nolock(struct sse_event *event) ++{ ++ struct sse_per_cpu_evt cpu_evt; ++ ++ if (sse_event_is_global(event->evt_id)) { ++ sse_event_disable_local(event); ++ } else { ++ cpu_evt.event = event; ++ cpu_evt.func = SBI_SSE_EVENT_DISABLE; ++ on_each_cpu(sse_event_per_cpu_func, &cpu_evt, 1); ++ } ++} ++ ++void sse_event_disable(struct sse_event *event) ++{ ++ scoped_guard(mutex, &sse_mutex) { ++ scoped_guard(cpus_read_lock) { ++ sse_event_disable_nolock(event); ++ } ++ } ++} ++EXPORT_SYMBOL_GPL(sse_event_disable); ++ ++struct sse_event *sse_event_register(u32 evt, u32 priority, ++ sse_event_handler *handler, void *arg) ++{ ++ struct sse_event *event; ++ int cpu; ++ int ret = 0; ++ ++ if (!sse_available) ++ return ERR_PTR(-EOPNOTSUPP); ++ ++ guard(mutex)(&sse_mutex); ++ if (sse_event_get(evt)) ++ return ERR_PTR(-EEXIST); ++ ++ event = sse_event_alloc(evt, priority, handler, arg); ++ if 
(IS_ERR(event)) ++ return event; ++ ++ scoped_guard(cpus_read_lock) { ++ if (sse_event_is_global(evt)) { ++ unsigned long preferred_hart; ++ ++ ret = sse_event_attr_get_no_lock(event->global, ++ SBI_SSE_ATTR_PREFERRED_HART, ++ &preferred_hart); ++ if (ret) ++ goto err_event_free; ++ ++ cpu = riscv_hartid_to_cpuid(preferred_hart); ++ sse_global_event_update_cpu(event, cpu); ++ ++ ret = sse_sbi_register_event(event, event->global); ++ if (ret) ++ goto err_event_free; ++ ++ } else { ++ sse_on_each_cpu(event, SBI_SSE_EVENT_REGISTER, ++ SBI_SSE_EVENT_DISABLE); ++ } ++ } ++ ++ scoped_guard(spinlock, &events_list_lock) ++ list_add(&event->list, &events); ++ ++ return event; ++ ++err_event_free: ++ sse_event_free(event); ++ ++ return ERR_PTR(ret); ++} ++EXPORT_SYMBOL_GPL(sse_event_register); ++ ++static void sse_event_unregister_nolock(struct sse_event *event) ++{ ++ struct sse_per_cpu_evt cpu_evt; ++ ++ if (sse_event_is_global(event->evt_id)) { ++ sse_sbi_unregister_event(event); ++ } else { ++ cpu_evt.event = event; ++ cpu_evt.func = SBI_SSE_EVENT_UNREGISTER; ++ on_each_cpu(sse_event_per_cpu_func, &cpu_evt, 1); ++ } ++} ++ ++void sse_event_unregister(struct sse_event *event) ++{ ++ scoped_guard(mutex, &sse_mutex) { ++ scoped_guard(cpus_read_lock) ++ sse_event_unregister_nolock(event); ++ ++ scoped_guard(spinlock, &events_list_lock) ++ list_del(&event->list); ++ ++ sse_event_free(event); ++ } ++} ++EXPORT_SYMBOL_GPL(sse_event_unregister); ++ ++static int sse_cpu_online(unsigned int cpu) ++{ ++ struct sse_event *event; ++ ++ scoped_guard(spinlock, &events_list_lock) { ++ list_for_each_entry(event, &events, list) { ++ if (sse_event_is_global(event->evt_id)) ++ continue; ++ ++ sse_event_register_local(event); ++ if (sse_get_reg_evt(event)) ++ sse_event_enable_local(event); ++ } ++ } ++ ++ /* Ready to handle events. Unmask SSE. 
*/ ++ return sse_events_unmask(); ++} ++ ++static int sse_cpu_teardown(unsigned int cpu) ++{ ++ int ret = 0; ++ unsigned int next_cpu; ++ struct sse_event *event; ++ ++ /* Mask the sse events */ ++ ret = sse_events_mask(); ++ if (ret) ++ return ret; ++ ++ scoped_guard(spinlock, &events_list_lock) { ++ list_for_each_entry(event, &events, list) { ++ if (!sse_event_is_global(event->evt_id)) { ++ if (event->global->is_enabled) ++ sse_event_disable_local(event); ++ ++ sse_sbi_unregister_event(event); ++ continue; ++ } ++ ++ if (event->cpu != smp_processor_id()) ++ continue; ++ ++ /* Update destination hart for global event */ ++ next_cpu = cpumask_any_but(cpu_online_mask, cpu); ++ ret = sse_event_set_target_cpu_nolock(event, next_cpu); ++ } ++ } ++ ++ return ret; ++} ++ ++static void sse_reset(void) ++{ ++ struct sse_event *event; ++ ++ list_for_each_entry(event, &events, list) { ++ sse_event_disable_nolock(event); ++ sse_event_unregister_nolock(event); ++ } ++} ++ ++static int sse_pm_notifier(struct notifier_block *nb, unsigned long action, ++ void *data) ++{ ++ WARN_ON_ONCE(preemptible()); ++ ++ switch (action) { ++ case CPU_PM_ENTER: ++ sse_events_mask(); ++ break; ++ case CPU_PM_EXIT: ++ case CPU_PM_ENTER_FAILED: ++ sse_events_unmask(); ++ break; ++ default: ++ return NOTIFY_DONE; ++ } ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block sse_pm_nb = { ++ .notifier_call = sse_pm_notifier, ++}; ++ ++/* ++ * Mask all CPUs and unregister all events on panic, reboot or kexec. 
++ */ ++static int sse_reboot_notifier(struct notifier_block *nb, unsigned long action, ++ void *data) ++{ ++ cpuhp_remove_state(sse_hp_state); ++ sse_reset(); ++ ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block sse_reboot_nb = { ++ .notifier_call = sse_reboot_notifier, ++}; ++ ++static int __init sse_init(void) ++{ ++ int ret; ++ ++ if (sbi_probe_extension(SBI_EXT_SSE) <= 0) { ++ pr_err("Missing SBI SSE extension\n"); ++ return -EOPNOTSUPP; ++ } ++ pr_info("SBI SSE extension detected\n"); ++ ++ ret = cpu_pm_register_notifier(&sse_pm_nb); ++ if (ret) { ++ pr_warn("Failed to register CPU PM notifier...\n"); ++ return ret; ++ } ++ ++ ret = register_reboot_notifier(&sse_reboot_nb); ++ if (ret) { ++ pr_warn("Failed to register reboot notifier...\n"); ++ goto remove_cpupm; ++ } ++ ++ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sse:online", ++ sse_cpu_online, sse_cpu_teardown); ++ if (ret < 0) ++ goto remove_reboot; ++ ++ sse_hp_state = ret; ++ sse_available = true; ++ ++ return 0; ++ ++remove_reboot: ++ unregister_reboot_notifier(&sse_reboot_nb); ++ ++remove_cpupm: ++ cpu_pm_unregister_notifier(&sse_pm_nb); ++ ++ return ret; ++} ++arch_initcall(sse_init); ++ ++struct sse_ghes_callback { ++ struct list_head head; ++ struct ghes *ghes; ++ sse_event_handler *callback; ++}; ++ ++struct sse_ghes_event_data { ++ struct list_head head; ++ u32 event_num; ++ struct list_head callback_list; ++ struct sse_event *event; ++}; ++ ++static DEFINE_SPINLOCK(sse_ghes_event_list_lock); ++static LIST_HEAD(sse_ghes_event_list); ++ ++static int sse_ghes_handler(u32 event_num, void *arg, struct pt_regs *regs) ++{ ++ struct sse_ghes_event_data *ev_data = arg; ++ struct sse_ghes_callback *cb = NULL; ++ ++ list_for_each_entry(cb, &ev_data->callback_list, head) { ++ if (cb && cb->ghes && cb->callback) ++ cb->callback(ev_data->event_num, cb->ghes, regs); ++ } ++ ++ return 0; ++} ++ ++int sse_register_ghes(struct ghes *ghes, sse_event_handler *lo_cb, ++ sse_event_handler *hi_cb) 
++{ ++ struct sse_ghes_event_data *ev_data, *evd; ++ struct sse_ghes_callback *cb; ++ u32 ev_num; ++ int err; ++ ++ if (!sse_available) ++ return -EOPNOTSUPP; ++ if (!ghes || !lo_cb || !hi_cb) ++ return -EINVAL; ++ ++ ev_num = ghes->generic->notify.vector; ++ ++ ev_data = NULL; ++ spin_lock(&sse_ghes_event_list_lock); ++ list_for_each_entry(evd, &sse_ghes_event_list, head) { ++ if (evd->event_num == ev_num) { ++ ev_data = evd; ++ break; ++ } ++ } ++ spin_unlock(&sse_ghes_event_list_lock); ++ ++ if (!ev_data) { ++ ev_data = kzalloc(sizeof(*ev_data), GFP_KERNEL); ++ if (!ev_data) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(&ev_data->head); ++ ev_data->event_num = ev_num; ++ ++ INIT_LIST_HEAD(&ev_data->callback_list); ++ ++ ev_data->event = sse_event_register(ev_num, ev_num, ++ sse_ghes_handler, ev_data); ++ if (IS_ERR(ev_data->event)) { ++ pr_err("%s: Couldn't register event 0x%x\n", __func__, ev_num); ++ kfree(ev_data); ++ return -ENOMEM; ++ } ++ ++ err = sse_event_enable(ev_data->event); ++ if (err) { ++ pr_err("%s: Couldn't enable event 0x%x\n", __func__, ev_num); ++ sse_event_unregister(ev_data->event); ++ kfree(ev_data); ++ return err; ++ } ++ ++ spin_lock(&sse_ghes_event_list_lock); ++ list_add_tail(&ev_data->head, &sse_ghes_event_list); ++ spin_unlock(&sse_ghes_event_list_lock); ++ } ++ ++ list_for_each_entry(cb, &ev_data->callback_list, head) { ++ if (cb->ghes == ghes) ++ return -EALREADY; ++ } ++ ++ cb = kzalloc(sizeof(*cb), GFP_KERNEL); ++ if (!cb) ++ return -ENOMEM; ++ INIT_LIST_HEAD(&cb->head); ++ cb->ghes = ghes; ++ cb->callback = lo_cb; ++ list_add_tail(&cb->head, &ev_data->callback_list); ++ ++ return 0; ++} ++ ++int sse_unregister_ghes(struct ghes *ghes) ++{ ++ struct sse_ghes_event_data *ev_data, *tmp; ++ struct sse_ghes_callback *cb; ++ int free_ev_data = 0; ++ ++ if (!ghes) ++ return -EINVAL; ++ ++ spin_lock(&sse_ghes_event_list_lock); ++ ++ list_for_each_entry_safe(ev_data, tmp, &sse_ghes_event_list, head) { ++ list_for_each_entry(cb, 
&ev_data->callback_list, head) { ++ if (cb->ghes != ghes) ++ continue; ++ ++ list_del(&cb->head); ++ kfree(cb); ++ break; ++ } ++ ++ if (list_empty(&ev_data->callback_list)) ++ free_ev_data = 1; ++ ++ if (free_ev_data) { ++ spin_unlock(&sse_ghes_event_list_lock); ++ ++ sse_event_disable(ev_data->event); ++ sse_event_unregister(ev_data->event); ++ ev_data->event = NULL; ++ ++ spin_lock(&sse_ghes_event_list_lock); ++ ++ list_del(&ev_data->head); ++ kfree(ev_data); ++ } ++ } ++ ++ spin_unlock(&sse_ghes_event_list_lock); ++ ++ return 0; ++} diff --git a/drivers/firmware/xuantie/Kconfig b/drivers/firmware/xuantie/Kconfig new file mode 100644 index 000000000000..b10c0416067f @@ -57261,19 +64963,19 @@ index 000000000000..20d216522c81 +} \ No newline at end of file diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig -index 904b71c06eba..77d2d065a1b2 100644 +index 904b71c06eba..dd6f3ebffa96 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -1828,6 +1828,15 @@ config GPIO_SIM This enables the GPIO simulator - a configfs-based GPIO testing driver. -+config GPIO_K1X -+ bool "Spacemit k1x GPIO support" -+ depends on PINCTRL_SPACEMIT_K1X ++config GPIO_K1 ++ bool "Spacemit k1 GPIO support" ++ depends on PINCTRL_SPACEMIT_K1 + help -+ Say yes here to support the k1x GPIO device. -+ The k1x GPIO device may have several banks, and each ++ Say yes here to support the k1 GPIO device. ++ The k1 GPIO device may have several banks, and each + bank control at most 32 GPIO pins. The number of banks + is passed by device tree or platform data. 
+ @@ -57281,14 +64983,14 @@ index 904b71c06eba..77d2d065a1b2 100644 endif diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile -index e44a700ec7d3..06e2c4fcb6c3 100644 +index e44a700ec7d3..aab51d2e492f 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -78,6 +78,7 @@ obj-$(CONFIG_GPIO_IMX_SCU) += gpio-imx-scu.o obj-$(CONFIG_GPIO_IT87) += gpio-it87.o obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o -+obj-$(CONFIG_GPIO_K1X) += gpio-k1x.o ++obj-$(CONFIG_GPIO_K1) += gpio-k1.o obj-$(CONFIG_GPIO_KEMPLD) += gpio-kempld.o obj-$(CONFIG_GPIO_LATCH) += gpio-latch.o obj-$(CONFIG_GPIO_LJCA) += gpio-ljca.o @@ -57330,15 +65032,15 @@ index 6b7d47a52b10..8a63ff1e5f73 100644 /* Only port A can provide interrupts in all configurations of the IP */ if (pp->idx == 0) -diff --git a/drivers/gpio/gpio-k1x.c b/drivers/gpio/gpio-k1x.c +diff --git a/drivers/gpio/gpio-k1.c b/drivers/gpio/gpio-k1.c new file mode 100644 -index 000000000000..4491a9ca4169 +index 000000000000..c423ab8bb0b2 --- /dev/null -+++ b/drivers/gpio/gpio-k1x.c ++++ b/drivers/gpio/gpio-k1.c @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0 +/* -+ * spacemit-k1x gpio driver file ++ * spacemit-k1 gpio driver file + * + * Copyright (C) 2023 Spacemit + * @@ -57372,56 +65074,56 @@ index 000000000000..4491a9ca4169 +#define GAPMASK 0x9c +#define GCPMASK 0xa8 + -+#define K1X_BANK_GPIO_NUMBER (32) -+#define BANK_GPIO_MASK (K1X_BANK_GPIO_NUMBER - 1) ++#define K1_BANK_GPIO_NUMBER (32) ++#define BANK_GPIO_MASK (K1_BANK_GPIO_NUMBER - 1) + -+#define k1x_gpio_to_bank_idx(gpio) ((gpio) / K1X_BANK_GPIO_NUMBER) -+#define k1x_gpio_to_bank_offset(gpio) ((gpio) & BANK_GPIO_MASK) -+#define k1x_bank_to_gpio(idx, offset) (((idx) * K1X_BANK_GPIO_NUMBER) | \ ++#define k1_gpio_to_bank_idx(gpio) ((gpio) / K1_BANK_GPIO_NUMBER) ++#define k1_gpio_to_bank_offset(gpio) ((gpio) & BANK_GPIO_MASK) ++#define k1_bank_to_gpio(idx, offset) (((idx) * K1_BANK_GPIO_NUMBER) | \ + ((offset) & 
BANK_GPIO_MASK)) + -+struct k1x_gpio_bank { ++struct k1_gpio_bank { + void __iomem *reg_bank; + u32 irq_mask; + u32 irq_rising_edge; + u32 irq_falling_edge; +}; + -+struct k1x_gpio_chip { ++struct k1_gpio_chip { + struct gpio_chip chip; + void __iomem *reg_base; + int irq; + struct irq_domain *domain; + unsigned int ngpio; + unsigned int nbank; -+ struct k1x_gpio_bank *banks; ++ struct k1_gpio_bank *banks; +}; + -+static int k1x_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) ++static int k1_gpio_to_irq(struct gpio_chip *chip, unsigned int offset) +{ -+ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ struct k1_gpio_chip *k1_chip = container_of(chip, struct k1_gpio_chip, chip); + -+ return irq_create_mapping(k1x_chip->domain, offset); ++ return irq_create_mapping(k1_chip->domain, offset); +} + -+static int k1x_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) ++static int k1_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) +{ -+ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); -+ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ struct k1_gpio_chip *k1_chip = container_of(chip, struct k1_gpio_chip, chip); ++ struct k1_gpio_bank *bank = &k1_chip->banks[k1_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1_gpio_to_bank_offset(offset)); + + writel(bit, bank->reg_bank + GCDR); + + return 0; +} + -+static int k1x_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) ++static int k1_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value) +{ -+ struct k1x_gpio_chip *k1x_chip = -+ container_of(chip, struct k1x_gpio_chip, chip); -+ struct k1x_gpio_bank *bank = -+ &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ struct k1_gpio_chip *k1_chip = ++ container_of(chip, struct k1_gpio_chip, 
chip); ++ struct k1_gpio_bank *bank = ++ &k1_chip->banks[k1_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1_gpio_to_bank_offset(offset)); + + /* Set value first. */ + writel(bit, bank->reg_bank + (value ? GPSR : GPCR)); @@ -57430,11 +65132,11 @@ index 000000000000..4491a9ca4169 + return 0; +} + -+static int k1x_gpio_get(struct gpio_chip *chip, unsigned int offset) ++static int k1_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ -+ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); -+ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ struct k1_gpio_chip *k1_chip = container_of(chip, struct k1_gpio_chip, chip); ++ struct k1_gpio_bank *bank = &k1_chip->banks[k1_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1_gpio_to_bank_offset(offset)); + u32 gplr; + + gplr = readl(bank->reg_bank + GPLR); @@ -57442,11 +65144,11 @@ index 000000000000..4491a9ca4169 + return !!(gplr & bit); +} + -+static void k1x_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) ++static void k1_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +{ -+ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); -+ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(offset)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(offset)); ++ struct k1_gpio_chip *k1_chip = container_of(chip, struct k1_gpio_chip, chip); ++ struct k1_gpio_bank *bank = &k1_chip->banks[k1_gpio_to_bank_idx(offset)]; ++ u32 bit = (1 << k1_gpio_to_bank_offset(offset)); + u32 gpdr; + + gpdr = readl(bank->reg_bank + GPDR); @@ -57456,14 +65158,14 @@ index 000000000000..4491a9ca4169 +} + +#ifdef CONFIG_OF_GPIO -+static int k1x_gpio_of_xlate(struct gpio_chip *chip, ++static int k1_gpio_of_xlate(struct gpio_chip *chip, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ -+ struct k1x_gpio_chip *k1x_chip = container_of(chip, struct k1x_gpio_chip, chip); ++ 
struct k1_gpio_chip *k1_chip = container_of(chip, struct k1_gpio_chip, chip); + + /* GPIO index start from 0. */ -+ if (gpiospec->args[0] >= k1x_chip->ngpio) ++ if (gpiospec->args[0] >= k1_chip->ngpio) + return -EINVAL; + + if (flags) @@ -57473,12 +65175,12 @@ index 000000000000..4491a9ca4169 +} +#endif + -+static int k1x_gpio_irq_type(struct irq_data *d, unsigned int type) ++static int k1_gpio_irq_type(struct irq_data *d, unsigned int type) +{ -+ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ struct k1_gpio_chip *k1_chip = irq_data_get_irq_chip_data(d); + int gpio = irqd_to_hwirq(d); -+ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ struct k1_gpio_bank *bank = &k1_chip->banks[k1_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1_gpio_to_bank_offset(gpio)); + + if (type & IRQ_TYPE_EDGE_RISING) { + bank->irq_rising_edge |= bit; @@ -57499,17 +65201,17 @@ index 000000000000..4491a9ca4169 + return 0; +} + -+static irqreturn_t k1x_gpio_demux_handler(int irq, void *data) ++static irqreturn_t k1_gpio_demux_handler(int irq, void *data) +{ + int i, n; + u32 gedr; + unsigned long pending = 0; + unsigned int irq_num, irqs_handled = 0; -+ struct k1x_gpio_bank *bank; -+ struct k1x_gpio_chip *k1x_chip = (struct k1x_gpio_chip *)data; ++ struct k1_gpio_bank *bank; ++ struct k1_gpio_chip *k1_chip = (struct k1_gpio_chip *)data; + -+ for (i = 0; i < k1x_chip->nbank; i++) { -+ bank = &k1x_chip->banks[i]; ++ for (i = 0; i < k1_chip->nbank; i++) { ++ bank = &k1_chip->banks[i]; + + gedr = readl(bank->reg_bank + GEDR); + if (!gedr) @@ -57522,8 +65224,8 @@ index 000000000000..4491a9ca4169 + continue; + pending = gedr; + for_each_set_bit(n, &pending, BITS_PER_LONG) { -+ irq_num = irq_find_mapping(k1x_chip->domain, -+ k1x_bank_to_gpio(i, n)); ++ irq_num = irq_find_mapping(k1_chip->domain, ++ k1_bank_to_gpio(i, n)); + generic_handle_irq(irq_num); + } + irqs_handled++; @@ -57532,22 +65234,22 @@ 
index 000000000000..4491a9ca4169 + return irqs_handled ? IRQ_HANDLED : IRQ_NONE; +} + -+static void k1x_ack_muxed_gpio(struct irq_data *d) ++static void k1_ack_muxed_gpio(struct irq_data *d) +{ -+ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ struct k1_gpio_chip *k1_chip = irq_data_get_irq_chip_data(d); + int gpio = irqd_to_hwirq(d); -+ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ struct k1_gpio_bank *bank = &k1_chip->banks[k1_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1_gpio_to_bank_offset(gpio)); + + writel(bit, bank->reg_bank + GEDR); +} + -+static void k1x_mask_muxed_gpio(struct irq_data *d) ++static void k1_mask_muxed_gpio(struct irq_data *d) +{ -+ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); ++ struct k1_gpio_chip *k1_chip = irq_data_get_irq_chip_data(d); + int gpio = irqd_to_hwirq(d); -+ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; -+ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); ++ struct k1_gpio_bank *bank = &k1_chip->banks[k1_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1_gpio_to_bank_offset(gpio)); + + bank->irq_mask &= ~bit; + @@ -57556,12 +65258,12 @@ index 000000000000..4491a9ca4169 + writel(bit, bank->reg_bank + GCFER); +} + -+static void k1x_unmask_muxed_gpio(struct irq_data *d) ++static void k1_unmask_muxed_gpio(struct irq_data *d) +{ + int gpio = irqd_to_hwirq(d); -+ u32 bit = (1 << k1x_gpio_to_bank_offset(gpio)); -+ struct k1x_gpio_chip *k1x_chip = irq_data_get_irq_chip_data(d); -+ struct k1x_gpio_bank *bank = &k1x_chip->banks[k1x_gpio_to_bank_idx(gpio)]; ++ u32 bit = (1 << k1_gpio_to_bank_offset(gpio)); ++ struct k1_gpio_chip *k1_chip = irq_data_get_irq_chip_data(d); ++ struct k1_gpio_bank *bank = &k1_chip->banks[k1_gpio_to_bank_idx(gpio)]; + + bank->irq_mask |= bit; + @@ -57570,34 +65272,34 @@ index 000000000000..4491a9ca4169 + writel(bit & bank->irq_falling_edge, bank->reg_bank + GSFER); 
+} + -+static struct irq_chip k1x_muxed_gpio_chip = { -+ .name = "k1x-gpio-irqchip", -+ .irq_ack = k1x_ack_muxed_gpio, -+ .irq_mask = k1x_mask_muxed_gpio, -+ .irq_unmask = k1x_unmask_muxed_gpio, -+ .irq_set_type = k1x_gpio_irq_type, ++static struct irq_chip k1_muxed_gpio_chip = { ++ .name = "k1-gpio-irqchip", ++ .irq_ack = k1_ack_muxed_gpio, ++ .irq_mask = k1_mask_muxed_gpio, ++ .irq_unmask = k1_unmask_muxed_gpio, ++ .irq_set_type = k1_gpio_irq_type, + .flags = IRQCHIP_SKIP_SET_WAKE, +}; + -+static const struct of_device_id k1x_gpio_dt_ids[] = { -+ { .compatible = "spacemit,k1x-gpio"}, ++static const struct of_device_id k1_gpio_dt_ids[] = { ++ { .compatible = "spacemit,k1-gpio"}, + {} +}; + -+static int k1x_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) ++static int k1_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) +{ -+ irq_set_chip_and_handler(irq, &k1x_muxed_gpio_chip, handle_edge_irq); ++ irq_set_chip_and_handler(irq, &k1_muxed_gpio_chip, handle_edge_irq); + irq_set_chip_data(irq, d->host_data); + + return 0; +} + -+static const struct irq_domain_ops k1x_gpio_irq_domain_ops = { -+ .map = k1x_irq_domain_map, ++static const struct irq_domain_ops k1_gpio_irq_domain_ops = { ++ .map = k1_irq_domain_map, + .xlate = irq_domain_xlate_twocell, +}; + -+static int k1x_gpio_probe_dt(struct platform_device *pdev, struct k1x_gpio_chip *k1x_chip) ++static int k1_gpio_probe_dt(struct platform_device *pdev, struct k1_gpio_chip *k1_chip) +{ + u32 offset; + int i, nbank, ret; @@ -57608,10 +65310,10 @@ index 000000000000..4491a9ca4169 + if (nbank == 0) + return -EINVAL; + -+ k1x_chip->banks = devm_kzalloc(&pdev->dev, -+ sizeof(*k1x_chip->banks) * nbank, ++ k1_chip->banks = devm_kzalloc(&pdev->dev, ++ sizeof(*k1_chip->banks) * nbank, + GFP_KERNEL); -+ if (!k1x_chip->banks) ++ if (!k1_chip->banks) + return -ENOMEM; + + i = 0; @@ -57621,22 +65323,22 @@ index 000000000000..4491a9ca4169 + of_node_put(child); + return ret; + } -+ 
k1x_chip->banks[i].reg_bank = k1x_chip->reg_base + offset; ++ k1_chip->banks[i].reg_bank = k1_chip->reg_base + offset; + i++; + } + -+ k1x_chip->nbank = nbank; -+ k1x_chip->ngpio = k1x_chip->nbank * K1X_BANK_GPIO_NUMBER; ++ k1_chip->nbank = nbank; ++ k1_chip->ngpio = k1_chip->nbank * K1_BANK_GPIO_NUMBER; + + return 0; +} + -+static int k1x_gpio_probe(struct platform_device *pdev) ++static int k1_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np; -+ struct k1x_gpio_chip *k1x_chip; -+ struct k1x_gpio_bank *bank; ++ struct k1_gpio_chip *k1_chip; ++ struct k1_gpio_bank *bank; + struct resource *res; + struct irq_domain *domain; + struct clk *clk; @@ -57648,8 +65350,8 @@ index 000000000000..4491a9ca4169 + if (!np) + return -EINVAL; + -+ k1x_chip = devm_kzalloc(dev, sizeof(*k1x_chip), GFP_KERNEL); -+ if (!k1x_chip) ++ k1_chip = devm_kzalloc(dev, sizeof(*k1_chip), GFP_KERNEL); ++ if (!k1_chip) + return -ENOMEM; + + irq = platform_get_irq(pdev, 0); @@ -57663,10 +65365,10 @@ index 000000000000..4491a9ca4169 + if (!base) + return -EINVAL; + -+ k1x_chip->irq = irq; -+ k1x_chip->reg_base = base; ++ k1_chip->irq = irq; ++ k1_chip->reg_base = base; + -+ ret = k1x_gpio_probe_dt(pdev, k1x_chip); ++ ret = k1_gpio_probe_dt(pdev, k1_chip); + if (ret) { + dev_err(dev, "Fail to initialize gpio unit, error %d.\n", ret); + return ret; @@ -57684,40 +65386,40 @@ index 000000000000..4491a9ca4169 + return ret; + } + -+ domain = irq_domain_add_linear(np, k1x_chip->ngpio, &k1x_gpio_irq_domain_ops, k1x_chip); ++ domain = irq_domain_add_linear(np, k1_chip->ngpio, &k1_gpio_irq_domain_ops, k1_chip); + if (!domain) + return -EINVAL; + -+ k1x_chip->domain = domain; ++ k1_chip->domain = domain; + + /* Initialize the gpio chip */ -+ k1x_chip->chip.label = "k1x-gpio"; -+ k1x_chip->chip.request = gpiochip_generic_request; -+ k1x_chip->chip.free = gpiochip_generic_free; -+ k1x_chip->chip.direction_input = k1x_gpio_direction_input; -+ 
k1x_chip->chip.direction_output = k1x_gpio_direction_output; -+ k1x_chip->chip.get = k1x_gpio_get; -+ k1x_chip->chip.set = k1x_gpio_set; -+ k1x_chip->chip.to_irq = k1x_gpio_to_irq; ++ k1_chip->chip.label = "k1-gpio"; ++ k1_chip->chip.request = gpiochip_generic_request; ++ k1_chip->chip.free = gpiochip_generic_free; ++ k1_chip->chip.direction_input = k1_gpio_direction_input; ++ k1_chip->chip.direction_output = k1_gpio_direction_output; ++ k1_chip->chip.get = k1_gpio_get; ++ k1_chip->chip.set = k1_gpio_set; ++ k1_chip->chip.to_irq = k1_gpio_to_irq; +#ifdef CONFIG_OF_GPIO -+ k1x_chip->chip.fwnode = of_fwnode_handle(np); -+ k1x_chip->chip.of_xlate = k1x_gpio_of_xlate; -+ k1x_chip->chip.of_gpio_n_cells = 2; ++ k1_chip->chip.fwnode = of_fwnode_handle(np); ++ k1_chip->chip.of_xlate = k1_gpio_of_xlate; ++ k1_chip->chip.of_gpio_n_cells = 2; +#endif -+ k1x_chip->chip.ngpio = k1x_chip->ngpio; ++ k1_chip->chip.ngpio = k1_chip->ngpio; + -+ if (devm_request_irq(&pdev->dev, irq, k1x_gpio_demux_handler, 0, -+ k1x_chip->chip.label, k1x_chip)) { ++ if (devm_request_irq(&pdev->dev, irq, k1_gpio_demux_handler, 0, ++ k1_chip->chip.label, k1_chip)) { + dev_err(&pdev->dev, "failed to request high IRQ\n"); + ret = -ENOENT; + goto err; + } + -+ gpiochip_add(&k1x_chip->chip); ++ gpiochip_add(&k1_chip->chip); + + /* clear all GPIO edge detects */ -+ for (i = 0; i < k1x_chip->nbank; i++) { -+ bank = &k1x_chip->banks[i]; ++ for (i = 0; i < k1_chip->nbank; i++) { ++ bank = &k1_chip->banks[i]; + writel(0xffffffff, bank->reg_bank + GCFER); + writel(0xffffffff, bank->reg_bank + GCRER); + /* Unmask edge detection to AP. 
*/ @@ -57730,19 +65432,19 @@ index 000000000000..4491a9ca4169 + return ret; +} + -+static struct platform_driver k1x_gpio_driver = { -+ .probe = k1x_gpio_probe, ++static struct platform_driver k1_gpio_driver = { ++ .probe = k1_gpio_probe, + .driver = { -+ .name = "k1x-gpio", -+ .of_match_table = k1x_gpio_dt_ids, ++ .name = "k1-gpio", ++ .of_match_table = k1_gpio_dt_ids, + }, +}; + -+static int __init k1x_gpio_init(void) ++static int __init k1_gpio_init(void) +{ -+ return platform_driver_register(&k1x_gpio_driver); ++ return platform_driver_register(&k1_gpio_driver); +} -+subsys_initcall(k1x_gpio_init); ++subsys_initcall(k1_gpio_init); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index b882b26ab500..d502bb36434b 100644 --- a/drivers/gpio/gpio-pca953x.c @@ -57771,7 +65473,7 @@ index b882b26ab500..d502bb36434b 100644 .acpi_match_table = pca953x_acpi_ids, }, diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig -index d1cad875d2f7..191c700fde97 100644 +index 820b6689eaf3..05b4aa3b1f72 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -390,6 +390,10 @@ source "drivers/gpu/drm/sprd/Kconfig" @@ -57803,7 +65505,7 @@ index f93fd0ac8661..caff5405c5f4 100644 obj-$(CONFIG_DRM_PHYTIUM) += phytium/ +obj-$(CONFIG_DRM_VERISILICON) += verisilicon/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c -index 8136e49cb6d1..9a5b5dc210ba 100644 +index eb801df5faa9..c650027063a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1109,6 +1109,8 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) @@ -212022,7 +219724,7 @@ index 000000000000..99b5c3365fb0 +#endif /* !defined(PHYSMEM_DMABUF_H) */ diff --git a/drivers/gpu/drm/img-rogue/physmem_hostmem.c b/drivers/gpu/drm/img-rogue/physmem_hostmem.c new file mode 100644 -index 000000000000..2f1dc409301a +index 000000000000..33d8d649a818 --- /dev/null +++ 
b/drivers/gpu/drm/img-rogue/physmem_hostmem.c @@ -0,0 +1,206 @@ @@ -212094,9 +219796,9 @@ index 000000000000..2f1dc409301a +static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs = +{ + /* pfnCpuPAddrToDevPAddr */ -+ HostMemCpuPAddrToDevPAddr, ++ .pfnCpuPAddrToDevPAddr = HostMemCpuPAddrToDevPAddr, + /* pfnDevPAddrToCpuPAddr */ -+ HostMemDevPAddrToCpuPAddr, ++ .pfnDevPAddrToCpuPAddr = HostMemDevPAddrToCpuPAddr +}; + +static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[]; @@ -212305,7 +220007,7 @@ index 000000000000..cfa453de343a +#endif /* !defined(PHYSMEM_HOSTMEM_H) */ diff --git a/drivers/gpu/drm/img-rogue/physmem_lma.c b/drivers/gpu/drm/img-rogue/physmem_lma.c new file mode 100644 -index 000000000000..4fa61ac18fcf +index 000000000000..3fd806966436 --- /dev/null +++ b/drivers/gpu/drm/img-rogue/physmem_lma.c @@ -0,0 +1,2003 @@ @@ -214154,31 +221856,31 @@ index 000000000000..4fa61ac18fcf + +static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = { + /* pfnLockPhysAddresses */ -+ &PMRLockSysPhysAddressesLocalMem, ++ .pfnLockPhysAddresses = &PMRLockSysPhysAddressesLocalMem, + /* pfnUnlockPhysAddresses */ -+ &PMRUnlockSysPhysAddressesLocalMem, ++ .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesLocalMem, + /* pfnDevPhysAddr */ -+ &PMRSysPhysAddrLocalMem, ++ .pfnDevPhysAddr = &PMRSysPhysAddrLocalMem, + /* pfnAcquireKernelMappingData */ -+ &PMRAcquireKernelMappingDataLocalMem, ++ .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataLocalMem, + /* pfnReleaseKernelMappingData */ -+ &PMRReleaseKernelMappingDataLocalMem, ++ .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataLocalMem, + /* pfnReadBytes */ -+ &PMRReadBytesLocalMem, ++ .pfnReadBytes = &PMRReadBytesLocalMem, + /* pfnWriteBytes */ -+ &PMRWriteBytesLocalMem, ++ .pfnWriteBytes = &PMRWriteBytesLocalMem, + /* pfnUnpinMem */ -+ NULL, ++ .pfnUnpinMem = NULL, + /* pfnPinMem */ -+ NULL, ++ .pfnPinMem = NULL, + /* pfnChangeSparseMem*/ -+ &PMRChangeSparseMemLocalMem, ++ .pfnChangeSparseMem = &PMRChangeSparseMemLocalMem, + /* 
pfnChangeSparseMemCPUMap */ -+ &PMRChangeSparseMemCPUMapLocalMem, ++ .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapLocalMem, + /* pfnMMap */ -+ NULL, ++ .pfnMMap = NULL, + /* pfnFinalize */ -+ &PMRFinalizeLocalMem ++ .pfnFinalize = &PMRFinalizeLocalMem +}; + +PVRSRV_ERROR @@ -356108,7 +363810,7 @@ index 000000000000..934974834e50 +#endif /* !defined(SYSCOMMON_H) */ diff --git a/drivers/gpu/drm/img-rogue/sysconfig.c b/drivers/gpu/drm/img-rogue/sysconfig.c new file mode 100644 -index 000000000000..aedd2f8c9dd2 +index 000000000000..cc97421bb505 --- /dev/null +++ b/drivers/gpu/drm/img-rogue/sysconfig.c @@ -0,0 +1,462 @@ @@ -356265,9 +363967,9 @@ index 000000000000..aedd2f8c9dd2 +static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = +{ + /* pfnCpuPAddrToDevPAddr */ -+ UMAPhysHeapCpuPAddrToDevPAddr, ++ .pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr, + /* pfnDevPAddrToCpuPAddr */ -+ UMAPhysHeapDevPAddrToCpuPAddr, ++ .pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr, +}; + +static PVRSRV_ERROR PhysHeapsCreate(PHYS_HEAP_CONFIG **ppasPhysHeapsOut, @@ -363328,7 +371030,7 @@ index 000000000000..58223a0032e9 +******************************************************************************/ diff --git a/drivers/gpu/drm/img-rogue/vmm_type_stub.c b/drivers/gpu/drm/img-rogue/vmm_type_stub.c new file mode 100644 -index 000000000000..747bf4a8e1f1 +index 000000000000..78892ce6f39a --- /dev/null +++ b/drivers/gpu/drm/img-rogue/vmm_type_stub.c @@ -0,0 +1,119 @@ @@ -363409,29 +371111,29 @@ index 000000000000..747bf4a8e1f1 +{ + .sClientFuncTab = { + /* pfnMapDevPhysHeap */ -+ &StubVMMMapDevPhysHeap, ++ .pfnMapDevPhysHeap = &StubVMMMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ -+ &StubVMMUnmapDevPhysHeap ++ .pfnUnmapDevPhysHeap = &StubVMMUnmapDevPhysHeap + }, + + .sServerFuncTab = { + /* pfnMapDevPhysHeap */ -+ &PvzServerMapDevPhysHeap, ++ .pfnMapDevPhysHeap = &PvzServerMapDevPhysHeap, + + /* pfnUnmapDevPhysHeap */ -+ &PvzServerUnmapDevPhysHeap ++ .pfnUnmapDevPhysHeap = 
&PvzServerUnmapDevPhysHeap + }, + + .sVmmFuncTab = { + /* pfnOnVmOnline */ -+ &PvzServerOnVmOnline, ++ .pfnOnVmOnline = &PvzServerOnVmOnline, + + /* pfnOnVmOffline */ -+ &PvzServerOnVmOffline, ++ .pfnOnVmOffline = &PvzServerOnVmOffline, + + /* pfnVMMConfigure */ -+ &PvzServerVMMConfigure ++ .pfnVMMConfigure = &PvzServerVMMConfigure + } +}; + @@ -364627,6 +372329,41 @@ index 000000000000..06af8d97ede4 +void xuantie_mfg_disable(struct gpu_plat_if *mfg); + +#endif /* XUANTIE_SYS_H*/ +diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c +index d5512037c38b..2a94e82316f9 100644 +--- a/drivers/gpu/drm/msm/msm_iommu.c ++++ b/drivers/gpu/drm/msm/msm_iommu.c +@@ -407,10 +407,13 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks) + struct msm_iommu *iommu; + int ret; + +- domain = iommu_domain_alloc(dev->bus); +- if (!domain) ++ if (!device_iommu_mapped(dev)) + return NULL; + ++ domain = iommu_paging_domain_alloc(dev); ++ if (IS_ERR(domain)) ++ return ERR_CAST(domain); ++ + iommu_set_pgtable_quirks(domain, quirks); + + iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +index 87caa4a72921..763c4c2925f9 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +@@ -120,8 +120,8 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) + mutex_init(&tdev->iommu.mutex); + + if (device_iommu_mapped(dev)) { +- tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); +- if (!tdev->iommu.domain) ++ tdev->iommu.domain = iommu_paging_domain_alloc(dev); ++ if (IS_ERR(tdev->iommu.domain)) + goto error; + + /* diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index 3a2f4a9f1d46..974b66e92fdb 100644 --- a/drivers/gpu/drm/panel/Kconfig @@ -365447,6 +373184,54 @@ index 9961251b44ba..4fb67e6c583b 100644 /* force MSI on */ if 
(radeon_msi == 1) +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +index ab55d7132550..52126ffb9280 100644 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +@@ -103,13 +103,17 @@ static int rockchip_drm_init_iommu(struct drm_device *drm_dev) + struct rockchip_drm_private *private = drm_dev->dev_private; + struct iommu_domain_geometry *geometry; + u64 start, end; ++ int ret; + + if (IS_ERR_OR_NULL(private->iommu_dev)) + return 0; + +- private->domain = iommu_domain_alloc(private->iommu_dev->bus); +- if (!private->domain) +- return -ENOMEM; ++ private->domain = iommu_paging_domain_alloc(private->iommu_dev); ++ if (IS_ERR(private->domain)) { ++ ret = PTR_ERR(private->domain); ++ private->domain = NULL; ++ return ret; ++ } + + geometry = &private->domain->geometry; + start = geometry->aperture_start; +diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c +index 373bcd79257e..9dcf95640cf7 100644 +--- a/drivers/gpu/drm/tegra/drm.c ++++ b/drivers/gpu/drm/tegra/drm.c +@@ -1134,6 +1134,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) + + static int host1x_drm_probe(struct host1x_device *dev) + { ++ struct device *dma_dev = dev->dev.parent; + struct tegra_drm *tegra; + struct drm_device *drm; + int err; +@@ -1148,8 +1149,8 @@ static int host1x_drm_probe(struct host1x_device *dev) + goto put; + } + +- if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) { +- tegra->domain = iommu_domain_alloc(&platform_bus_type); ++ if (host1x_drm_wants_iommu(dev) && device_iommu_mapped(dma_dev)) { ++ tegra->domain = iommu_paging_domain_alloc(dma_dev); + if (!tegra->domain) { + err = -ENOMEM; + goto free; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 0b3f4267130c..f469067c8187 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -376950,6 +384735,24 @@ index 000000000000..bc39aef686af + +extern 
struct platform_driver virtual_display_platform_driver; +#endif /* __VS_VIRTUAL_H_ */ +diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c +index 7c6699aed7d2..5b4d41ed8e95 100644 +--- a/drivers/gpu/host1x/dev.c ++++ b/drivers/gpu/host1x/dev.c +@@ -379,9 +379,10 @@ static struct iommu_domain *host1x_iommu_attach(struct host1x *host) + if (err < 0) + goto put_group; + +- host->domain = iommu_domain_alloc(&platform_bus_type); +- if (!host->domain) { +- err = -ENOMEM; ++ host->domain = iommu_paging_domain_alloc(host->dev); ++ if (IS_ERR(host->domain)) { ++ err = PTR_ERR(host->domain); ++ host->domain = NULL; + goto put_cache; + } + diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c index 50a8b9c3f94d..5df34cb5e05a 100644 --- a/drivers/hwmon/mr75203.c @@ -377193,7 +384996,7 @@ index 000000000000..913746dc5387 + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig -index 585e7e4a1875..cf688f7c698e 100644 +index b9c71741165b..2acaeb506236 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -303,6 +303,14 @@ config I2C_SIS96X @@ -377211,8 +385014,30 @@ index 585e7e4a1875..cf688f7c698e 100644 config I2C_VIA tristate "VIA VT82C586B" depends on PCI +@@ -628,6 +636,21 @@ config I2C_DESIGNWARE_PCI + This driver can also be built as a module. If so, the module + will be called i2c-designware-pci. + ++config I2C_LRW_CORE ++ tristate ++ select REGMAP ++ ++config I2C_LRW_PLATFORM ++ tristate "LRW Platform" ++ depends on COMMON_CLK ++ select I2C_LRW_CORE ++ help ++ If you say yes to this option, support will be included for the ++ LRW I2C adapter. ++ ++ This driver can also be built as a module. If so, the module ++ will be called i2c-lrw-platform. 
++ + config I2C_DIGICOLOR + tristate "Conexant Digicolor I2C driver" + depends on ARCH_DIGICOLOR || COMPILE_TEST diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile -index 738519b0a9cb..8f0fa01f5498 100644 +index 738519b0a9cb..a4598474fc82 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -27,6 +27,7 @@ obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o @@ -377231,6 +385056,17 @@ index 738519b0a9cb..8f0fa01f5498 100644 i2c-designware-core-y += i2c-designware-master.o i2c-designware-core-$(CONFIG_I2C_DESIGNWARE_SLAVE) += i2c-designware-slave.o obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o +@@ -63,6 +65,10 @@ i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_AMDPSP) += i2c-designware-amdpsp + i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o + obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o + i2c-designware-pci-y := i2c-designware-pcidrv.o ++obj-$(CONFIG_I2C_LRW_CORE) += i2c-lrw-core.o ++i2c-lrw-core-y += i2c-lrw-master.o ++obj-$(CONFIG_I2C_LRW_PLATFORM) += i2c-lrw-platform.o ++i2c-lrw-platform-y := i2c-lrw-platdrv.o + obj-$(CONFIG_I2C_DIGICOLOR) += i2c-digicolor.o + obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o + obj-$(CONFIG_I2C_EMEV2) += i2c-emev2.o diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index c283743916fe..0bc9f45ebc54 100644 --- a/drivers/i2c/busses/i2c-designware-common.c @@ -377879,6 +385715,1684 @@ index 11a75130a109..dfd49670d6d2 100644 ret = i2c_dw_validate_speed(dev); if (ret) +diff --git a/drivers/i2c/busses/i2c-lrw-core.h b/drivers/i2c/busses/i2c-lrw-core.h +new file mode 100644 +index 000000000000..474206810dd8 +--- /dev/null ++++ b/drivers/i2c/busses/i2c-lrw-core.h +@@ -0,0 +1,350 @@ ++/* SPDX-License-Identifier: GPL-2.0+ */ ++/* ++ * I2C adapter driver for LRW ++ * ++ * Copyright (c) 2025, LRW CORPORATION. All rights reserved. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define LRW_IC_DEFAULT_FUNCTIONALITY \ ++ (I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | \ ++ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | \ ++ I2C_FUNC_SMBUS_I2C_BLOCK) ++ ++#define I2C_TXFLR_MASK 0xFF ++#define I2C_RXFLR_MASK 0xFF ++#define I2C_RXFLR_SHIFT 16 ++#define I2C_FIFO_DEPTH 32 ++ ++#define LRW_IC_CON_MASTER BIT(0) ++#define LRW_IC_CON_SPEED_STD_FAST (0 << 1) ++#define LRW_IC_CON_SPEED_HIGH (1 << 1) ++#define LRW_IC_CON_SPEED_MASK BIT(1) ++#define LRW_IC_CON_10BITADDR_SLAVE BIT(2) ++#define LRW_IC_CON_10BITADDR_MASTER BIT(3) ++#define LRW_IC_CON_RESTART_EN BIT(4) ++#define LRW_IC_CON_STOP_DET_IFADDRESSED BIT(5) ++#define LRW_IC_CON_TX_EMPTY_CTRL BIT(6) ++#define LRW_IC_CON_RX_FIFO_FULL_HLD_CTRL BIT(7) ++#define IC_CON_BUS_CLEAR_CTRL_POS BIT(8) ++ ++#define LRW_IC_DATA_CMD_DAT GENMASK(7, 0) ++ ++/* ++ * Registers offset ++ */ ++#define LRW_IC_VERSION 0x0 ++#define LRW_IC_CON 0x04 ++#define LRW_IC_ENABLE 0x08 ++#define LRW_IC_DATA_CMD 0x0c ++ ++#define LRW_IC_FS_SCL_HCNT 0x10 ++#define LRW_IC_FS_SCL_LCNT 0x14 ++#define LRW_IC_FS_SPKLEN 0x18 ++#define LRW_IC_HS_SCL_HCNT 0x1c ++#define LRW_IC_HS_SCL_LCNT 0x20 ++#define LRW_IC_HS_SPKLEN 0x24 ++#define LRW_IC_HS_CTRCODE 0x28 ++#define LRW_IC_TAR 0x2c ++#define LRW_IC_SAR1 0x30 ++#define LRW_IC_SAR2 0x34 ++#define LRW_IC_SAR3 0x38 ++#define LRW_IC_SAR4 0x3c ++#define LRW_IC_RX_TL 0x40 ++#define LRW_IC_TX_TL 0x44 ++#define LRW_IC_FIFO_LEVEL 0x48 ++#define LRW_IC_TX_ABRT_SOURCE 0x4c ++#define LRW_IC_CLR_RAW_INTR 0x50 ++#define LRW_IC_INTR_MASK 0x54 ++#define LRW_IC_INTR_STAT 0x58 ++#define LRW_IC_DMA_CTRL 0x5c ++#define LRW_IC_SDA_SETUP 0x60 ++#define LRW_IC_SDA_HOLD 0x64 ++#define LRW_IC_BUS_FREE 0x68 ++ ++#define LRW_IC_START_HOLD 0x6c ++#define LRW_IC_RESTART_HOLD_HS 0x70 ++#define LRW_IC_RESTART_SETUP 0x74 ++#define LRW_IC_STOP_SETUP 0x78 ++ ++#define 
LRW_IC_SCL_STUCK_AT_LOW_TIMEOUT_MIN 0x7c ++#define LRW_IC_SCL_STUCK_AT_LOW_TIMEOUT_MAX 0x80 ++#define LRW_IC_SDA_STUCK_AT_LOW_TIMEOUT 0x84 ++#define LRW_IC_REG_TIMEOUT_RST 0x88 ++#define LRW_IC_STATUS 0x8c ++#define LRW_IC_DEBUG 0x90 ++ ++#define LRW_SMBUS_CLR_RAW_INTR 0xa4 ++#define LRW_SMBUS_INTR_MASK 0xa8 ++#define LRW_SMBUS_INTR_STAT 0xac ++ ++#define LRW_IC_CLR_INTR BIT(31) ++ ++#define LRW_IC_SDA_HOLD_MIN_VERS 0x1C240615 ++#define LRW_IC_INTR_RX_OVER BIT(0) ++#define LRW_IC_INTR_RX_FULL BIT(1) ++#define LRW_IC_INTR_TX_EMPTY BIT(2) ++#define LRW_IC_INTR_RD_REQ BIT(3) ++#define LRW_IC_INTR_TX_ABRT BIT(4) ++#define LRW_IC_INTR_RX_DONE BIT(5) ++#define LRW_IC_INTR_ACTIVITY BIT(6) ++#define LRW_IC_INTR_STOP_DET BIT(7) ++#define LRW_IC_INTR_START_DET BIT(8) ++#define LRW_IC_INTR_GEN_CALL BIT(9) ++#define LRW_IC_INTR_RESTART_DET BIT(10) ++#define LRW_IC_INTR_MST_ON_HOLD BIT(11) ++ ++#define LRW_IC_INTR_DEFAULT_MASK \ ++ (LRW_IC_INTR_RX_FULL | LRW_IC_INTR_TX_ABRT | LRW_IC_INTR_STOP_DET) ++#define LRW_IC_INTR_MASTER_MASK \ ++ (LRW_IC_INTR_DEFAULT_MASK | LRW_IC_INTR_TX_EMPTY) ++#define LRW_IC_INTR_SLAVE_MASK \ ++ (LRW_IC_INTR_DEFAULT_MASK | LRW_IC_INTR_RX_DONE | LRW_IC_INTR_RD_REQ) ++ ++#define LRW_IC_ENABLE_ABORT BIT(1) ++ ++#define LRW_IC_STATUS_ACTIVITY 0x1 ++#define LRW_IC_STATUS_TFE BIT(2) ++#define LRW_IC_STATUS_MASTER_ACTIVITY BIT(5) ++#define LRW_IC_STATUS_SLAVE_ACTIVITY BIT(6) ++ ++#define LRW_IC_SDA_HOLD_RX_SHIFT 16 ++#define LRW_IC_SDA_HOLD_RX_MASK GENMASK(23, LRW_IC_SDA_HOLD_RX_SHIFT) ++ ++#define LRW_IC_ERR_TX_ABRT 0x1 ++#define I2C_SCL_TIMEOUT_ERROR 0x2 ++ ++#define LRW_IC_TAR_10BITADDR_MASTER BIT(12) ++ ++#define IC_INTR_SCL_STUCK_AT_LOW_MIN BIT(12) ++/*IC_TX_ABRT_SOURCE*/ ++#define IC_ABRT_SDA_STUCK_AT_LOW_POS BIT(17) ++/* IC_ENABLE */ ++#define IC_SDA_STUCK_RECOVERY_ENABLE_POS BIT(3) ++#define IC_SDA_STUCK_AT_LOW_RECOVERIED 0x0 ++ ++/* ++ * Sofware status flags ++ */ ++#define STATUS_ACTIVE BIT(0) ++#define STATUS_WRITE_IN_PROGRESS BIT(1) ++#define 
STATUS_READ_IN_PROGRESS BIT(2) ++#define STATUS_MASK GENMASK(2, 0) ++ ++/* ++ * operation modes ++ */ ++#define LRW_IC_MASTER 0 ++#define LRW_IC_SLAVE 1 ++ ++/* ++ * Hardware abort codes from the LRW_IC_TX_ABRT_SOURCE register ++ * ++ * Only expected abort codes are listed here ++ * refer to the datasheet for the full list ++ */ ++#define ABRT_7B_ADDR_NOACK 0 ++#define ABRT_10ADDR1_NOACK 1 ++#define ABRT_10ADDR2_NOACK 2 ++#define ABRT_TXDATA_NOACK 3 ++#define ABRT_GCALL_NOACK 4 ++#define ABRT_GCALL_READ 5 ++#define ABRT_HS_ACK_DET 6 ++#define ABRT_SBYTE_ACKDET 7 ++#define ABRT_HS_NORSTRT 8 ++#define ABRT_SBYTE_NORSTRT 9 ++#define ABRT_10B_RD_NORSTRT 10 ++#define ABRT_MASTER_DIS 11 ++#define ARB_LOST 12 ++#define ABRT_SLAVE_FLUSH_TXFIFO 13 ++#define ABRT_SLAVE_ARBLOST 14 ++#define ABRT_SLAVE_RD_INTX 15 ++#define ABRT_USER_ABRT 16 ++#define ABRT_SDA_STUCK_AT_LOW 17 ++ ++#define LRW_IC_TX_ABRT_7B_ADDR_NOACK BIT(ABRT_7B_ADDR_NOACK) ++#define LRW_IC_TX_ABRT_10ADDR1_NOACK BIT(ABRT_10ADDR1_NOACK) ++#define LRW_IC_TX_ABRT_10ADDR2_NOACK BIT(ABRT_10ADDR2_NOACK) ++#define LRW_IC_TX_ABRT_TXDATA_NOACK BIT(ABRT_TXDATA_NOACK) ++#define LRW_IC_TX_ABRT_GCALL_NOACK BIT(ABRT_GCALL_NOACK) ++#define LRW_IC_TX_ABRT_GCALL_READ BIT(ABRT_GCALL_READ) ++#define LRW_IC_TX_ABRT_SBYTE_ACKDET BIT(ABRT_SBYTE_ACKDET) ++#define LRW_IC_TX_ABRT_SBYTE_NORSTRT BIT(ABRT_SBYTE_NORSTRT) ++#define LRW_IC_TX_ABRT_10B_RD_NORSTRT BIT(ABRT_10B_RD_NORSTRT) ++#define LRW_IC_TX_ABRT_MASTER_DIS BIT(ABRT_MASTER_DIS) ++#define LRW_IC_TX_ARB_LOST BIT(ARB_LOST) ++#define LRW_IC_TX_ABRT_SDA_STUCK_AT_LOW BIT(ABRT_SDA_STUCK_AT_LOW) ++#define LRW_IC_RX_ABRT_SLAVE_RD_INTX BIT(ABRT_SLAVE_RD_INTX) ++#define LRW_IC_RX_ABRT_SLAVE_ARBLOST BIT(ABRT_SLAVE_ARBLOST) ++#define LRW_IC_RX_ABRT_SLAVE_FLUSH_TXFIFO BIT(ABRT_SLAVE_FLUSH_TXFIFO) ++ ++#define LRW_IC_TX_ABRT_NOACK \ ++ (LRW_IC_TX_ABRT_7B_ADDR_NOACK | LRW_IC_TX_ABRT_10ADDR1_NOACK | \ ++ LRW_IC_TX_ABRT_10ADDR2_NOACK | LRW_IC_TX_ABRT_TXDATA_NOACK | \ ++ 
LRW_IC_TX_ABRT_GCALL_NOACK) ++ ++struct clk; ++struct device; ++struct reset_control; ++ ++/** ++ * struct lrw_i2c_dev - private i2c-lrw data ++ * @dev: driver model device node ++ * @map: IO registers map ++ * @sysmap: System controller registers map ++ * @base: IO registers pointer ++ * @ext: Extended IO registers pointer ++ * @cmd_complete: tx completion indicator ++ * @clk: input reference clock ++ * @pclk: clock required to access the registers ++ * @rst: optional reset for the controller ++ * @slave: represent an I2C slave device ++ * @get_clk_rate_khz: callback to retrieve IP specific bus speed ++ * @cmd_err: run time hadware error code ++ * @msgs: points to an array of messages currently being transferred ++ * @msgs_num: the number of elements in msgs ++ * @msg_write_idx: the element index of the current tx message in the msgs array ++ * @tx_buf_len: the length of the current tx buffer ++ * @tx_buf: the current tx buffer ++ * @msg_read_idx: the element index of the current rx message in the msgs array ++ * @rx_buf_len: the length of the current rx buffer ++ * @rx_buf: the current rx buffer ++ * @msg_err: error status of the current transfer ++ * @status: i2c master status, one of STATUS_* ++ * @abort_source: copy of the TX_ABRT_SOURCE register ++ * @irq: interrupt number for the i2c master ++ * @flags: platform specific flags like type of IO accessors or model ++ * @adapter: i2c subsystem adapter node ++ * @functionality: I2C_FUNC_* ORed bits to reflect what controller does support ++ * @master_cfg: configuration for the master device ++ * @tx_fifo_depth: depth of the hardware tx fifo ++ * @rx_fifo_depth: depth of the hardware rx fifo ++ * @rx_outstanding: current master-rx elements in tx fifo ++ * @timings: bus clock frequency, SDA hold and other timings ++ * @sda_hold_time: SDA hold value ++ * @fs_hcnt: fast speed HCNT value ++ * @fs_lcnt: fast speed LCNT value ++ * @hs_hcnt: high speed HCNT value ++ * @hs_lcnt: high speed LCNT value ++ * @acquire_lock: 
function to acquire a hardware lock on the bus ++ * @release_lock: function to release a hardware lock on the bus ++ * @semaphore_idx: Index of table with semaphore type attached to the bus. It's ++ * -1 if there is no semaphore. ++ * @shared_with_punit: true if this bus is shared with the SoCs PUNIT ++ * @disable: function to disable the controller ++ * @init: function to initialize the I2C hardware ++ * @set_sda_hold_time: callback to retrieve IP specific SDA hold timing ++ * @rinfo: I²C GPIO recovery information ++ * ++ * HCNT and LCNT parameters can be used if the platform knows more accurate ++ * values than the one computed based only on the input clock frequency. ++ * Leave them to be %0 if not used. ++ */ ++struct i2c_lrw_dev { ++ struct device *dev; ++ struct regmap *map; ++ struct regmap *sysmap; ++ void __iomem *base; ++ void __iomem *ext; ++ struct completion cmd_complete; ++ struct clk *clk; ++ struct clk *pclk; ++ struct reset_control *rst; ++ struct i2c_client *slave; ++ u32 (*get_clk_rate_khz)(struct i2c_lrw_dev *dev); ++ int cmd_err; ++ struct i2c_msg *msgs; ++ int msgs_num; ++ int msg_write_idx; ++ u32 tx_buf_len; ++ u8 *tx_buf; ++ int msg_read_idx; ++ u32 rx_buf_len; ++ u8 *rx_buf; ++ int msg_err; ++ unsigned int status; ++ unsigned int abort_source; ++ int irq; ++ u32 flags; ++ struct i2c_adapter adapter; ++ u32 functionality; ++ u32 master_cfg; ++ unsigned int tx_fifo_depth; ++ unsigned int rx_fifo_depth; ++ int rx_outstanding; ++ struct i2c_timings timings; ++ u32 sda_hold_time; ++ u32 sda_stuck_at_low_timeout; ++ u16 fs_hcnt; ++ u16 fs_lcnt; ++ u16 hs_hcnt; ++ u16 hs_lcnt; ++ int (*acquire_lock)(void); ++ void (*release_lock)(void); ++ int semaphore_idx; ++ void (*disable)(struct i2c_lrw_dev *dev); ++ int (*init)(struct i2c_lrw_dev *dev); ++ int (*set_sda_hold_time)(struct i2c_lrw_dev *dev); ++ struct i2c_bus_recovery_info rinfo; ++}; ++ ++#define MODEL_MASK GENMASK(11, 8) ++ ++int i2c_lrw_prepare_clk(struct i2c_lrw_dev *dev, bool prepare); 
++ ++static inline void __i2c_lrw_enable(struct i2c_lrw_dev *dev) ++{ ++ dev->status |= STATUS_ACTIVE; ++ regmap_write(dev->map, LRW_IC_ENABLE, 1); ++} ++ ++static inline void __i2c_lrw_disable_nowait(struct i2c_lrw_dev *dev) ++{ ++ regmap_write(dev->map, LRW_IC_ENABLE, 0); ++ dev->status &= ~STATUS_ACTIVE; ++} ++ ++void __i2c_lrw_disable(struct i2c_lrw_dev *dev); ++ ++extern void i2c_lrw_configure_master(struct i2c_lrw_dev *dev); ++extern int i2c_lrw_probe_master(struct i2c_lrw_dev *dev); ++ ++static inline int i2c_lrw_probe(struct i2c_lrw_dev *dev) ++{ ++ return i2c_lrw_probe_master(dev); ++} ++ ++static inline void i2c_lrw_configure(struct i2c_lrw_dev *dev) ++{ ++ i2c_lrw_configure_master(dev); ++} ++ ++static inline int i2c_lrw_probe_lock_support(struct i2c_lrw_dev *dev) ++{ ++ return 0; ++} ++ ++int i2c_lrw_validate_speed(struct i2c_lrw_dev *dev); ++void i2c_lrw_adjust_bus_speed(struct i2c_lrw_dev *dev); ++ ++#if IS_ENABLED(CONFIG_ACPI) ++int i2c_lrw_acpi_configure(struct device *device); ++static inline int i2c_lrw_dt_configure(struct device *device) ++{ ++ return -ENODEV; ++} ++#else ++static inline int i2c_lrw_acpi_configure(struct device *device) ++{ ++ return -ENODEV; ++} ++int i2c_lrw_dt_configure(struct device *device); ++#endif +diff --git a/drivers/i2c/busses/i2c-lrw-master.c b/drivers/i2c/busses/i2c-lrw-master.c +new file mode 100644 +index 000000000000..ab87b033a1e6 +--- /dev/null ++++ b/drivers/i2c/busses/i2c-lrw-master.c +@@ -0,0 +1,1075 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * I2C adapter driver for LRW ++ * ++ * Copyright (c) 2025, LRW CORPORATION. All rights reserved. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "i2c-lrw-core.h" ++ ++static char *abort_sources[] = { ++ [ABRT_7B_ADDR_NOACK] = "slave address not acknowledged (7bit mode)", ++ [ABRT_10ADDR1_NOACK] = ++ "first address byte not acknowledged (10bit mode)", ++ [ABRT_10ADDR2_NOACK] = ++ "second address byte not acknowledged (10bit mode)", ++ [ABRT_TXDATA_NOACK] = "data not acknowledged", ++ [ABRT_GCALL_NOACK] = "no acknowledgment for a general call", ++ [ABRT_GCALL_READ] = "read after general call", ++ [ABRT_SBYTE_ACKDET] = "start byte acknowledged", ++ [ABRT_SBYTE_NORSTRT] = ++ "trying to send start byte when restart is disabled", ++ [ABRT_10B_RD_NORSTRT] = ++ "trying to read when restart is disabled (10bit mode)", ++ [ABRT_MASTER_DIS] = "trying to use disabled adapter", ++ [ARB_LOST] = "lost arbitration", ++ [ABRT_SLAVE_FLUSH_TXFIFO] = ++ "read command so flush old data in the TX FIFO", ++ [ABRT_SLAVE_ARBLOST] = ++ "slave lost the bus while transmitting data to a remote master", ++ [ABRT_SLAVE_RD_INTX] = "incorrect slave-transmitter mode configuration", ++ [ABRT_SDA_STUCK_AT_LOW] = "SDA line stuck at low timeout", ++}; ++ ++static int lrw_reg_read(void *context, unsigned int reg, unsigned int *val) ++{ ++ struct i2c_lrw_dev *dev = context; ++ ++ *val = readl_relaxed(dev->base + reg); ++ ++ return 0; ++} ++ ++static int lrw_reg_write(void *context, unsigned int reg, unsigned int val) ++{ ++ struct i2c_lrw_dev *dev = context; ++ ++ writel_relaxed(val, dev->base + reg); ++ ++ return 0; ++} ++ ++static int i2c_lrw_acquire_lock(struct i2c_lrw_dev *dev) ++{ ++ int ret; ++ ++ if (!dev->acquire_lock) ++ return 0; ++ ++ ret = dev->acquire_lock(); ++ if (!ret) ++ return 0; ++ ++ dev_err(dev->dev, "couldn't acquire bus ownership\n"); ++ ++ return ret; ++} ++ ++static void i2c_lrw_release_lock(struct 
i2c_lrw_dev *dev) ++{ ++ if (dev->release_lock) ++ dev->release_lock(); ++} ++ ++static int i2c_lrw_init_regmap(struct i2c_lrw_dev *dev) ++{ ++ struct regmap_config map_cfg = { ++ .reg_bits = 32, ++ .val_bits = 32, ++ .reg_stride = 4, ++ .disable_locking = true, ++ .reg_read = lrw_reg_read, ++ .reg_write = lrw_reg_write, ++ .max_register = LRW_SMBUS_INTR_STAT, ++ }; ++ ++ if (dev->map) ++ return 0; ++ ++ dev->map = devm_regmap_init(dev->dev, NULL, dev, &map_cfg); ++ if (IS_ERR(dev->map)) { ++ dev_err(dev->dev, "Failed to init the registers map\n"); ++ return PTR_ERR(dev->map); ++ } ++ ++ return 0; ++} ++ ++static const u32 supported_speeds[] = { ++ I2C_MAX_HIGH_SPEED_MODE_FREQ, ++ I2C_MAX_FAST_MODE_PLUS_FREQ, ++ I2C_MAX_FAST_MODE_FREQ, ++ I2C_MAX_STANDARD_MODE_FREQ, ++}; ++ ++int i2c_lrw_validate_speed(struct i2c_lrw_dev *dev) ++{ ++ struct i2c_timings *t = &dev->timings; ++ unsigned int i; ++ ++ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { ++ if (t->bus_freq_hz == supported_speeds[i]) ++ return 0; ++ } ++ ++ dev_err(dev->dev, ++ "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", ++ t->bus_freq_hz); ++ ++ return -EINVAL; ++} ++EXPORT_SYMBOL_GPL(i2c_lrw_validate_speed); ++ ++#ifdef CONFIG_ACPI ++ ++#include ++ ++static void i2c_lrw_acpi_params(struct device *device, char method[], u16 *hcnt, ++ u16 *lcnt, u32 *sda_hold, u32 *sda_stuck_timout) ++{ ++ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER }; ++ acpi_handle handle = ACPI_HANDLE(device); ++ union acpi_object *obj; ++ ++ if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf))) ++ return; ++ ++ obj = (union acpi_object *)buf.pointer; ++ if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 4) { ++ const union acpi_object *objs = obj->package.elements; ++ ++ *hcnt = (u16)objs[0].integer.value; ++ *lcnt = (u16)objs[1].integer.value; ++ *sda_hold = (u32)objs[2].integer.value; ++ *sda_stuck_timout = (u32)objs[3].integer.value; ++ } ++ kfree(buf.pointer); ++} ++ 
++int i2c_lrw_acpi_configure(struct device *device) ++{ ++ struct i2c_lrw_dev *dev = dev_get_drvdata(device); ++ struct i2c_timings *t = &dev->timings; ++ u32 hs_ht = 0, fs_ht = 0; ++ u32 sda_stuck_at_low_timeout; ++ ++ i2c_lrw_acpi_params(device, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, ++ &hs_ht, &sda_stuck_at_low_timeout); ++ i2c_lrw_acpi_params(device, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, ++ &fs_ht, &sda_stuck_at_low_timeout); ++ dev->sda_stuck_at_low_timeout = sda_stuck_at_low_timeout; ++ ++ switch (t->bus_freq_hz) { ++ case I2C_MAX_STANDARD_MODE_FREQ: ++ case I2C_MAX_FAST_MODE_FREQ: ++ case I2C_MAX_FAST_MODE_PLUS_FREQ: ++ dev->sda_hold_time = fs_ht; ++ break; ++ case I2C_MAX_HIGH_SPEED_MODE_FREQ: ++ dev->sda_hold_time = hs_ht; ++ break; ++ default: ++ dev_err(dev->dev, ++ "%d Hz is unsupported, only 100kHz, 400kHz, 1MHz and 3.4MHz are supported\n", ++ t->bus_freq_hz); ++ break; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(i2c_lrw_acpi_configure); ++ ++static u32 i2c_lrw_acpi_round_bus_speed(struct device *device) ++{ ++ u32 acpi_speed; ++ int i; ++ ++ acpi_speed = i2c_acpi_find_bus_speed(device); ++ ++ for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) { ++ if (acpi_speed >= supported_speeds[i]) ++ return supported_speeds[i]; ++ } ++ ++ return 0; ++} ++ ++#else /* CONFIG_ACPI */ ++ ++static inline u32 i2c_lrw_acpi_round_bus_speed(struct device *device) ++{ ++ return 0; ++} ++ ++int i2c_lrw_dt_configure(struct device *device) ++{ ++ struct i2c_lrw_dev *dev = dev_get_drvdata(device); ++ struct device_node *node = device->of_node; ++ u32 raw_property; ++ ++ if (node) { ++ of_property_read_u32(node, "hs_hcnt", &raw_property); ++ dev->hs_hcnt = (u16)raw_property; ++ of_property_read_u32(node, "hs_lcnt", &raw_property); ++ dev->hs_lcnt = (u16)raw_property; ++ of_property_read_u32(node, "fs_hcnt", &raw_property); ++ dev->fs_hcnt = (u16)raw_property; ++ of_property_read_u32(node, "fs_lcnt", &raw_property); ++ dev->fs_lcnt = (u16)raw_property; ++ 
of_property_read_u32(node, "sda_hold_time", ++ &dev->sda_hold_time); ++ of_property_read_u32(node, "sda_stuck_at_low_timeout", ++ &dev->sda_stuck_at_low_timeout); ++ } ++ dev_dbg(dev->dev, "dt dev param = %x, %x, %x, %x, %x, %x\n", ++ dev->hs_hcnt, dev->hs_lcnt, dev->fs_hcnt, dev->fs_lcnt, ++ dev->sda_hold_time, dev->sda_stuck_at_low_timeout); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(i2c_lrw_dt_configure); ++ ++#endif /* CONFIG_ACPI */ ++ ++void i2c_lrw_adjust_bus_speed(struct i2c_lrw_dev *dev) ++{ ++ u32 acpi_speed = i2c_lrw_acpi_round_bus_speed(dev->dev); ++ struct i2c_timings *t = &dev->timings; ++ ++ if (acpi_speed && t->bus_freq_hz) ++ t->bus_freq_hz = min(t->bus_freq_hz, acpi_speed); ++ else if (acpi_speed || t->bus_freq_hz) ++ t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed); ++ else ++ t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; ++} ++EXPORT_SYMBOL_GPL(i2c_lrw_adjust_bus_speed); ++ ++static u32 i2c_lrw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, ++ int offset) ++{ ++ if (cond) ++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * tSYMBOL, 1000000) - ++ 5 + offset; ++ else ++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tSYMBOL + tf), ++ 1000000) - 3 + offset; ++} ++ ++static u32 i2c_lrw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) ++{ ++ return DIV_ROUND_CLOSEST_ULL((u64)ic_clk * (tLOW + tf), 1000000) - 1 + ++ offset; ++} ++ ++static int i2c_lrw_set_sda_hold(struct i2c_lrw_dev *dev) ++{ ++ unsigned int reg; ++ int ret; ++ ++ ret = i2c_lrw_acquire_lock(dev); ++ if (ret) ++ return ret; ++ ++ ret = regmap_read(dev->map, LRW_IC_VERSION, ®); ++ if (ret) ++ goto err_release_lock; ++ ++ if (reg >= LRW_IC_SDA_HOLD_MIN_VERS) { ++ if (!dev->sda_hold_time) { ++ ret = regmap_read(dev->map, LRW_IC_SDA_HOLD, ++ &dev->sda_hold_time); ++ if (ret) ++ goto err_release_lock; ++ } ++ ++ if (!(dev->sda_hold_time & LRW_IC_SDA_HOLD_RX_MASK)) ++ dev->sda_hold_time |= 1 << LRW_IC_SDA_HOLD_RX_SHIFT; ++ ++ dev_dbg(dev->dev, "SDA Hold Time TX:RX = %d:%d\n", ++ dev->sda_hold_time & 
~(u32)LRW_IC_SDA_HOLD_RX_MASK, ++ dev->sda_hold_time >> LRW_IC_SDA_HOLD_RX_SHIFT); ++ } else if (dev->set_sda_hold_time) { ++ dev->set_sda_hold_time(dev); ++ } else if (dev->sda_hold_time) { ++ dev_warn(dev->dev, ++ "Hardware too old to adjust SDA hold time.\n"); ++ dev->sda_hold_time = 0; ++ } ++ ++err_release_lock: ++ i2c_lrw_release_lock(dev); ++ ++ return ret; ++} ++ ++void __i2c_lrw_disable(struct i2c_lrw_dev *dev) ++{ ++ unsigned int raw_intr_stats; ++ unsigned int enable; ++ int timeout = 100; ++ bool abort_needed; ++ unsigned int status; ++ int ret; ++ ++ regmap_read(dev->map, LRW_IC_CLR_RAW_INTR, &raw_intr_stats); ++ regmap_read(dev->map, LRW_IC_ENABLE, &enable); ++ ++ abort_needed = raw_intr_stats & LRW_IC_INTR_MST_ON_HOLD; ++ if (abort_needed) { ++ regmap_write(dev->map, LRW_IC_ENABLE, ++ enable | LRW_IC_ENABLE_ABORT); ++ ret = regmap_read_poll_timeout(dev->map, LRW_IC_ENABLE, enable, ++ !(enable & LRW_IC_ENABLE_ABORT), ++ 50, 200); ++ if (ret) ++ dev_err(dev->dev, ++ "timeout while trying to abort current transfer\n"); ++ } ++ ++ do { ++ __i2c_lrw_disable_nowait(dev); ++ regmap_read(dev->map, LRW_IC_STATUS, &status); ++ if ((status & 1) == 0) ++ return; ++ ++ usleep_range(25, 250); ++ } while (timeout--); ++ ++ dev_warn(dev->dev, "timeout in disabling adapter\n"); ++} ++ ++int i2c_lrw_bus_recover(struct i2c_adapter *adap) ++{ ++ struct i2c_lrw_dev *dev = i2c_get_adapdata(adap); ++ int timeout = 1000; ++ u32 enabled; ++ ++ if (dev->abort_source & IC_ABRT_SDA_STUCK_AT_LOW_POS) { ++ regmap_update_bits(dev->map, LRW_IC_ENABLE, ++ IC_SDA_STUCK_RECOVERY_ENABLE_POS, ++ IC_SDA_STUCK_RECOVERY_ENABLE_POS); ++ do { ++ regmap_read(dev->map, LRW_IC_ENABLE, &enabled); ++ if ((enabled & IC_SDA_STUCK_RECOVERY_ENABLE_POS) == ++ IC_SDA_STUCK_AT_LOW_RECOVERIED) ++ break; ++ udelay(10); ++ } while (--timeout); ++ ++ if (!timeout) { ++ dev_err(dev->dev, "I2C bus recovery timeout\n"); ++ return -ETIMEDOUT; ++ } ++ ++ dev_err(dev->dev, "I2C hardware recovery complete\n"); ++ 
} ++ return 0; ++} ++EXPORT_SYMBOL_GPL(i2c_lrw_bus_recover); ++ ++static u32 i2c_lrw_clk_rate(struct i2c_lrw_dev *dev) ++{ ++ if (WARN_ON_ONCE(!dev->get_clk_rate_khz)) ++ return 0; ++ return dev->get_clk_rate_khz(dev); ++} ++ ++int i2c_lrw_prepare_clk(struct i2c_lrw_dev *dev, bool prepare) ++{ ++ int ret; ++ ++ if (prepare) { ++ ret = clk_prepare_enable(dev->pclk); ++ if (ret) ++ return ret; ++ ++ ret = clk_prepare_enable(dev->clk); ++ if (ret) ++ clk_disable_unprepare(dev->pclk); ++ ++ return ret; ++ } ++ ++ clk_disable_unprepare(dev->clk); ++ clk_disable_unprepare(dev->pclk); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(i2c_lrw_prepare_clk); ++ ++static int i2c_lrw_wait_bus_not_busy(struct i2c_lrw_dev *dev) ++{ ++ u32 status; ++ int ret; ++ ++ ret = regmap_read_poll_timeout(dev->map, LRW_IC_STATUS, status, ++ !(status & LRW_IC_STATUS_ACTIVITY), 1100, ++ 20000); ++ if (ret) { ++ dev_warn(dev->dev, "timeout waiting for bus ready\n"); ++ ++ i2c_recover_bus(&dev->adapter); ++ ++ regmap_read(dev->map, LRW_IC_STATUS, &status); ++ if (!(status & LRW_IC_STATUS_ACTIVITY)) ++ ret = 0; ++ } ++ ++ return ret; ++} ++ ++static int i2c_lrw_handle_tx_abort(struct i2c_lrw_dev *dev) ++{ ++ unsigned long abort_source = dev->abort_source; ++ int i; ++ ++ if (abort_source & LRW_IC_TX_ABRT_NOACK) { ++ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) ++ dev_dbg(dev->dev, "%s: %s\n", __func__, ++ abort_sources[i]); ++ return -EREMOTEIO; ++ } ++ ++ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) ++ dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); ++ ++ if (abort_source & LRW_IC_TX_ARB_LOST) ++ return -EAGAIN; ++ else if (abort_source & LRW_IC_TX_ABRT_GCALL_READ) ++ return -EINVAL; ++ else if (abort_source & LRW_IC_TX_ABRT_SDA_STUCK_AT_LOW) ++ return i2c_lrw_bus_recover(&dev->adapter); ++ else ++ return -EIO; ++} ++ ++static int i2c_lrw_set_fifo_size(struct i2c_lrw_dev *dev) ++{ ++ u32 tx_fifo_depth, rx_fifo_depth; ++ ++ tx_fifo_depth = I2C_FIFO_DEPTH; ++ 
rx_fifo_depth = I2C_FIFO_DEPTH; ++ if (!dev->tx_fifo_depth) { ++ dev->tx_fifo_depth = tx_fifo_depth; ++ dev->rx_fifo_depth = rx_fifo_depth; ++ } else if (tx_fifo_depth >= 2) { ++ dev->tx_fifo_depth = ++ min_t(u32, dev->tx_fifo_depth, tx_fifo_depth); ++ dev->rx_fifo_depth = ++ min_t(u32, dev->rx_fifo_depth, rx_fifo_depth); ++ } ++ ++ return 0; ++} ++ ++static u32 i2c_lrw_func(struct i2c_adapter *adap) ++{ ++ struct i2c_lrw_dev *dev = i2c_get_adapdata(adap); ++ ++ return dev->functionality; ++} ++ ++static void i2c_lrw_disable(struct i2c_lrw_dev *dev) ++{ ++ int ret; ++ ++ ret = i2c_lrw_acquire_lock(dev); ++ if (ret) ++ return; ++ ++ __i2c_lrw_disable(dev); ++ ++ regmap_write(dev->map, LRW_IC_INTR_MASK, 0); ++ regmap_write_bits(dev->map, LRW_IC_CLR_RAW_INTR, LRW_IC_CLR_INTR, ++ LRW_IC_CLR_INTR); ++ ++ i2c_lrw_release_lock(dev); ++} ++ ++static void i2c_lrw_configure_fifo_master(struct i2c_lrw_dev *dev) ++{ ++ regmap_write(dev->map, LRW_IC_TX_TL, dev->tx_fifo_depth / 2); ++ regmap_write(dev->map, LRW_IC_RX_TL, 0); ++ ++ regmap_write(dev->map, LRW_IC_CON, dev->master_cfg); ++} ++ ++static int i2c_lrw_set_timings_master(struct i2c_lrw_dev *dev) ++{ ++ u32 sda_falling_time, scl_falling_time; ++ struct i2c_timings *t = &dev->timings; ++ const char *fp_str = ""; ++ u32 ic_clk; ++ int ret; ++ ++ sda_falling_time = t->sda_fall_ns ?: 300; ++ scl_falling_time = t->scl_fall_ns ?: 300; ++ ++ if ((dev->master_cfg & LRW_IC_CON_SPEED_MASK) == ++ LRW_IC_CON_SPEED_HIGH) { ++ if (!dev->hs_hcnt || !dev->hs_lcnt) { ++ ic_clk = i2c_lrw_clk_rate(dev); ++ dev->hs_hcnt = i2c_lrw_scl_hcnt(ic_clk, 160, ++ sda_falling_time, 0, 0); ++ dev->hs_lcnt = i2c_lrw_scl_lcnt(ic_clk, 320, ++ scl_falling_time, 0); ++ } else { ++ dev_err(dev->dev, "High Speed not supported!\n"); ++ t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ; ++ dev->master_cfg &= ~LRW_IC_CON_SPEED_MASK; ++ dev->master_cfg |= LRW_IC_CON_SPEED_STD_FAST; ++ dev->hs_hcnt = 0; ++ dev->hs_lcnt = 0; ++ } ++ dev_dbg(dev->dev, "High Speed Mode 
HCNT:LCNT = %d:%d\n", ++ dev->hs_hcnt, dev->hs_lcnt); ++ } ++ ++ if (!dev->fs_hcnt || !dev->fs_lcnt) { ++ ic_clk = i2c_lrw_clk_rate(dev); ++ switch (t->bus_freq_hz) { ++ case I2C_MAX_STANDARD_MODE_FREQ: /* 100kHz */ ++ dev->fs_hcnt = i2c_lrw_scl_hcnt(ic_clk, 4000, ++ sda_falling_time, 0, 0); ++ dev->fs_lcnt = i2c_lrw_scl_lcnt(ic_clk, 4700, ++ scl_falling_time, 0); ++ break; ++ case I2C_MAX_FAST_MODE_FREQ: /* 400kHz */ ++ dev->fs_hcnt = i2c_lrw_scl_hcnt(ic_clk, 600, ++ sda_falling_time, 0, 0); ++ dev->fs_lcnt = i2c_lrw_scl_lcnt(ic_clk, 1300, ++ scl_falling_time, 0); ++ break; ++ ++ case I2C_MAX_FAST_MODE_PLUS_FREQ: /* 1MHz */ ++ dev->fs_hcnt = i2c_lrw_scl_hcnt(ic_clk, 260, ++ sda_falling_time, 0, 0); ++ dev->fs_lcnt = i2c_lrw_scl_lcnt(ic_clk, 500, ++ scl_falling_time, 0); ++ fp_str = " Plus"; ++ break; ++ default: ++ break; ++ } ++ dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n", fp_str, ++ dev->fs_hcnt, dev->fs_lcnt); ++ } ++ ++ ret = i2c_lrw_set_sda_hold(dev); ++ if (ret) ++ return ret; ++ ++ dev_dbg(dev->dev, "Bus speed: %s\n", ++ i2c_freq_mode_string(t->bus_freq_hz)); ++ return 0; ++} ++ ++static int i2c_lrw_init_master(struct i2c_lrw_dev *dev) ++{ ++ int ret; ++ ++ ret = i2c_lrw_acquire_lock(dev); ++ if (ret) ++ return ret; ++ ++ __i2c_lrw_disable(dev); ++ ++ regmap_write(dev->map, LRW_IC_FS_SCL_HCNT, dev->fs_hcnt); ++ regmap_write(dev->map, LRW_IC_FS_SCL_LCNT, dev->fs_lcnt); ++ regmap_write(dev->map, LRW_IC_SDA_STUCK_AT_LOW_TIMEOUT, ++ dev->sda_stuck_at_low_timeout); ++ if (dev->hs_hcnt && dev->hs_lcnt) { ++ regmap_write(dev->map, LRW_IC_HS_SCL_HCNT, dev->hs_hcnt); ++ regmap_write(dev->map, LRW_IC_HS_SCL_LCNT, dev->hs_lcnt); ++ } ++ ++ if (dev->sda_hold_time) ++ regmap_write(dev->map, LRW_IC_SDA_HOLD, dev->sda_hold_time); ++ ++ i2c_lrw_configure_fifo_master(dev); ++ i2c_lrw_release_lock(dev); ++ ++ return 0; ++} ++ ++static void i2c_lrw_xfer_init(struct i2c_lrw_dev *dev) ++{ ++ struct i2c_msg *msgs = dev->msgs; ++ u32 ic_con = 0, ic_tar = 0; ++ ++ 
__i2c_lrw_disable(dev); ++ ++ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { ++ ic_con = LRW_IC_CON_10BITADDR_MASTER; ++ ic_tar = LRW_IC_TAR_10BITADDR_MASTER; ++ } ++ ++ regmap_update_bits(dev->map, LRW_IC_CON, LRW_IC_CON_10BITADDR_MASTER, ++ ic_con); ++ regmap_write(dev->map, LRW_IC_TAR, ++ msgs[dev->msg_write_idx].addr | ic_tar); ++ ++ regmap_write(dev->map, LRW_IC_INTR_MASK, 0); ++ ++ __i2c_lrw_enable(dev); ++ ++ regmap_update_bits(dev->map, LRW_IC_CLR_RAW_INTR, LRW_IC_CLR_INTR, ++ LRW_IC_CLR_INTR); ++ regmap_write(dev->map, LRW_IC_INTR_MASK, LRW_IC_INTR_MASTER_MASK); ++} ++ ++static void i2c_lrw_xfer_msg(struct i2c_lrw_dev *dev) ++{ ++ struct i2c_msg *msgs = dev->msgs; ++ u32 intr_mask; ++ u32 fifo_level; ++ int tx_limit, rx_limit; ++ u32 addr = msgs[dev->msg_write_idx].addr; ++ u32 buf_len = dev->tx_buf_len; ++ u8 *buf = dev->tx_buf; ++ bool need_restart = false; ++ unsigned int flr; ++ ++ intr_mask = LRW_IC_INTR_MASTER_MASK; ++ ++ for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) { ++ u32 flags = msgs[dev->msg_write_idx].flags; ++ ++ if (msgs[dev->msg_write_idx].addr != addr) { ++ dev_err(dev->dev, "%s: invalid target address\n", ++ __func__); ++ dev->msg_err = -EINVAL; ++ break; ++ } ++ ++ if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { ++ buf = msgs[dev->msg_write_idx].buf; ++ buf_len = msgs[dev->msg_write_idx].len; ++ ++ if ((dev->master_cfg & LRW_IC_CON_RESTART_EN) && ++ (dev->msg_write_idx > 0)) ++ need_restart = true; ++ } ++ ++ regmap_read(dev->map, LRW_IC_FIFO_LEVEL, &fifo_level); ++ flr = fifo_level & I2C_TXFLR_MASK; ++ tx_limit = dev->tx_fifo_depth - flr; ++ ++ flr = (fifo_level >> I2C_RXFLR_SHIFT) & I2C_RXFLR_MASK; ++ rx_limit = dev->rx_fifo_depth - flr; ++ ++ while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { ++ u32 cmd = 0; ++ ++ if (dev->msg_write_idx == dev->msgs_num - 1 && ++ buf_len == 1 && !(flags & I2C_M_RECV_LEN)) ++ cmd |= BIT(9); ++ ++ if (need_restart) { ++ cmd |= BIT(10); ++ need_restart = false; ++ } ++ ++ if 
(msgs[dev->msg_write_idx].flags & I2C_M_RD) { ++ if (dev->rx_outstanding >= dev->rx_fifo_depth) ++ break; ++ ++ regmap_write(dev->map, LRW_IC_DATA_CMD, ++ cmd | 0x100); ++ rx_limit--; ++ dev->rx_outstanding++; ++ } else { ++ regmap_write(dev->map, LRW_IC_DATA_CMD, ++ cmd | *buf++); ++ } ++ tx_limit--; ++ buf_len--; ++ } ++ ++ dev->tx_buf = buf; ++ dev->tx_buf_len = buf_len; ++ ++ if (flags & I2C_M_RECV_LEN) { ++ dev->status |= STATUS_WRITE_IN_PROGRESS; ++ intr_mask &= ~LRW_IC_INTR_TX_EMPTY; ++ break; ++ } ++ ++ if (buf_len > 0) { ++ dev->status |= STATUS_WRITE_IN_PROGRESS; ++ break; ++ } ++ ++ dev->status &= ~STATUS_WRITE_IN_PROGRESS; ++ } ++ ++ if (dev->msg_write_idx == dev->msgs_num) ++ intr_mask &= ~LRW_IC_INTR_TX_EMPTY; ++ ++ if (dev->msg_err) ++ intr_mask = 0; ++ ++ regmap_write(dev->map, LRW_IC_INTR_MASK, intr_mask); ++} ++ ++static u8 i2c_lrw_recv_len(struct i2c_lrw_dev *dev, u8 len) ++{ ++ struct i2c_msg *msgs = dev->msgs; ++ u32 flags = msgs[dev->msg_read_idx].flags; ++ ++ len += (flags & I2C_CLIENT_PEC) ? 
2 : 1; ++ dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding); ++ msgs[dev->msg_read_idx].len = len; ++ msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN; ++ ++ regmap_update_bits(dev->map, LRW_IC_INTR_MASK, LRW_IC_INTR_TX_EMPTY, ++ LRW_IC_INTR_TX_EMPTY); ++ ++ return len; ++} ++ ++static void i2c_lrw_read(struct i2c_lrw_dev *dev) ++{ ++ struct i2c_msg *msgs = dev->msgs; ++ u32 fifo_level; ++ unsigned int rx_valid; ++ ++ for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) { ++ unsigned int tmp; ++ u32 len; ++ u8 *buf; ++ ++ if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) ++ continue; ++ ++ if (!(dev->status & STATUS_READ_IN_PROGRESS)) { ++ len = msgs[dev->msg_read_idx].len; ++ buf = msgs[dev->msg_read_idx].buf; ++ } else { ++ len = dev->rx_buf_len; ++ buf = dev->rx_buf; ++ } ++ ++ regmap_read(dev->map, LRW_IC_FIFO_LEVEL, &fifo_level); ++ rx_valid = (fifo_level >> I2C_RXFLR_SHIFT) & I2C_RXFLR_MASK; ++ ++ for (; len > 0 && rx_valid > 0; len--, rx_valid--) { ++ u32 flags = msgs[dev->msg_read_idx].flags; ++ ++ regmap_read(dev->map, LRW_IC_DATA_CMD, &tmp); ++ tmp &= LRW_IC_DATA_CMD_DAT; ++ if (flags & I2C_M_RECV_LEN) { ++ if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX) ++ tmp = 1; ++ ++ len = i2c_lrw_recv_len(dev, tmp); ++ } ++ *buf++ = tmp; ++ dev->rx_outstanding--; ++ } ++ ++ if (len > 0) { ++ dev->status |= STATUS_READ_IN_PROGRESS; ++ dev->rx_buf_len = len; ++ dev->rx_buf = buf; ++ return; ++ } ++ dev->status &= ~STATUS_READ_IN_PROGRESS; ++ } ++} ++ ++static int i2c_lrw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], ++ int num) ++{ ++ struct i2c_lrw_dev *dev = i2c_get_adapdata(adap); ++ int ret; ++ ++ reinit_completion(&dev->cmd_complete); ++ dev->msgs = msgs; ++ dev->msgs_num = num; ++ dev->cmd_err = 0; ++ dev->msg_write_idx = 0; ++ dev->msg_read_idx = 0; ++ dev->msg_err = 0; ++ dev->status = 0; ++ dev->abort_source = 0; ++ dev->rx_outstanding = 0; ++ ret = i2c_lrw_acquire_lock(dev); ++ if (ret) ++ goto done_nolock; ++ ret = 
i2c_lrw_wait_bus_not_busy(dev); ++ if (ret < 0) ++ goto done; ++ i2c_lrw_xfer_init(dev); ++ ++ if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) { ++ dev_err(dev->dev, "controller timed out\n"); ++ i2c_lrw_init_master(dev); ++ ret = -ETIMEDOUT; ++ goto done; ++ } ++ ++ dev->status &= ~STATUS_ACTIVE; ++ ++ if (dev->msg_err) { ++ ret = dev->msg_err; ++ goto disable_controller; ++ } ++ ++ if (likely(!dev->cmd_err && !dev->status)) { ++ ret = num; ++ goto disable_controller; ++ } ++ ++ if (dev->cmd_err == LRW_IC_ERR_TX_ABRT) { ++ ret = i2c_lrw_handle_tx_abort(dev); ++ goto disable_controller; ++ } ++ ++ if (dev->status) { ++ dev_err(dev->dev, ++ "transfer terminated early - interrupt latency too high?\n"); ++ ret = -EIO; ++ goto disable_controller; ++ } ++ ++ ret = -EIO; ++ ++disable_controller: ++ __i2c_lrw_disable_nowait(dev); ++done: ++ i2c_lrw_release_lock(dev); ++ ++done_nolock: ++ return ret; ++} ++ ++static const struct i2c_algorithm i2c_lrw_algo = { ++ .master_xfer = i2c_lrw_xfer, ++ .functionality = i2c_lrw_func, ++}; ++ ++static const struct i2c_adapter_quirks i2c_lrw_quirks = { ++ .flags = I2C_AQ_NO_ZERO_LEN, ++}; ++ ++static u32 i2c_lrw_read_clear_intrbits(struct i2c_lrw_dev *dev) ++{ ++ u32 stat; ++ ++ regmap_read(dev->map, LRW_IC_INTR_STAT, &stat); ++ ++ if (stat & LRW_IC_INTR_RX_OVER) ++ regmap_write_bits(dev->map, LRW_IC_CLR_RAW_INTR, ++ LRW_IC_INTR_RX_OVER, LRW_IC_INTR_RX_OVER); ++ if (stat & LRW_IC_INTR_RD_REQ) ++ regmap_write_bits(dev->map, LRW_IC_CLR_RAW_INTR, ++ LRW_IC_INTR_RD_REQ, LRW_IC_INTR_RD_REQ); ++ if (stat & LRW_IC_INTR_TX_ABRT) { ++ regmap_read(dev->map, LRW_IC_TX_ABRT_SOURCE, ++ &dev->abort_source); ++ regmap_write_bits(dev->map, LRW_IC_CLR_RAW_INTR, ++ LRW_IC_INTR_TX_ABRT, LRW_IC_INTR_TX_ABRT); ++ } ++ if (stat & LRW_IC_INTR_RX_DONE) ++ regmap_write_bits(dev->map, LRW_IC_CLR_RAW_INTR, ++ LRW_IC_INTR_RX_DONE, LRW_IC_INTR_RX_DONE); ++ if (stat & LRW_IC_INTR_ACTIVITY) ++ regmap_write_bits(dev->map, LRW_IC_CLR_RAW_INTR, ++ 
LRW_IC_INTR_ACTIVITY, LRW_IC_INTR_ACTIVITY); ++ if (stat & LRW_IC_INTR_STOP_DET) ++ regmap_write_bits(dev->map, LRW_IC_CLR_RAW_INTR, ++ LRW_IC_INTR_STOP_DET, LRW_IC_INTR_STOP_DET); ++ if (stat & LRW_IC_INTR_START_DET) ++ regmap_write_bits(dev->map, LRW_IC_CLR_RAW_INTR, ++ LRW_IC_INTR_START_DET, ++ LRW_IC_INTR_START_DET); ++ if (stat & LRW_IC_INTR_GEN_CALL) ++ regmap_write_bits(dev->map, LRW_IC_CLR_RAW_INTR, ++ LRW_IC_INTR_GEN_CALL, LRW_IC_INTR_GEN_CALL); ++ ++ return stat; ++} ++ ++static irqreturn_t i2c_lrw_isr(int this_irq, void *dev_id) ++{ ++ struct i2c_lrw_dev *dev = dev_id; ++ u32 stat, enabled; ++ ++ regmap_read(dev->map, LRW_IC_ENABLE, &enabled); ++ regmap_read(dev->map, LRW_IC_CLR_RAW_INTR, &stat); ++ if (!enabled || !(stat & ~LRW_IC_INTR_ACTIVITY)) ++ return IRQ_NONE; ++ ++ stat = i2c_lrw_read_clear_intrbits(dev); ++ ++ if (!(dev->status & STATUS_ACTIVE)) { ++ regmap_write(dev->map, LRW_IC_INTR_MASK, 0); ++ return IRQ_HANDLED; ++ } ++ ++ if (stat & LRW_IC_INTR_TX_ABRT) { ++ dev->cmd_err |= LRW_IC_ERR_TX_ABRT; ++ dev->status &= ~STATUS_MASK; ++ dev->rx_outstanding = 0; ++ ++ dev_dbg(dev->dev, "abrt intr:0x%x, abort:0x%x\n", stat, ++ dev->abort_source); ++ regmap_write(dev->map, LRW_IC_INTR_MASK, 0); ++ goto tx_aborted; ++ } ++ ++ if (stat & LRW_IC_INTR_RX_FULL) ++ i2c_lrw_read(dev); ++ ++ if (stat & LRW_IC_INTR_TX_EMPTY) ++ i2c_lrw_xfer_msg(dev); ++ ++tx_aborted: ++ if (((stat & (LRW_IC_INTR_TX_ABRT | LRW_IC_INTR_STOP_DET)) || ++ dev->msg_err) && ++ (dev->rx_outstanding == 0)) { ++ complete(&dev->cmd_complete); ++ } else { ++ regmap_read(dev->map, LRW_IC_INTR_MASK, &stat); ++ regmap_write(dev->map, LRW_IC_INTR_MASK, 0); ++ regmap_write(dev->map, LRW_IC_INTR_MASK, stat); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++void i2c_lrw_configure_master(struct i2c_lrw_dev *dev) ++{ ++ struct i2c_timings *t = &dev->timings; ++ ++ dev->functionality = I2C_FUNC_10BIT_ADDR | LRW_IC_DEFAULT_FUNCTIONALITY; ++ ++ dev->master_cfg = LRW_IC_CON_MASTER | LRW_IC_CON_RESTART_EN | ++ 
IC_CON_BUS_CLEAR_CTRL_POS; ++ ++ switch (t->bus_freq_hz) { ++ case I2C_MAX_STANDARD_MODE_FREQ: ++ case I2C_MAX_FAST_MODE_FREQ: ++ case I2C_MAX_FAST_MODE_PLUS_FREQ: ++ dev->master_cfg |= LRW_IC_CON_SPEED_STD_FAST; ++ break; ++ case I2C_MAX_HIGH_SPEED_MODE_FREQ: ++ dev->master_cfg |= LRW_IC_CON_SPEED_HIGH; ++ break; ++ default: ++ dev_warn(dev->dev, ++ "dev bus_freq_hz outlined in the device datasheet?\n"); ++ } ++} ++EXPORT_SYMBOL_GPL(i2c_lrw_configure_master); ++ ++static int i2c_lrw_init_recovery_info(struct i2c_lrw_dev *dev) ++{ ++ struct i2c_bus_recovery_info *rinfo = &dev->rinfo; ++ struct i2c_adapter *adap = &dev->adapter; ++ ++ rinfo->recover_bus = i2c_lrw_bus_recover; ++ adap->bus_recovery_info = rinfo; ++ ++ return 0; ++} ++ ++int i2c_lrw_probe_master(struct i2c_lrw_dev *dev) ++{ ++ struct i2c_adapter *adap = &dev->adapter; ++ unsigned long irq_flags; ++ unsigned int ic_con; ++ int ret; ++ ++ init_completion(&dev->cmd_complete); ++ ++ dev->init = i2c_lrw_init_master; ++ dev->disable = i2c_lrw_disable; ++ ++ ret = i2c_lrw_init_regmap(dev); ++ if (ret) ++ return ret; ++ ++ ret = i2c_lrw_set_timings_master(dev); ++ if (ret) ++ return ret; ++ ++ ret = i2c_lrw_set_fifo_size(dev); ++ if (ret) ++ return ret; ++ ++ ret = i2c_lrw_acquire_lock(dev); ++ if (ret) ++ return ret; ++ ++ ret = regmap_read(dev->map, LRW_IC_CON, &ic_con); ++ i2c_lrw_release_lock(dev); ++ if (ret) ++ return ret; ++ ++ if (ic_con & IC_CON_BUS_CLEAR_CTRL_POS) ++ dev->master_cfg |= IC_CON_BUS_CLEAR_CTRL_POS; ++ ++ ret = dev->init(dev); ++ if (ret) ++ return ret; ++ ++ snprintf(adap->name, sizeof(adap->name), "LRW I2C adapter"); ++ adap->retries = 3; ++ adap->algo = &i2c_lrw_algo; ++ adap->quirks = &i2c_lrw_quirks; ++ adap->dev.parent = dev->dev; ++ i2c_set_adapdata(adap, dev); ++ ++ irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND; ++ ++ ret = i2c_lrw_acquire_lock(dev); ++ if (ret) ++ return ret; ++ ++ regmap_write(dev->map, LRW_IC_INTR_MASK, 0); ++ regmap_write(dev->map, LRW_SMBUS_INTR_MASK, 0); ++ 
++ i2c_lrw_release_lock(dev); ++ ++ ret = devm_request_irq(dev->dev, dev->irq, i2c_lrw_isr, irq_flags, ++ dev_name(dev->dev), dev); ++ if (ret) { ++ dev_err(dev->dev, "failure requesting irq %i: %d\n", dev->irq, ++ ret); ++ return ret; ++ } ++ ++ ret = i2c_lrw_init_recovery_info(dev); ++ if (ret) ++ return ret; ++ ++ ret = i2c_add_numbered_adapter(adap); ++ if (ret) ++ dev_err(dev->dev, "failure adding adapter: %d\n", ret); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(i2c_lrw_probe_master); ++ ++MODULE_DESCRIPTION("LRW I2C bus master adapter"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/i2c/busses/i2c-lrw-platdrv.c b/drivers/i2c/busses/i2c-lrw-platdrv.c +new file mode 100644 +index 000000000000..65a8d69cbfb8 +--- /dev/null ++++ b/drivers/i2c/busses/i2c-lrw-platdrv.c +@@ -0,0 +1,235 @@ ++// SPDX-License-Identifier: GPL-2.0+ ++/* ++ * I2C adapter driver for LRW ++ * ++ * Copyright (c) 2025, LRW CORPORATION. All rights reserved. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "i2c-lrw-core.h" ++ ++static u32 i2c_lrw_get_clk_rate_khz(struct i2c_lrw_dev *dev) ++{ ++ return clk_get_rate(dev->clk) / 1000; ++} ++ ++#ifdef CONFIG_ACPI ++static const struct acpi_device_id i2c_lrw_acpi_match[] = { { "LRWX0002", 0 }, ++ {} }; ++MODULE_DEVICE_TABLE(acpi, i2c_lrw_acpi_match); ++#endif ++ ++#ifdef CONFIG_OF ++ ++static int i2c_lrw_of_configure(struct platform_device *pdev) ++{ ++ struct i2c_lrw_dev *dev = platform_get_drvdata(pdev); ++ ++ switch (dev->flags & MODEL_MASK) { ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id i2c_lrw_of_match[] = { ++ { ++ .compatible = "lrw,sc-i2c", ++ }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, i2c_lrw_of_match); ++#else ++ ++static inline int i2c_lrw_of_configure(struct platform_device *pdev) ++{ ++ return 
-ENODEV; ++} ++#endif ++ ++static int i2c_lrw_plat_request_regs(struct i2c_lrw_dev *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev->dev); ++ int ret; ++ ++ switch (dev->flags & MODEL_MASK) { ++ default: ++ dev->base = devm_platform_ioremap_resource(pdev, 0); ++ ret = PTR_ERR_OR_ZERO(dev->base); ++ break; ++ } ++ ++ return ret; ++} ++ ++static const struct dmi_system_id i2c_lrw_hwmon_class_dmi[] = { ++ { ++ .ident = "lrw 0002", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "lrw"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "0002"), ++ }, ++ }, ++ { } /* terminate list */ ++}; ++ ++static int i2c_lrw_plat_probe(struct platform_device *pdev) ++{ ++ struct i2c_adapter *adap; ++ struct i2c_lrw_dev *dev; ++ struct i2c_timings *t; ++ int irq, ret; ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return irq; ++ ++ dev = devm_kzalloc(&pdev->dev, sizeof(struct i2c_lrw_dev), GFP_KERNEL); ++ if (!dev) ++ return -ENOMEM; ++ ++ dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); ++ dev->dev = &pdev->dev; ++ dev->irq = irq; ++ ++ platform_set_drvdata(pdev, dev); ++ ++ ret = i2c_lrw_plat_request_regs(dev); ++ if (ret) ++ return ret; ++ ++ dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); ++ if (IS_ERR(dev->rst)) ++ return PTR_ERR(dev->rst); ++ ++ reset_control_deassert(dev->rst); ++ ++ t = &dev->timings; ++ i2c_parse_fw_timings(&pdev->dev, t, false); ++ ++ i2c_lrw_adjust_bus_speed(dev); ++ ++ if (pdev->dev.of_node) ++ i2c_lrw_of_configure(pdev); ++ ++ if (has_acpi_companion(&pdev->dev)) ++ i2c_lrw_acpi_configure(&pdev->dev); ++ else ++ i2c_lrw_dt_configure(&pdev->dev); ++ ++ ret = i2c_lrw_validate_speed(dev); ++ if (ret) ++ goto exit_reset; ++ ++ ret = i2c_lrw_probe_lock_support(dev); ++ if (ret) ++ goto exit_reset; ++ ++ i2c_lrw_configure(dev); ++ ++ dev->pclk = devm_clk_get_optional(&pdev->dev, "pclk"); ++ if (IS_ERR(dev->pclk)) { ++ ret = PTR_ERR(dev->pclk); ++ goto exit_reset; ++ } ++ ++ dev->clk = devm_clk_get_optional(&pdev->dev, 
NULL); ++ if (IS_ERR(dev->clk)) { ++ ret = PTR_ERR(dev->clk); ++ goto exit_reset; ++ } ++ ++ ret = i2c_lrw_prepare_clk(dev, true); ++ if (ret) ++ goto exit_reset; ++ ++ if (dev->clk) { ++ u64 clk_khz; ++ ++ dev->get_clk_rate_khz = i2c_lrw_get_clk_rate_khz; ++ clk_khz = dev->get_clk_rate_khz(dev); ++ if (!dev->sda_hold_time && t->sda_hold_ns) { ++ dev->sda_hold_time = div_u64( ++ clk_khz * t->sda_hold_ns + 500000, 1000000); ++ } ++ } ++ ++ adap = &dev->adapter; ++ adap->owner = THIS_MODULE; ++ adap->class = dmi_check_system(i2c_lrw_hwmon_class_dmi) ? ++ I2C_CLASS_HWMON : ++ I2C_CLASS_DEPRECATED; ++ ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); ++ adap->dev.of_node = pdev->dev.of_node; ++ adap->nr = -1; ++ ++ ret = i2c_lrw_probe(dev); ++ if (ret) ++ goto exit_reset; ++ ++ return ret; ++ ++exit_reset: ++ reset_control_assert(dev->rst); ++ return ret; ++} ++ ++static void i2c_lrw_plat_remove(struct platform_device *pdev) ++{ ++ struct i2c_lrw_dev *dev = platform_get_drvdata(pdev); ++ ++ i2c_del_adapter(&dev->adapter); ++ ++ dev->disable(dev); ++ reset_control_assert(dev->rst); ++} ++ ++MODULE_ALIAS("platform:i2c_lrw"); ++ ++static struct platform_driver i2c_lrw_driver = { ++ .probe = i2c_lrw_plat_probe, ++ .remove_new = i2c_lrw_plat_remove, ++ .driver = { ++ .name = "i2c_lrw", ++ .of_match_table = of_match_ptr(i2c_lrw_of_match), ++ .acpi_match_table = ACPI_PTR(i2c_lrw_acpi_match), ++ }, ++}; ++ ++static int __init i2c_lrw_init_driver(void) ++{ ++ return platform_driver_register(&i2c_lrw_driver); ++} ++subsys_initcall(i2c_lrw_init_driver); ++ ++static void __exit i2c_lrw_exit_driver(void) ++{ ++ platform_driver_unregister(&i2c_lrw_driver); ++} ++module_exit(i2c_lrw_exit_driver); ++ ++MODULE_AUTHOR("HXW"); ++MODULE_DESCRIPTION("LRW I2C bus adapter"); ++MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-spacemit-k1.c b/drivers/i2c/busses/i2c-spacemit-k1.c new file mode 100644 index 000000000000..dd10c9ff70cc @@ -379186,7 +388700,7 @@ index 
000000000000..dd10c9ff70cc +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-spacemit-k1.h b/drivers/i2c/busses/i2c-spacemit-k1.h new file mode 100644 -index 000000000000..50397f8058b0 +index 000000000000..15a59ec0f9b9 --- /dev/null +++ b/drivers/i2c/busses/i2c-spacemit-k1.h @@ -0,0 +1,225 @@ @@ -379198,8 +388712,8 @@ index 000000000000..50397f8058b0 + * + */ + -+#ifndef I2C_SPACEMIT_K1X_H -+#define I2C_SPACEMIT_K1X_H ++#ifndef I2C_SPACEMIT_K1_H ++#define I2C_SPACEMIT_K1_H +#include +#include +#include @@ -379414,7 +388928,7 @@ index 000000000000..50397f8058b0 + u32 apb_clock; +}; + -+#endif /* I2C_SPACEMIT_K1X_H */ ++#endif /* I2C_SPACEMIT_K1_H */ diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 6dee3b686eff..43c0bfab199a 100644 --- a/drivers/iio/adc/Kconfig @@ -380532,6 +390046,25 @@ index 000000000000..8a4f21e1cb17 + /* lock to protect against multiple access to the device */ + struct mutex mlock; +}; +diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c +index 84e0f41e7dfa..f948b76f984d 100644 +--- a/drivers/infiniband/hw/usnic/usnic_uiom.c ++++ b/drivers/infiniband/hw/usnic/usnic_uiom.c +@@ -443,11 +443,11 @@ struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev) + if (!pd) + return ERR_PTR(-ENOMEM); + +- pd->domain = domain = iommu_domain_alloc(dev->bus); +- if (!domain) { ++ pd->domain = domain = iommu_paging_domain_alloc(dev); ++ if (IS_ERR(domain)) { + usnic_err("Failed to allocate IOMMU domain"); + kfree(pd); +- return ERR_PTR(-ENOMEM); ++ return ERR_CAST(domain); + } + + iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 6ba984d7f0b1..64fb6e48d748 100644 --- a/drivers/input/misc/Kconfig @@ -380779,9 +390312,18 @@ index 000000000000..cfc44b7901ae +MODULE_DESCRIPTION("Spacemit P1 Power Key driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig -index 
567f15f5b842..30dc1c332a7d 100644 +index 567f15f5b842..cc427017551b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig +@@ -150,7 +150,7 @@ config OF_IOMMU + + # IOMMU-agnostic DMA-mapping layer + config IOMMU_DMA +- def_bool ARM64 || IA64 || X86 || S390 || LOONGARCH ++ def_bool ARM64 || IA64 || X86 || S390 || LOONGARCH || RISCV + select DMA_OPS + select IOMMU_API + select IOMMU_IOVA @@ -205,6 +205,7 @@ source "drivers/iommu/intel/Kconfig" source "drivers/iommu/iommufd/Kconfig" source "drivers/iommu/sw64/Kconfig" @@ -380816,10 +390358,10 @@ index ef3ee95706da..eb1e62cd499a 100644 struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev); struct platform_device *iommu_pdev = of_find_device_by_node(args->np); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c -index 3628c83763e2..a0c5ab209471 100644 +index c0222c4d54a8..0bfebd94258e 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c -@@ -3913,7 +3913,8 @@ static int arm_smmu_clear_dirty_log(struct iommu_domain *domain, +@@ -3924,7 +3924,8 @@ static int arm_smmu_clear_dirty_log(struct iommu_domain *domain, } #endif @@ -380829,7 +390371,7 @@ index 3628c83763e2..a0c5ab209471 100644 { return iommu_fwspec_add_ids(dev, args->args, 1); } -@@ -4460,7 +4461,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) +@@ -4471,7 +4472,8 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr) static void arm_smmu_free_msis(void *data) { struct device *dev = data; @@ -380839,7 +390381,7 @@ index 3628c83763e2..a0c5ab209471 100644 } static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) -@@ -4517,7 +4519,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) +@@ -4528,7 +4530,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) } /* Allocate MSIs for evtq, gerror and priq. 
Ignore cmdq */ @@ -380849,10 +390391,28 @@ index 3628c83763e2..a0c5ab209471 100644 dev_warn(dev, "failed to allocate MSIs - falling back to wired irqs\n"); return; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c -index 4598ac7aee81..b101318246d6 100644 +index 4598ac7aee81..cee6088b4afd 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c -@@ -1565,7 +1565,8 @@ static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain, +@@ -183,8 +183,7 @@ static int arm_smmu_register_legacy_master(struct device *dev, + it.cur_count = 1; + } + +- err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode, +- &arm_smmu_ops); ++ err = iommu_fwspec_init(dev, NULL); + if (err) + return err; + +@@ -1471,7 +1470,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) + out_cfg_free: + kfree(cfg); + out_free: +- iommu_fwspec_free(dev); + return ERR_PTR(ret); + } + +@@ -1565,7 +1563,8 @@ static int arm_smmu_set_pgtable_quirks(struct iommu_domain *domain, return ret; } @@ -380946,7 +390506,7 @@ index d587bcec3198..2a5fd7b0d785 100644 iommu->qi = NULL; return -ENOMEM; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c -index bb23fc0f4832..124e2dc151f5 100644 +index b1415d2f00a9..8c96ca59f650 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -27,6 +27,7 @@ @@ -381110,10 +390670,10 @@ index bb23fc0f4832..124e2dc151f5 100644 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h -index eaef932ad7c9..5307e144ca12 100644 +index bfce7480ee09..9e566a2f22df 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h -@@ -1076,8 +1076,6 @@ void domain_update_iommu_cap(struct dmar_domain *domain); +@@ -1077,8 +1077,6 @@ void domain_update_iommu_cap(struct dmar_domain *domain); int dmar_ir_support(void); @@ -381463,19 +391023,297 @@ index 
000000000000..5a222d0ad25c +} + +#endif /* __IOMMU_PAGES_H */ +diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h +index f163aa0129f4..43bc0d6ac4f0 100644 +--- a/drivers/iommu/iommu-priv.h ++++ b/drivers/iommu/iommu-priv.h +@@ -17,9 +17,20 @@ static inline const struct iommu_ops *dev_iommu_ops(struct device *dev) + return dev->iommu->iommu_dev->ops; + } + ++void dev_iommu_free(struct device *dev); ++ ++const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); ++ ++static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwspec) ++{ ++ return iommu_ops_from_fwnode(fwspec ? fwspec->iommu_fwnode : NULL); ++} ++ + int iommu_group_replace_domain(struct iommu_group *group, + struct iommu_domain *new_domain); + ++void iommu_fwspec_free(struct device *dev); ++ + int iommu_device_register_bus(struct iommu_device *iommu, + const struct iommu_ops *ops, struct bus_type *bus, + struct notifier_block *nb); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c -index 4d76e3a6afaa..99a096b52390 100644 +index 4d76e3a6afaa..00d68f37dd8a 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c -@@ -3104,7 +3104,7 @@ void iommu_fwspec_free(struct device *dev) +@@ -355,7 +355,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev) + return param; + } + +-static void dev_iommu_free(struct device *dev) ++void dev_iommu_free(struct device *dev) + { + struct dev_iommu *param = dev->iommu; + +@@ -416,14 +416,40 @@ EXPORT_SYMBOL_GPL(dev_iommu_priv_set); + * Init the dev->iommu and dev->iommu_group in the struct device and get the + * driver probed + */ +-static int iommu_init_device(struct device *dev, const struct iommu_ops *ops) ++static int iommu_init_device(struct device *dev) + { ++ const struct iommu_ops *ops; + struct iommu_device *iommu_dev; + struct iommu_group *group; + int ret; + + if (!dev_iommu_get(dev)) + return -ENOMEM; ++ /* ++ * For FDT-based systems and ACPI IORT/VIOT, the common firmware parsing ++ 
* is buried in the bus dma_configure path. Properly unpicking that is ++ * still a big job, so for now just invoke the whole thing. The device ++ * already having a driver bound means dma_configure has already run and ++ * either found no IOMMU to wait for, or we're in its replay call right ++ * now, so either way there's no point calling it again. ++ */ ++ if (!dev->driver && dev->bus->dma_configure) { ++ mutex_unlock(&iommu_probe_device_lock); ++ dev->bus->dma_configure(dev); ++ mutex_lock(&iommu_probe_device_lock); ++ } ++ /* ++ * At this point, relevant devices either now have a fwspec which will ++ * match ops registered with a non-NULL fwnode, or we can reasonably ++ * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can ++ * be present, and that any of their registered instances has suitable ++ * ops for probing, and thus cheekily co-opt the same mechanism. ++ */ ++ ops = iommu_fwspec_ops(dev->iommu->fwspec); ++ if (!ops) { ++ ret = -ENODEV; ++ goto err_free; ++ } + + if (!try_module_get(ops->owner)) { + ret = -EINVAL; +@@ -527,28 +553,10 @@ DEFINE_MUTEX(iommu_probe_device_lock); + + static int __iommu_probe_device(struct device *dev, struct list_head *group_list) + { +- const struct iommu_ops *ops; +- struct iommu_fwspec *fwspec; + struct iommu_group *group; + struct group_device *gdev; + int ret; + +- /* +- * For FDT-based systems and ACPI IORT/VIOT, drivers register IOMMU +- * instances with non-NULL fwnodes, and client devices should have been +- * identified with a fwspec by this point. Otherwise, we can currently +- * assume that only one of Intel, AMD, s390, PAMU or legacy SMMUv2 can +- * be present, and that any of their registered instances has suitable +- * ops for probing, and thus cheekily co-opt the same mechanism. 
+- */ +- fwspec = dev_iommu_fwspec_get(dev); +- if (fwspec && fwspec->ops) +- ops = fwspec->ops; +- else +- ops = iommu_ops_from_fwnode(NULL); +- +- if (!ops) +- return -ENODEV; + /* + * Serialise to avoid races between IOMMU drivers registering in + * parallel and/or the "replay" calls from ACPI/OF code via client +@@ -562,9 +570,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list + if (dev->iommu_group) + return 0; + +- ret = iommu_init_device(dev, ops); ++ ret = iommu_init_device(dev); + if (ret) + return ret; ++ /* ++ * And if we do now see any replay calls, they would indicate someone ++ * misusing the dma_configure path outside bus code. ++ */ ++ if (dev->driver) ++ dev_WARN(dev, "late IOMMU probe at driver bind, something fishy here!\n"); + + group = dev->iommu_group; + gdev = iommu_group_alloc_device(group, dev); +@@ -1912,31 +1926,6 @@ int bus_iommu_probe(const struct bus_type *bus) + return 0; + } + +-/** +- * iommu_present() - make platform-specific assumptions about an IOMMU +- * @bus: bus to check +- * +- * Do not use this function. You want device_iommu_mapped() instead. +- * +- * Return: true if some IOMMU is present and aware of devices on the given bus; +- * in general it may not be the only IOMMU, and it may not have anything to do +- * with whatever device you are ultimately interested in. 
+- */ +-bool iommu_present(const struct bus_type *bus) +-{ +- bool ret = false; +- +- for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { +- if (iommu_buses[i] == bus) { +- spin_lock(&iommu_device_lock); +- ret = !list_empty(&iommu_device_list); +- spin_unlock(&iommu_device_lock); +- } +- } +- return ret; +-} +-EXPORT_SYMBOL_GPL(iommu_present); +- + /** + * device_iommu_capable() - check for a general IOMMU capability + * @dev: device to which the capability would be relevant, if available +@@ -2068,37 +2057,21 @@ __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type) + return __iommu_domain_alloc(dev_iommu_ops(dev), dev, type); } - EXPORT_SYMBOL_GPL(iommu_fwspec_free); + +-static int __iommu_domain_alloc_dev(struct device *dev, void *data) ++/** ++ * iommu_paging_domain_alloc() - Allocate a paging domain ++ * @dev: device for which the domain is allocated ++ * ++ * Allocate a paging domain which will be managed by a kernel driver. Return ++ * allocated domain if successful, or a ERR pointer for failure. ++ */ ++struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) + { +- const struct iommu_ops **ops = data; +- + if (!dev_has_iommu(dev)) +- return 0; +- +- if (WARN_ONCE(*ops && *ops != dev_iommu_ops(dev), +- "Multiple IOMMU drivers present for bus %s, which the public IOMMU API can't fully support yet. 
You will still need to disable one or more for this to work, sorry!\n", +- dev_bus_name(dev))) +- return -EBUSY; +- +- *ops = dev_iommu_ops(dev); +- return 0; +-} ++ return ERR_PTR(-ENODEV); + +-struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) +-{ +- const struct iommu_ops *ops = NULL; +- int err = bus_for_each_dev(bus, NULL, &ops, __iommu_domain_alloc_dev); +- struct iommu_domain *domain; +- +- if (err || !ops) +- return NULL; +- +- domain = __iommu_domain_alloc(ops, NULL, IOMMU_DOMAIN_UNMANAGED); +- if (IS_ERR(domain)) +- return NULL; +- return domain; ++ return __iommu_domain_alloc(dev_iommu_ops(dev), dev, IOMMU_DOMAIN_UNMANAGED); + } +-EXPORT_SYMBOL_GPL(iommu_domain_alloc); ++EXPORT_SYMBOL_GPL(iommu_paging_domain_alloc); + + void iommu_domain_free(struct iommu_domain *domain) + { +@@ -3068,13 +3041,16 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) + return ops; + } + +-int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, +- const struct iommu_ops *ops) ++int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode) + { ++ const struct iommu_ops *ops = iommu_ops_from_fwnode(iommu_fwnode); + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + ++ if (!ops) ++ return -EPROBE_DEFER; ++ + if (fwspec) +- return ops == fwspec->ops ? 0 : -EINVAL; ++ return ops == iommu_fwspec_ops(fwspec) ? 
0 : -EINVAL; + + if (!dev_iommu_get(dev)) + return -ENOMEM; +@@ -3084,9 +3060,8 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, + if (!fwspec) + return -ENOMEM; + +- of_node_get(to_of_node(iommu_fwnode)); ++ fwnode_handle_get(iommu_fwnode); + fwspec->iommu_fwnode = iommu_fwnode; +- fwspec->ops = ops; + dev_iommu_fwspec_set(dev, fwspec); + return 0; + } +@@ -3102,9 +3077,8 @@ void iommu_fwspec_free(struct device *dev) + dev_iommu_fwspec_set(dev, NULL); + } + } +-EXPORT_SYMBOL_GPL(iommu_fwspec_free); -int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) +int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); int i, new_num; +@@ -3369,6 +3343,11 @@ int iommu_device_use_default_domain(struct device *dev) + return 0; + + mutex_lock(&group->mutex); ++ /* We may race against bus_iommu_probe() finalising groups here */ ++ if (!group->default_domain) { ++ ret = -EPROBE_DEFER; ++ goto unlock_out; ++ } + if (group->owner_cnt) { + if (group->owner || !iommu_is_default_domain(group) || + !xa_empty(&group->pasid_array)) { +@@ -3466,6 +3445,11 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner) + return -EINVAL; + + mutex_lock(&group->mutex); ++ /* We may race against bus_iommu_probe() finalising groups here */ ++ if (!group->default_domain) { ++ ret = -EPROBE_DEFER; ++ goto unlock_out; ++ } + if (group->owner_cnt) { + ret = -EPERM; + goto unlock_out; +diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c +index 0a5a379a19ac..e6185090ee09 100644 +--- a/drivers/iommu/iommufd/hw_pagetable.c ++++ b/drivers/iommu/iommufd/hw_pagetable.c +@@ -158,9 +158,10 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, + } + hwpt->domain->owner = ops; + } else { +- hwpt->domain = iommu_domain_alloc(idev->dev->bus); +- if (!hwpt->domain) { +- rc = -ENOMEM; ++ hwpt->domain = 
iommu_paging_domain_alloc(idev->dev); ++ if (IS_ERR(hwpt->domain)) { ++ rc = PTR_ERR(hwpt->domain); ++ hwpt->domain = NULL; + goto out_abort; + } + } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ace1fc4bd34b..cd7219319c8b 100644 --- a/drivers/iommu/ipmmu-vmsa.c @@ -381521,7 +391359,7 @@ index f86af9815d6f..989e0869d805 100644 struct msm_iommu_dev *iommu = NULL, *iter; unsigned long flags; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c -index 51d0eba8cbdf..358e8ee9506c 100644 +index 6948ed8fc6e1..add2eaaaa7ee 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -957,7 +957,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev) @@ -381535,7 +391373,7 @@ index 51d0eba8cbdf..358e8ee9506c 100644 struct platform_device *m4updev; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c -index 32cc8341d372..0ddcd153b568 100644 +index 32cc8341d372..571f8cbbe096 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -398,7 +398,8 @@ static const struct iommu_ops mtk_iommu_v1_ops; @@ -381548,6 +391386,147 @@ index 32cc8341d372..0ddcd153b568 100644 { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct mtk_iommu_v1_data *data; +@@ -451,22 +452,13 @@ static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_arg + + static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev) + { +- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); ++ struct iommu_fwspec *fwspec = NULL; + struct of_phandle_args iommu_spec; + struct mtk_iommu_v1_data *data; + int err, idx = 0, larbid, larbidx; + struct device_link *link; + struct device *larbdev; + +- /* +- * In the deferred case, free the existed fwspec. +- * Always initialize the fwspec internally. 
+- */ +- if (fwspec) { +- iommu_fwspec_free(dev); +- fwspec = dev_iommu_fwspec_get(dev); +- } +- + while (!of_parse_phandle_with_args(dev->of_node, "iommus", + "#iommu-cells", + idx, &iommu_spec)) { +@@ -481,6 +473,9 @@ static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev) + idx++; + } + ++ if (!fwspec) ++ return ERR_PTR(-ENODEV); ++ + data = dev_iommu_priv_get(dev); + + /* Link the consumer device with the smi-larb device(supplier) */ +diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c +index 719652b60840..98dc69d5daed 100644 +--- a/drivers/iommu/of_iommu.c ++++ b/drivers/iommu/of_iommu.c +@@ -17,30 +17,25 @@ + #include + #include + ++#include "iommu-priv.h" ++ + static int of_iommu_xlate(struct device *dev, + struct of_phandle_args *iommu_spec) + { + const struct iommu_ops *ops; +- struct fwnode_handle *fwnode = &iommu_spec->np->fwnode; + int ret; + +- ops = iommu_ops_from_fwnode(fwnode); +- if ((ops && !ops->of_xlate) || +- !of_device_is_available(iommu_spec->np)) ++ if (!of_device_is_available(iommu_spec->np)) + return -ENODEV; + +- ret = iommu_fwspec_init(dev, &iommu_spec->np->fwnode, ops); ++ ret = iommu_fwspec_init(dev, of_fwnode_handle(iommu_spec->np)); ++ if (ret == -EPROBE_DEFER) ++ return driver_deferred_probe_check_state(dev); + if (ret) + return ret; +- /* +- * The otherwise-empty fwspec handily serves to indicate the specific +- * IOMMU device we're waiting for, which will be useful if we ever get +- * a proper probe-ordering dependency mechanism in future. 
+- */ +- if (!ops) +- return driver_deferred_probe_check_state(dev); + +- if (!try_module_get(ops->owner)) ++ ops = iommu_ops_from_fwnode(&iommu_spec->np->fwnode); ++ if (!ops->of_xlate || !try_module_get(ops->owner)) + return -ENODEV; + + ret = ops->of_xlate(dev, iommu_spec); +@@ -115,7 +110,7 @@ static int of_iommu_configure_device(struct device_node *master_np, + int of_iommu_configure(struct device *dev, struct device_node *master_np, + const u32 *id) + { +- struct iommu_fwspec *fwspec; ++ bool dev_iommu_present; + int err; + + if (!master_np) +@@ -123,15 +118,11 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np, + + /* Serialise to make dev->iommu stable under our potential fwspec */ + mutex_lock(&iommu_probe_device_lock); +- fwspec = dev_iommu_fwspec_get(dev); +- if (fwspec) { +- if (fwspec->ops) { +- mutex_unlock(&iommu_probe_device_lock); +- return 0; +- } +- /* In the deferred case, start again from scratch */ +- iommu_fwspec_free(dev); ++ if (dev_iommu_fwspec_get(dev)) { ++ mutex_unlock(&iommu_probe_device_lock); ++ return 0; + } ++ dev_iommu_present = dev->iommu; + + /* + * We don't currently walk up the tree looking for a parent IOMMU. +@@ -150,20 +141,24 @@ int of_iommu_configure(struct device *dev, struct device_node *master_np, + } else { + err = of_iommu_configure_device(master_np, dev, id); + } ++ ++ if (err && dev_iommu_present) ++ iommu_fwspec_free(dev); ++ else if (err && dev->iommu) ++ dev_iommu_free(dev); + mutex_unlock(&iommu_probe_device_lock); + +- if (err == -ENODEV || err == -EPROBE_DEFER) +- return err; +- if (err) +- goto err_log; ++ /* ++ * If we're not on the iommu_probe_device() path (as indicated by the ++ * initial dev->iommu) then try to simulate it. This should no longer ++ * happen unless of_dma_configure() is being misused outside bus code. 
++ */ ++ if (!err && dev->bus && !dev_iommu_present) ++ err = iommu_probe_device(dev); + +- err = iommu_probe_device(dev); +- if (err) +- goto err_log; +- return 0; ++ if (err && err != -EPROBE_DEFER) ++ dev_dbg(dev, "Adding to IOMMU failed: %d\n", err); + +-err_log: +- dev_dbg(dev, "Adding to IOMMU failed: %pe\n", ERR_PTR(err)); + return err; + } + diff --git a/drivers/iommu/riscv/Kconfig b/drivers/iommu/riscv/Kconfig new file mode 100644 index 000000000000..c071816f59a6 @@ -381576,19 +391555,19 @@ index 000000000000..c071816f59a6 + Support for the PCIe implementation of RISC-V IOMMU architecture. diff --git a/drivers/iommu/riscv/Makefile b/drivers/iommu/riscv/Makefile new file mode 100644 -index 000000000000..f54c9ed17d41 +index 000000000000..8420dd1776cb --- /dev/null +++ b/drivers/iommu/riscv/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only -+obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-platform.o ++obj-$(CONFIG_RISCV_IOMMU) += iommu.o iommu-ir.o iommu-platform.o +obj-$(CONFIG_RISCV_IOMMU_PCI) += iommu-pci.o diff --git a/drivers/iommu/riscv/iommu-bits.h b/drivers/iommu/riscv/iommu-bits.h new file mode 100644 -index 000000000000..98daf0e1a306 +index 000000000000..d3d98dbed709 --- /dev/null +++ b/drivers/iommu/riscv/iommu-bits.h -@@ -0,0 +1,784 @@ +@@ -0,0 +1,795 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2022-2024 Rivos Inc. 
@@ -381627,6 +391606,10 @@ index 000000000000..98daf0e1a306 +#define RISCV_IOMMU_ATP_PPN_FIELD GENMASK_ULL(43, 0) +#define RISCV_IOMMU_ATP_MODE_FIELD GENMASK_ULL(63, 60) + ++/* RISC-V IOMMU PPN <> PHYS address conversions, PHYS <=> PPN[53:10] */ ++#define riscv_iommu_phys_to_ppn(pa) (((pa) >> 2) & (((1ULL << 44) - 1) << 10)) ++#define riscv_iommu_ppn_to_phys(pn) (((pn) << 2) & (((1ULL << 44) - 1) << 12)) ++ +/* 5.3 IOMMU Capabilities (64bits) */ +#define RISCV_IOMMU_REG_CAPABILITIES 0x0000 +#define RISCV_IOMMU_CAPABILITIES_VERSION GENMASK_ULL(7, 0) @@ -382306,6 +392289,13 @@ index 000000000000..98daf0e1a306 + cmd->dword1 = 0; +} + ++static inline void riscv_iommu_cmd_inval_gvma(struct riscv_iommu_command *cmd) ++{ ++ cmd->dword0 = FIELD_PREP(RISCV_IOMMU_CMD_OPCODE, RISCV_IOMMU_CMD_IOTINVAL_OPCODE) | ++ FIELD_PREP(RISCV_IOMMU_CMD_FUNC, RISCV_IOMMU_CMD_IOTINVAL_FUNC_GVMA); ++ cmd->dword1 = 0; ++} ++ +static inline void riscv_iommu_cmd_inval_set_addr(struct riscv_iommu_command *cmd, + u64 addr) +{ @@ -382373,12 +392363,715 @@ index 000000000000..98daf0e1a306 +} + +#endif /* _RISCV_IOMMU_BITS_H_ */ +diff --git a/drivers/iommu/riscv/iommu-ir.c b/drivers/iommu/riscv/iommu-ir.c +new file mode 100644 +index 000000000000..cb3411f7dca7 +--- /dev/null ++++ b/drivers/iommu/riscv/iommu-ir.c +@@ -0,0 +1,697 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * IOMMU Interrupt Remapping ++ * ++ * Copyright © 2025 Ventana Micro Systems Inc. 
++ */ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "../iommu-pages.h" ++#include "iommu.h" ++ ++static size_t riscv_iommu_ir_group_size(struct riscv_iommu_domain *domain) ++{ ++ phys_addr_t mask = domain->msi_addr_mask; ++ ++ if (domain->group_index_bits) { ++ phys_addr_t group_mask = BIT(domain->group_index_bits) - 1; ++ phys_addr_t group_shift = domain->group_index_shift - 12; ++ ++ mask &= ~(group_mask << group_shift); ++ } ++ ++ return (mask + 1) << 12; ++} ++ ++static int riscv_iommu_ir_map_unmap_imsics(struct riscv_iommu_domain *domain, bool map, ++ gfp_t gfp, size_t *unmapped) ++{ ++ phys_addr_t base = domain->msi_addr_pattern << 12, addr; ++ size_t stride = domain->imsic_stride, map_size = SZ_4K, size; ++ size_t i, j; ++ ++ size = riscv_iommu_ir_group_size(domain); ++ ++ if (stride == SZ_4K) ++ stride = map_size = size; ++ ++ for (i = 0; i < BIT(domain->group_index_bits); i++) { ++ for (j = 0; j < size; j += stride) { ++ addr = (base + j) | (i << domain->group_index_shift); ++ if (map) { ++ int ret = iommu_map(&domain->domain, addr, addr, map_size, ++ IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO, gfp); ++ if (ret) ++ return ret; ++ } else { ++ *unmapped += iommu_unmap(&domain->domain, addr, map_size); ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++static size_t riscv_iommu_ir_unmap_imsics(struct riscv_iommu_domain *domain) ++{ ++ size_t unmapped = 0; ++ ++ riscv_iommu_ir_map_unmap_imsics(domain, false, 0, &unmapped); ++ ++ return unmapped; ++} ++ ++static int riscv_iommu_ir_map_imsics(struct riscv_iommu_domain *domain, gfp_t gfp) ++{ ++ int ret; ++ ++ ret = riscv_iommu_ir_map_unmap_imsics(domain, true, gfp, NULL); ++ if (ret) ++ riscv_iommu_ir_unmap_imsics(domain); ++ ++ return ret; ++} ++ ++static size_t riscv_iommu_ir_compute_msipte_idx(struct riscv_iommu_domain *domain, ++ phys_addr_t msi_pa) ++{ ++ phys_addr_t mask = domain->msi_addr_mask; ++ phys_addr_t addr = msi_pa >> 12; ++ size_t idx; ++ ++ if (domain->group_index_bits) { 
++ phys_addr_t group_mask = BIT(domain->group_index_bits) - 1; ++ phys_addr_t group_shift = domain->group_index_shift - 12; ++ phys_addr_t group = (addr >> group_shift) & group_mask; ++ ++ mask &= ~(group_mask << group_shift); ++ idx = addr & mask; ++ idx |= group << fls64(mask); ++ } else { ++ idx = addr & mask; ++ } ++ ++ return idx; ++} ++ ++static size_t riscv_iommu_ir_nr_msiptes(struct riscv_iommu_domain *domain) ++{ ++ phys_addr_t base = domain->msi_addr_pattern << 12; ++ phys_addr_t max_addr = base | (domain->msi_addr_mask << 12); ++ size_t max_idx = riscv_iommu_ir_compute_msipte_idx(domain, max_addr); ++ ++ return max_idx + 1; ++} ++ ++static void riscv_iommu_ir_set_pte(struct riscv_iommu_msipte *pte, u64 addr) ++{ ++ pte->pte = FIELD_PREP(RISCV_IOMMU_MSIPTE_M, 3) | ++ riscv_iommu_phys_to_ppn(addr) | ++ FIELD_PREP(RISCV_IOMMU_MSIPTE_V, 1); ++ pte->mrif_info = 0; ++} ++ ++static void riscv_iommu_ir_clear_pte(struct riscv_iommu_msipte *pte) ++{ ++ pte->pte = 0; ++ pte->mrif_info = 0; ++} ++ ++static void riscv_iommu_ir_msitbl_inval(struct riscv_iommu_domain *domain, ++ struct riscv_iommu_msipte *pte) ++{ ++ struct riscv_iommu_bond *bond; ++ struct riscv_iommu_device *iommu, *prev; ++ struct riscv_iommu_command cmd; ++ ++ riscv_iommu_cmd_inval_gvma(&cmd); ++ riscv_iommu_cmd_inval_set_gscid(&cmd, 0); ++ ++ if (pte) { ++ u64 addr = pfn_to_phys(FIELD_GET(RISCV_IOMMU_MSIPTE_PPN, pte->pte)); ++ riscv_iommu_cmd_inval_set_addr(&cmd, addr); ++ } ++ ++ /* Like riscv_iommu_iotlb_inval(), synchronize with riscv_iommu_bond_link() */ ++ smp_mb(); ++ ++ rcu_read_lock(); ++ ++ prev = NULL; ++ list_for_each_entry_rcu(bond, &domain->bonds, list) { ++ iommu = dev_to_iommu(bond->dev); ++ if (iommu == prev) ++ continue; ++ ++ riscv_iommu_cmd_send(iommu, &cmd); ++ prev = iommu; ++ } ++ ++ prev = NULL; ++ list_for_each_entry_rcu(bond, &domain->bonds, list) { ++ iommu = dev_to_iommu(bond->dev); ++ if (iommu == prev) ++ continue; ++ ++ riscv_iommu_cmd_sync(iommu, 
RISCV_IOMMU_IOTINVAL_TIMEOUT); ++ prev = iommu; ++ } ++ ++ rcu_read_unlock(); ++} ++ ++static void riscv_iommu_ir_msitbl_clear(struct riscv_iommu_domain *domain) ++{ ++ for (size_t i = 0; i < riscv_iommu_ir_nr_msiptes(domain); i++) { ++ riscv_iommu_ir_clear_pte(&domain->msi_root[i]); ++ refcount_set(&domain->msi_pte_counts[i], 0); ++ } ++} ++ ++static void riscv_iommu_ir_msiptp_update(struct riscv_iommu_domain *domain) ++{ ++ struct riscv_iommu_bond *bond; ++ struct riscv_iommu_device *iommu, *prev; ++ struct riscv_iommu_dc new_dc = { ++ .ta = FIELD_PREP(RISCV_IOMMU_PC_TA_PSCID, domain->pscid) | ++ RISCV_IOMMU_PC_TA_V, ++ .fsc = FIELD_PREP(RISCV_IOMMU_PC_FSC_MODE, domain->pgd_mode) | ++ FIELD_PREP(RISCV_IOMMU_PC_FSC_PPN, virt_to_pfn(domain->pgd_root)), ++ .msiptp = virt_to_pfn(domain->msi_root) | ++ FIELD_PREP(RISCV_IOMMU_DC_MSIPTP_MODE, ++ RISCV_IOMMU_DC_MSIPTP_MODE_FLAT), ++ .msi_addr_mask = domain->msi_addr_mask, ++ .msi_addr_pattern = domain->msi_addr_pattern, ++ }; ++ ++ /* Like riscv_iommu_ir_msitbl_inval(), synchronize with riscv_iommu_bond_link() */ ++ smp_mb(); ++ ++ rcu_read_lock(); ++ ++ prev = NULL; ++ list_for_each_entry_rcu(bond, &domain->bonds, list) { ++ iommu = dev_to_iommu(bond->dev); ++ if (iommu == prev) ++ continue; ++ ++ riscv_iommu_iodir_update(iommu, bond->dev, &new_dc); ++ prev = iommu; ++ } ++ ++ rcu_read_unlock(); ++} ++ ++struct riscv_iommu_ir_chip_data { ++ size_t idx; ++ u32 config; ++}; ++ ++static size_t riscv_iommu_ir_irq_msitbl_idx(struct irq_data *data) ++{ ++ struct riscv_iommu_ir_chip_data *chip_data = irq_data_get_irq_chip_data(data); ++ ++ return chip_data->idx; ++} ++ ++static u32 riscv_iommu_ir_irq_msitbl_config(struct irq_data *data) ++{ ++ struct riscv_iommu_ir_chip_data *chip_data = irq_data_get_irq_chip_data(data); ++ ++ return chip_data->config; ++} ++ ++static void riscv_iommu_ir_irq_set_msitbl_info(struct irq_data *data, ++ size_t idx, u32 config) ++{ ++ struct riscv_iommu_ir_chip_data *chip_data = 
irq_data_get_irq_chip_data(data); ++ ++ chip_data->idx = idx; ++ chip_data->config = config; ++} ++ ++static void riscv_iommu_ir_msitbl_map(struct riscv_iommu_domain *domain, ++ struct irq_data *data, ++ size_t idx, phys_addr_t addr) ++{ ++ struct riscv_iommu_msipte *pte; ++ ++ riscv_iommu_ir_irq_set_msitbl_info(data, idx, domain->msitbl_config); ++ ++ if (!domain->msi_root) ++ return; ++ ++ if (!refcount_inc_not_zero(&domain->msi_pte_counts[idx])) { ++ scoped_guard(raw_spinlock_irqsave, &domain->msi_lock) { ++ if (refcount_read(&domain->msi_pte_counts[idx]) == 0) { ++ pte = &domain->msi_root[idx]; ++ riscv_iommu_ir_set_pte(pte, addr); ++ riscv_iommu_ir_msitbl_inval(domain, pte); ++ refcount_set(&domain->msi_pte_counts[idx], 1); ++ } else { ++ refcount_inc(&domain->msi_pte_counts[idx]); ++ } ++ } ++ } ++} ++ ++static void riscv_iommu_ir_msitbl_unmap(struct riscv_iommu_domain *domain, ++ struct irq_data *data, size_t idx) ++{ ++ struct riscv_iommu_msipte *pte; ++ u32 config; ++ ++ config = riscv_iommu_ir_irq_msitbl_config(data); ++ riscv_iommu_ir_irq_set_msitbl_info(data, -1, -1); ++ ++ if (WARN_ON_ONCE(config != domain->msitbl_config)) ++ return; ++ ++ if (!domain->msi_root) ++ return; ++ ++ scoped_guard(raw_spinlock_irqsave, &domain->msi_lock) { ++ if (refcount_dec_and_test(&domain->msi_pte_counts[idx])) { ++ pte = &domain->msi_root[idx]; ++ riscv_iommu_ir_clear_pte(pte); ++ riscv_iommu_ir_msitbl_inval(domain, pte); ++ } ++ } ++} ++ ++static size_t riscv_iommu_ir_get_msipte_idx_from_target(struct riscv_iommu_domain *domain, ++ struct irq_data *data, phys_addr_t *addr) ++{ ++ struct msi_msg msg; ++ ++ BUG_ON(irq_chip_compose_msi_msg(data, &msg)); ++ ++ *addr = ((phys_addr_t)msg.address_hi << 32) | msg.address_lo; ++ ++ return riscv_iommu_ir_compute_msipte_idx(domain, *addr); ++} ++ ++static int riscv_iommu_ir_irq_set_affinity(struct irq_data *data, ++ const struct cpumask *dest, bool force) ++{ ++ struct riscv_iommu_info *info = data->domain->host_data; ++ struct 
riscv_iommu_domain *domain = info->domain; ++ size_t old_idx, new_idx; ++ phys_addr_t new_addr; ++ int ret; ++ ++ old_idx = riscv_iommu_ir_irq_msitbl_idx(data); ++ ++ ret = irq_chip_set_affinity_parent(data, dest, force); ++ if (ret < 0) ++ return ret; ++ ++ new_idx = riscv_iommu_ir_get_msipte_idx_from_target(domain, data, &new_addr); ++ ++ if (new_idx == old_idx) ++ return ret; ++ ++ riscv_iommu_ir_msitbl_unmap(domain, data, old_idx); ++ riscv_iommu_ir_msitbl_map(domain, data, new_idx, new_addr); ++ ++ return ret; ++} ++ ++static bool riscv_iommu_ir_vcpu_check_config(struct riscv_iommu_domain *domain, ++ struct riscv_iommu_ir_vcpu_info *vcpu_info) ++{ ++ return domain->msi_addr_mask == vcpu_info->msi_addr_mask && ++ domain->msi_addr_pattern == vcpu_info->msi_addr_pattern && ++ domain->group_index_bits == vcpu_info->group_index_bits && ++ domain->group_index_shift == vcpu_info->group_index_shift; ++} ++ ++static int riscv_iommu_ir_vcpu_new_config(struct riscv_iommu_domain *domain, ++ struct irq_data *data, ++ struct riscv_iommu_ir_vcpu_info *vcpu_info) ++{ ++ struct riscv_iommu_msipte *pte; ++ size_t idx; ++ int ret; ++ ++ if (domain->pgd_mode) ++ riscv_iommu_ir_unmap_imsics(domain); ++ ++ riscv_iommu_ir_msitbl_clear(domain); ++ ++ domain->msi_addr_mask = vcpu_info->msi_addr_mask; ++ domain->msi_addr_pattern = vcpu_info->msi_addr_pattern; ++ domain->group_index_bits = vcpu_info->group_index_bits; ++ domain->group_index_shift = vcpu_info->group_index_shift; ++ domain->imsic_stride = SZ_4K; ++ domain->msitbl_config += 1; ++ ++ if (domain->pgd_mode) { ++ /* ++ * As in riscv_iommu_ir_irq_domain_create(), we do all stage1 ++ * mappings up front since the MSI table will manage the ++ * translations. ++ * ++ * XXX: Since irq-set-vcpu-affinity is called in atomic context ++ * we need GFP_ATOMIC. 
If the number of 4K dma pte allocations ++ * is considered too many for GFP_ATOMIC, then we can wrap ++ * riscv_iommu_pte_alloc()'s iommu_alloc_pages_node_sz() call ++ * in a mempool and try to ensure the pool has enough elements ++ * in riscv_iommu_ir_irq_domain_enable_msis(). ++ */ ++ ret = riscv_iommu_ir_map_imsics(domain, GFP_ATOMIC); ++ if (ret) ++ return ret; ++ } ++ ++ idx = riscv_iommu_ir_compute_msipte_idx(domain, vcpu_info->gpa); ++ pte = &domain->msi_root[idx]; ++ riscv_iommu_ir_irq_set_msitbl_info(data, idx, domain->msitbl_config); ++ riscv_iommu_ir_set_pte(pte, vcpu_info->hpa); ++ riscv_iommu_ir_msitbl_inval(domain, NULL); ++ refcount_set(&domain->msi_pte_counts[idx], 1); ++ ++ riscv_iommu_ir_msiptp_update(domain); ++ ++ return 0; ++} ++ ++static int riscv_iommu_ir_irq_set_vcpu_affinity(struct irq_data *data, void *arg) ++{ ++ struct riscv_iommu_info *info = data->domain->host_data; ++ struct riscv_iommu_domain *domain = info->domain; ++ struct riscv_iommu_ir_vcpu_info *vcpu_info = arg; ++ struct riscv_iommu_msipte pteval; ++ struct riscv_iommu_msipte *pte; ++ bool inc = false, dec = false; ++ size_t old_idx, new_idx; ++ u32 old_config; ++ ++ if (!domain->msi_root) ++ return -EOPNOTSUPP; ++ ++ old_idx = riscv_iommu_ir_irq_msitbl_idx(data); ++ old_config = riscv_iommu_ir_irq_msitbl_config(data); ++ ++ if (!vcpu_info) { ++ riscv_iommu_ir_msitbl_unmap(domain, data, old_idx); ++ return 0; ++ } ++ ++ guard(raw_spinlock)(&domain->msi_lock); ++ ++ if (!riscv_iommu_ir_vcpu_check_config(domain, vcpu_info)) ++ return riscv_iommu_ir_vcpu_new_config(domain, data, vcpu_info); ++ ++ new_idx = riscv_iommu_ir_compute_msipte_idx(domain, vcpu_info->gpa); ++ riscv_iommu_ir_irq_set_msitbl_info(data, new_idx, domain->msitbl_config); ++ ++ pte = &domain->msi_root[new_idx]; ++ riscv_iommu_ir_set_pte(&pteval, vcpu_info->hpa); ++ ++ if (pteval.pte != pte->pte) { ++ *pte = pteval; ++ riscv_iommu_ir_msitbl_inval(domain, pte); ++ } ++ ++ if (old_config != domain->msitbl_config) 
++ inc = true; ++ else if (new_idx != old_idx) ++ inc = dec = true; ++ ++ if (dec && refcount_dec_and_test(&domain->msi_pte_counts[old_idx])) { ++ pte = &domain->msi_root[old_idx]; ++ riscv_iommu_ir_clear_pte(pte); ++ riscv_iommu_ir_msitbl_inval(domain, pte); ++ } ++ ++ if (inc && !refcount_inc_not_zero(&domain->msi_pte_counts[new_idx])) ++ refcount_set(&domain->msi_pte_counts[new_idx], 1); ++ ++ return 0; ++} ++ ++static struct irq_chip riscv_iommu_ir_irq_chip = { ++ .name = "IOMMU-IR", ++ .irq_ack = irq_chip_ack_parent, ++ .irq_mask = irq_chip_mask_parent, ++ .irq_unmask = irq_chip_unmask_parent, ++ .irq_set_affinity = riscv_iommu_ir_irq_set_affinity, ++ .irq_set_vcpu_affinity = riscv_iommu_ir_irq_set_vcpu_affinity, ++}; ++ ++static int riscv_iommu_ir_irq_domain_alloc_irqs(struct irq_domain *irqdomain, ++ unsigned int irq_base, unsigned int nr_irqs, ++ void *arg) ++{ ++ struct riscv_iommu_info *info = irqdomain->host_data; ++ struct riscv_iommu_domain *domain = info->domain; ++ struct riscv_iommu_ir_chip_data *chip_data; ++ struct irq_data *data; ++ phys_addr_t addr; ++ size_t idx; ++ int i, ret; ++ ++ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL_ACCOUNT); ++ if (!chip_data) ++ return -ENOMEM; ++ ++ ret = irq_domain_alloc_irqs_parent(irqdomain, irq_base, nr_irqs, arg); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < nr_irqs; i++) { ++ data = irq_domain_get_irq_data(irqdomain, irq_base + i); ++ data->chip = &riscv_iommu_ir_irq_chip; ++ data->chip_data = chip_data; ++ idx = riscv_iommu_ir_get_msipte_idx_from_target(domain, data, &addr); ++ riscv_iommu_ir_msitbl_map(domain, data, idx, addr); ++ } ++ ++ return 0; ++} ++ ++static void riscv_iommu_ir_irq_domain_free_irqs(struct irq_domain *irqdomain, ++ unsigned int irq_base, ++ unsigned int nr_irqs) ++{ ++ struct riscv_iommu_info *info = irqdomain->host_data; ++ struct riscv_iommu_domain *domain = info->domain; ++ struct irq_data *data; ++ u32 config; ++ size_t idx; ++ int i; ++ ++ for (i = 0; i < nr_irqs; i++) 
{ ++ data = irq_domain_get_irq_data(irqdomain, irq_base + i); ++ config = riscv_iommu_ir_irq_msitbl_config(data); ++ /* ++ * Only irqs with matching config versions need to be unmapped here ++ * since config changes will unmap everything and irq-set-vcpu-affinity ++ * irq deletions unmap at deletion time. An example of stale indices that ++ * don't need to be unmapped are those of irqs allocated by VFIO that a ++ * guest driver never used. The config change made for the guest will have ++ * already unmapped those, though, so there's no need to unmap them here. ++ */ ++ if (config == domain->msitbl_config) { ++ idx = riscv_iommu_ir_irq_msitbl_idx(data); ++ riscv_iommu_ir_msitbl_unmap(domain, data, idx); ++ } ++ kfree(data->chip_data); ++ } ++ ++ irq_domain_free_irqs_parent(irqdomain, irq_base, nr_irqs); ++} ++ ++static const struct irq_domain_ops riscv_iommu_ir_irq_domain_ops = { ++ .alloc = riscv_iommu_ir_irq_domain_alloc_irqs, ++ .free = riscv_iommu_ir_irq_domain_free_irqs, ++}; ++ ++static const struct msi_parent_ops riscv_iommu_ir_msi_parent_ops = { ++ .prefix = "IR-", ++ .supported_flags = MSI_GENERIC_FLAGS_MASK | ++ MSI_FLAG_PCI_MSIX, ++ .required_flags = MSI_FLAG_USE_DEF_DOM_OPS | ++ MSI_FLAG_USE_DEF_CHIP_OPS, ++ .init_dev_msi_info = msi_parent_init_dev_msi_info, ++}; ++ ++struct irq_domain *riscv_iommu_ir_irq_domain_create(struct riscv_iommu_device *iommu, ++ struct device *dev, ++ struct riscv_iommu_info *info) ++{ ++ struct irq_domain *irqparent = dev_get_msi_domain(dev); ++ struct irq_domain *irqdomain; ++ struct fwnode_handle *fn; ++ char *fwname; ++ ++ fwname = kasprintf(GFP_KERNEL, "IOMMU-IR-%s", dev_name(dev)); ++ if (!fwname) ++ return NULL; ++ ++ fn = irq_domain_alloc_named_fwnode(fwname); ++ kfree(fwname); ++ if (!fn) { ++ dev_err(iommu->dev, "Couldn't allocate fwnode\n"); ++ return NULL; ++ } ++ ++ irqdomain = irq_domain_create_hierarchy(irqparent, 0, 0, fn, ++ &riscv_iommu_ir_irq_domain_ops, ++ info); ++ if (!irqdomain) { ++ dev_err(iommu->dev, 
"Failed to create IOMMU irq domain\n"); ++ irq_domain_free_fwnode(fn); ++ return NULL; ++ } ++ ++ if (iommu->caps & RISCV_IOMMU_CAPABILITIES_MSI_FLAT) { ++ /* ++ * NOTE: The RISC-V IOMMU doesn't actually support isolated MSI because ++ * there is no MSI message validation (see the comment above ++ * msi_device_has_isolated_msi()). However, we claim isolated MSI here ++ * because applying the IOMMU ensures MSI messages may only be delivered ++ * to the mapped MSI addresses. This allows MSIs to be isolated to ++ * particular harts/vcpus where the unvalidated MSI messages can be ++ * tolerated. ++ */ ++ irqdomain->flags |= IRQ_DOMAIN_FLAG_ISOLATED_MSI; ++ } ++ ++ irqdomain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT; ++ irqdomain->msi_parent_ops = &riscv_iommu_ir_msi_parent_ops; ++ irq_domain_update_bus_token(irqdomain, DOMAIN_BUS_MSI_REMAP); ++ ++ dev_set_msi_domain(dev, irqdomain); ++ ++ return irqdomain; ++} ++ ++static void riscv_iommu_ir_free_msi_table(struct riscv_iommu_domain *domain) ++{ ++ iommu_free_pages(domain->msi_root, domain->msi_order); ++ kfree(domain->msi_pte_counts); ++} ++ ++void riscv_iommu_ir_irq_domain_remove(struct riscv_iommu_info *info) ++{ ++ struct riscv_iommu_domain *domain = info->domain; ++ struct fwnode_handle *fn; ++ ++ if (!info->irqdomain) ++ return; ++ ++ riscv_iommu_ir_free_msi_table(domain); ++ ++ fn = info->irqdomain->fwnode; ++ irq_domain_remove(info->irqdomain); ++ info->irqdomain = NULL; ++ irq_domain_free_fwnode(fn); ++} ++ ++static int riscv_ir_set_imsic_global_config(struct riscv_iommu_device *iommu, ++ struct riscv_iommu_domain *domain) ++{ ++ const struct imsic_global_config *imsic_global; ++ u64 mask = 0; ++ ++ imsic_global = imsic_get_global_config(); ++ ++ mask |= (BIT(imsic_global->group_index_bits) - 1) << (imsic_global->group_index_shift - 12); ++ mask |= BIT(imsic_global->hart_index_bits + imsic_global->guest_index_bits) - 1; ++ domain->msi_addr_mask = mask; ++ domain->msi_addr_pattern = imsic_global->base_addr >> 12; ++ 
domain->group_index_bits = imsic_global->group_index_bits; ++ domain->group_index_shift = imsic_global->group_index_shift; ++ domain->imsic_stride = BIT(imsic_global->guest_index_bits + 12); ++ ++ if (iommu->caps & RISCV_IOMMU_CAPABILITIES_MSI_FLAT) { ++ size_t nr_ptes = riscv_iommu_ir_nr_msiptes(domain); ++ ++ domain->msi_order = get_order(nr_ptes * sizeof(*domain->msi_root)); ++ domain->msi_root = iommu_alloc_pages_node(domain->numa_node, GFP_KERNEL_ACCOUNT, ++ domain->msi_order); ++ if (!domain->msi_root) ++ return -ENOMEM; ++ ++ domain->msi_pte_counts = kcalloc(nr_ptes, sizeof(refcount_t), GFP_KERNEL_ACCOUNT); ++ if (!domain->msi_pte_counts) { ++ iommu_free_pages(domain->msi_root, domain->msi_order); ++ return -ENOMEM; ++ } ++ ++ raw_spin_lock_init(&domain->msi_lock); ++ } ++ ++ return 0; ++} ++ ++int riscv_iommu_ir_attach_paging_domain(struct riscv_iommu_domain *domain, ++ struct device *dev) ++{ ++ struct riscv_iommu_device *iommu = dev_to_iommu(dev); ++ struct riscv_iommu_info *info = dev_iommu_priv_get(dev); ++ int ret; ++ ++ if (!info->irqdomain) ++ return 0; ++ ++ /* ++ * Do the domain's one-time setup of the msi configuration the ++ * first time the domain is attached and the msis are enabled. ++ */ ++ if (domain->msi_addr_mask == 0) { ++ ret = riscv_ir_set_imsic_global_config(iommu, domain); ++ if (ret) ++ return ret; ++ ++ /* ++ * The RISC-V IOMMU MSI table is checked after the stage1 DMA ++ * page tables. If we don't create identity mappings in the ++ * stage1 table then we'll fault and won't even get a chance ++ * to check the MSI table. 
++ */ ++ if (domain->pgd_mode) { ++ ret = riscv_iommu_ir_map_imsics(domain, GFP_KERNEL_ACCOUNT); ++ if (ret) { ++ riscv_iommu_ir_free_msi_table(domain); ++ return ret; ++ } ++ } ++ } ++ ++ return 0; ++} ++ ++void riscv_iommu_ir_free_paging_domain(struct riscv_iommu_domain *domain) ++{ ++ riscv_iommu_ir_free_msi_table(domain); ++} ++ ++void riscv_iommu_ir_get_resv_regions(struct device *dev, struct list_head *head) ++{ ++ const struct imsic_global_config *imsic_global; ++ struct iommu_resv_region *reg; ++ phys_addr_t addr; ++ size_t size, i; ++ ++ imsic_global = imsic_get_global_config(); ++ if (!imsic_global || !imsic_global->nr_ids) ++ return; ++ ++ size = BIT(imsic_global->hart_index_bits + imsic_global->guest_index_bits + 12); ++ ++ for (i = 0; i < BIT(imsic_global->group_index_bits); i++) { ++ addr = imsic_global->base_addr | (i << imsic_global->group_index_shift); ++ reg = iommu_alloc_resv_region(addr, size, 0, IOMMU_RESV_MSI, GFP_KERNEL); ++ if (reg) ++ list_add_tail(®->list, head); ++ } ++} diff --git a/drivers/iommu/riscv/iommu-pci.c b/drivers/iommu/riscv/iommu-pci.c new file mode 100644 -index 000000000000..c7a89143014c +index 000000000000..d82d2b00904c --- /dev/null +++ b/drivers/iommu/riscv/iommu-pci.c -@@ -0,0 +1,120 @@ +@@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* @@ -382482,6 +393175,13 @@ index 000000000000..c7a89143014c + riscv_iommu_remove(iommu); +} + ++static void riscv_iommu_pci_shutdown(struct pci_dev *pdev) ++{ ++ struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev); ++ ++ riscv_iommu_disable(iommu); ++} ++ +static const struct pci_device_id riscv_iommu_pci_tbl[] = { + {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_RISCV_IOMMU), 0}, + {PCI_VDEVICE(RIVOS, PCI_DEVICE_ID_RIVOS_RISCV_IOMMU_GA), 0}, @@ -382493,6 +393193,7 @@ index 000000000000..c7a89143014c + .id_table = riscv_iommu_pci_tbl, + .probe = riscv_iommu_pci_probe, + .remove = riscv_iommu_pci_remove, ++ .shutdown = riscv_iommu_pci_shutdown, + .driver = { + 
.suppress_bind_attrs = true, + }, @@ -382501,10 +393202,10 @@ index 000000000000..c7a89143014c +builtin_pci_driver(riscv_iommu_pci_driver); diff --git a/drivers/iommu/riscv/iommu-platform.c b/drivers/iommu/riscv/iommu-platform.c new file mode 100644 -index 000000000000..da336863f152 +index 000000000000..a40ff06af0e1 --- /dev/null +++ b/drivers/iommu/riscv/iommu-platform.c -@@ -0,0 +1,92 @@ +@@ -0,0 +1,179 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * RISC-V IOMMU as a platform device @@ -382517,19 +393218,47 @@ index 000000000000..da336863f152 + * Tomasz Jeznach + */ + ++#include ++#include +#include ++#include ++#include +#include +#include + +#include "iommu-bits.h" +#include "iommu.h" + ++static void riscv_iommu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg) ++{ ++ struct device *dev = msi_desc_to_dev(desc); ++ struct riscv_iommu_device *iommu = dev_get_drvdata(dev); ++ u16 idx = desc->msi_index; ++ u64 addr; ++ ++ addr = ((u64)msg->address_hi << 32) | msg->address_lo; ++ ++ if (addr != (addr & RISCV_IOMMU_MSI_CFG_TBL_ADDR)) { ++ dev_err_once(dev, ++ "uh oh, the IOMMU can't send MSIs to 0x%llx, sending to 0x%llx instead\n", ++ addr, addr & RISCV_IOMMU_MSI_CFG_TBL_ADDR); ++ } ++ ++ addr &= RISCV_IOMMU_MSI_CFG_TBL_ADDR; ++ ++ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_ADDR(idx), addr); ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_DATA(idx), msg->data); ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_MSI_CFG_TBL_CTRL(idx), 0); ++} ++ +static int riscv_iommu_platform_probe(struct platform_device *pdev) +{ ++ enum riscv_iommu_igs_settings igs; + struct device *dev = &pdev->dev; + struct riscv_iommu_device *iommu = NULL; ++ struct irq_domain *msi_domain; + struct resource *res = NULL; -+ int vec; ++ int vec, ret; + + iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL); + if (!iommu) @@ -382547,16 +393276,6 @@ index 000000000000..da336863f152 + iommu->caps = riscv_iommu_readq(iommu, RISCV_IOMMU_REG_CAPABILITIES); + iommu->fctl = 
riscv_iommu_readl(iommu, RISCV_IOMMU_REG_FCTL); + -+ /* For now we only support WSI */ -+ switch (FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps)) { -+ case RISCV_IOMMU_CAPABILITIES_IGS_WSI: -+ case RISCV_IOMMU_CAPABILITIES_IGS_BOTH: -+ break; -+ default: -+ return dev_err_probe(dev, -ENODEV, -+ "unable to use wire-signaled interrupts\n"); -+ } -+ + iommu->irqs_count = platform_irq_count(pdev); + if (iommu->irqs_count <= 0) + return dev_err_probe(dev, -ENODEV, @@ -382564,13 +393283,63 @@ index 000000000000..da336863f152 + if (iommu->irqs_count > RISCV_IOMMU_INTR_COUNT) + iommu->irqs_count = RISCV_IOMMU_INTR_COUNT; + -+ for (vec = 0; vec < iommu->irqs_count; vec++) -+ iommu->irqs[vec] = platform_get_irq(pdev, vec); ++ igs = FIELD_GET(RISCV_IOMMU_CAPABILITIES_IGS, iommu->caps); ++ switch (igs) { ++ case RISCV_IOMMU_CAPABILITIES_IGS_BOTH: ++ case RISCV_IOMMU_CAPABILITIES_IGS_MSI: ++ if (is_of_node(dev_fwnode(dev))) { ++ of_msi_configure(dev, to_of_node(dev->fwnode)); ++ } else { ++ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev), ++ DOMAIN_BUS_PLATFORM_MSI); ++ dev_set_msi_domain(dev, msi_domain); ++ } + -+ /* Enable wire-signaled interrupts, fctl.WSI */ -+ if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) { -+ iommu->fctl |= RISCV_IOMMU_FCTL_WSI; -+ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl); ++ if (!dev_get_msi_domain(dev)) { ++ dev_warn(dev, "failed to find an MSI domain\n"); ++ goto msi_fail; ++ } ++ ++ ret = platform_device_msi_init_and_alloc_irqs(dev, iommu->irqs_count, ++ riscv_iommu_write_msi_msg); ++ if (ret) { ++ dev_warn(dev, "failed to allocate MSIs\n"); ++ goto msi_fail; ++ } ++ ++ for (vec = 0; vec < iommu->irqs_count; vec++) ++ iommu->irqs[vec] = msi_get_virq(dev, vec); ++ ++ /* Enable message-signaled interrupts, fctl.WSI */ ++ if (iommu->fctl & RISCV_IOMMU_FCTL_WSI) { ++ iommu->fctl ^= RISCV_IOMMU_FCTL_WSI; ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl); ++ } ++ ++ dev_info(dev, "using MSIs\n"); ++ 
break; ++ ++msi_fail: ++ if (igs != RISCV_IOMMU_CAPABILITIES_IGS_BOTH) { ++ return dev_err_probe(dev, -ENODEV, ++ "unable to use wire-signaled interrupts\n"); ++ } ++ ++ fallthrough; ++ ++ case RISCV_IOMMU_CAPABILITIES_IGS_WSI: ++ for (vec = 0; vec < iommu->irqs_count; vec++) ++ iommu->irqs[vec] = platform_get_irq(pdev, vec); ++ ++ /* Enable wire-signaled interrupts, fctl.WSI */ ++ if (!(iommu->fctl & RISCV_IOMMU_FCTL_WSI)) { ++ iommu->fctl |= RISCV_IOMMU_FCTL_WSI; ++ riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FCTL, iommu->fctl); ++ } ++ dev_info(dev, "using wire-signaled interrupts\n"); ++ break; ++ default: ++ return dev_err_probe(dev, -ENODEV, "invalid IGS\n"); + } + + return riscv_iommu_init(iommu); @@ -382578,7 +393347,18 @@ index 000000000000..da336863f152 + +static void riscv_iommu_platform_remove(struct platform_device *pdev) +{ -+ riscv_iommu_remove(dev_get_drvdata(&pdev->dev)); ++ struct riscv_iommu_device *iommu = dev_get_drvdata(&pdev->dev); ++ bool msi = !(iommu->fctl & RISCV_IOMMU_FCTL_WSI); ++ ++ riscv_iommu_remove(iommu); ++ ++ if (msi) ++ platform_device_msi_free_irqs_all(&pdev->dev); ++}; ++ ++static void riscv_iommu_platform_shutdown(struct platform_device *pdev) ++{ ++ riscv_iommu_disable(dev_get_drvdata(&pdev->dev)); +}; + +static const struct of_device_id riscv_iommu_of_match[] = { @@ -382586,23 +393366,31 @@ index 000000000000..da336863f152 + {}, +}; + ++static const struct acpi_device_id riscv_iommu_acpi_match[] = { ++ { "RSCV0004", 0 }, ++ {} ++}; ++MODULE_DEVICE_TABLE(acpi, riscv_iommu_acpi_match); ++ +static struct platform_driver riscv_iommu_platform_driver = { + .probe = riscv_iommu_platform_probe, + .remove_new = riscv_iommu_platform_remove, ++ .shutdown = riscv_iommu_platform_shutdown, + .driver = { + .name = "riscv,iommu", + .of_match_table = riscv_iommu_of_match, + .suppress_bind_attrs = true, ++ .acpi_match_table = riscv_iommu_acpi_match, + }, +}; + +builtin_platform_driver(riscv_iommu_platform_driver); diff --git 
a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c new file mode 100644 -index 000000000000..8a05def774bd +index 000000000000..8f0553b0b553 --- /dev/null +++ b/drivers/iommu/riscv/iommu.c -@@ -0,0 +1,1661 @@ +@@ -0,0 +1,1697 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * IOMMU API for RISC-V IOMMU implementations. @@ -382617,35 +393405,27 @@ index 000000000000..8a05def774bd + +#define pr_fmt(fmt) "riscv-iommu: " fmt + ++#include ++#include +#include +#include +#include +#include +#include ++#include ++#include +#include +#include ++#include + +#include "../iommu-pages.h" +#include "iommu-bits.h" +#include "iommu.h" + -+/* Timeouts in [us] */ -+#define RISCV_IOMMU_QCSR_TIMEOUT 150000 -+#define RISCV_IOMMU_QUEUE_TIMEOUT 150000 -+#define RISCV_IOMMU_DDTP_TIMEOUT 10000000 -+#define RISCV_IOMMU_IOTINVAL_TIMEOUT 90000000 -+ +/* Number of entries per CMD/FLT queue, should be <= INT_MAX */ +#define RISCV_IOMMU_DEF_CQ_COUNT 8192 +#define RISCV_IOMMU_DEF_FQ_COUNT 4096 + -+/* RISC-V IOMMU PPN <> PHYS address conversions, PHYS <=> PPN[53:10] */ -+#define phys_to_ppn(pa) (((pa) >> 2) & (((1ULL << 44) - 1) << 10)) -+#define ppn_to_phys(pn) (((pn) << 2) & (((1ULL << 44) - 1) << 12)) -+ -+#define dev_to_iommu(dev) \ -+ iommu_get_iommu_dev(dev, struct riscv_iommu_device, iommu) -+ +/* IOMMU PSCID allocation namespace. 
*/ +static DEFINE_IDA(riscv_iommu_pscids); +#define RISCV_IOMMU_MAX_PSCID (BIT(20) - 1) @@ -382778,7 +393558,7 @@ index 000000000000..8a05def774bd + if (!queue->base) + return -ENOMEM; + -+ qb = phys_to_ppn(queue->phys) | ++ qb = riscv_iommu_phys_to_ppn(queue->phys) | + FIELD_PREP(RISCV_IOMMU_QUEUE_LOG2SZ_FIELD, logsz); + + /* Update base register and read back to verify hw accepted our write */ @@ -382845,6 +393625,12 @@ index 000000000000..8a05def774bd + return rc; + } + ++ /* Empty queue before enabling it */ ++ if (queue->qid == RISCV_IOMMU_INTR_CQ) ++ riscv_iommu_writel(queue->iommu, Q_TAIL(queue), 0); ++ else ++ riscv_iommu_writel(queue->iommu, Q_HEAD(queue), 0); ++ + /* + * Enable queue with interrupts, clear any memory fault if any. + * Wait for the hardware to acknowledge request and activate queue @@ -383081,15 +393867,15 @@ index 000000000000..8a05def774bd +} + +/* Send command to the IOMMU command queue */ -+static void riscv_iommu_cmd_send(struct riscv_iommu_device *iommu, -+ struct riscv_iommu_command *cmd) ++void riscv_iommu_cmd_send(struct riscv_iommu_device *iommu, ++ struct riscv_iommu_command *cmd) +{ + riscv_iommu_queue_send(&iommu->cmdq, cmd, sizeof(*cmd)); +} + +/* Send IOFENCE.C command and wait for all scheduled commands to complete. 
*/ -+static void riscv_iommu_cmd_sync(struct riscv_iommu_device *iommu, -+ unsigned int timeout_us) ++void riscv_iommu_cmd_sync(struct riscv_iommu_device *iommu, ++ unsigned int timeout_us) +{ + struct riscv_iommu_command cmd; + unsigned int prod; @@ -383215,7 +394001,7 @@ index 000000000000..8a05def774bd + do { + ddt = READ_ONCE(*(unsigned long *)ddtp); + if (ddt & RISCV_IOMMU_DDTE_V) { -+ ddtp = __va(ppn_to_phys(ddt)); ++ ddtp = __va(riscv_iommu_ppn_to_phys(ddt)); + break; + } + @@ -383223,7 +394009,7 @@ index 000000000000..8a05def774bd + if (!ptr) + return NULL; + -+ new = phys_to_ppn(__pa(ptr)) | RISCV_IOMMU_DDTE_V; ++ new = riscv_iommu_phys_to_ppn(__pa(ptr)) | RISCV_IOMMU_DDTE_V; + old = cmpxchg_relaxed((unsigned long *)ddtp, ddt, new); + + if (old == ddt) { @@ -383250,9 +394036,11 @@ index 000000000000..8a05def774bd + * This is best effort IOMMU translation shutdown flow. + * Disable IOMMU without waiting for hardware response. + */ -+static void riscv_iommu_disable(struct riscv_iommu_device *iommu) ++void riscv_iommu_disable(struct riscv_iommu_device *iommu) +{ -+ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, 0); ++ riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, ++ FIELD_PREP(RISCV_IOMMU_DDTP_IOMMU_MODE, ++ RISCV_IOMMU_DDTP_IOMMU_MODE_BARE)); + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_CQCSR, 0); + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_FQCSR, 0); + riscv_iommu_writel(iommu, RISCV_IOMMU_REG_PQCSR, 0); @@ -383288,7 +394076,7 @@ index 000000000000..8a05def774bd + if (ddtp & RISCV_IOMMU_DDTP_BUSY) + return -EBUSY; + -+ iommu->ddt_phys = ppn_to_phys(ddtp); ++ iommu->ddt_phys = riscv_iommu_ppn_to_phys(ddtp); + if (iommu->ddt_phys) + iommu->ddt_root = devm_ioremap(iommu->dev, + iommu->ddt_phys, PAGE_SIZE); @@ -383335,7 +394123,7 @@ index 000000000000..8a05def774bd + do { + rq_ddtp = FIELD_PREP(RISCV_IOMMU_DDTP_IOMMU_MODE, rq_mode); + if (rq_mode > RISCV_IOMMU_DDTP_IOMMU_MODE_BARE) -+ rq_ddtp |= phys_to_ppn(iommu->ddt_phys); ++ rq_ddtp |= 
riscv_iommu_phys_to_ppn(iommu->ddt_phys); + + riscv_iommu_writeq(iommu, RISCV_IOMMU_REG_DDTP, rq_ddtp); + ddtp = riscv_iommu_read_ddtp(iommu); @@ -383400,48 +394188,9 @@ index 000000000000..8a05def774bd + return 0; +} + -+/* This struct contains protection domain specific IOMMU driver data. */ -+struct riscv_iommu_domain { -+ struct iommu_domain domain; -+ struct list_head bonds; -+ spinlock_t lock; /* protect bonds list updates. */ -+ int pscid; -+ bool amo_enabled; -+ int numa_node; -+ unsigned int pgd_mode; -+ unsigned long *pgd_root; -+}; -+ +#define iommu_domain_to_riscv(iommu_domain) \ + container_of(iommu_domain, struct riscv_iommu_domain, domain) + -+/* Private IOMMU data for managed devices, dev_iommu_priv_* */ -+struct riscv_iommu_info { -+ struct riscv_iommu_domain *domain; -+}; -+ -+/* -+ * Linkage between an iommu_domain and attached devices. -+ * -+ * Protection domain requiring IOATC and DevATC translation cache invalidations, -+ * should be linked to attached devices using a riscv_iommu_bond structure. -+ * Devices should be linked to the domain before first use and unlinked after -+ * the translations from the referenced protection domain can no longer be used. -+ * Blocking and identity domains are not tracked here, as the IOMMU hardware -+ * does not cache negative and/or identity (BARE mode) translations, and DevATC -+ * is disabled for those protection domains. -+ * -+ * The device pointer and IOMMU data remain stable in the bond struct after -+ * _probe_device() where it's attached to the managed IOMMU, up to the -+ * completion of the _release_device() call. The release of the bond structure -+ * is synchronized with the device release. 
-+ */ -+struct riscv_iommu_bond { -+ struct list_head list; -+ struct rcu_head rcu; -+ struct device *dev; -+}; -+ +static int riscv_iommu_bond_link(struct riscv_iommu_domain *domain, + struct device *dev) +{ @@ -383605,8 +394354,9 @@ index 000000000000..8a05def774bd + * device is not quiesced might be disruptive, potentially causing + * interim translation faults. + */ -+static void riscv_iommu_iodir_update(struct riscv_iommu_device *iommu, -+ struct device *dev, u64 fsc, u64 ta) ++void riscv_iommu_iodir_update(struct riscv_iommu_device *iommu, ++ struct device *dev, ++ struct riscv_iommu_dc *new_dc) +{ + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct riscv_iommu_dc *dc; @@ -383640,10 +394390,13 @@ index 000000000000..8a05def774bd + for (i = 0; i < fwspec->num_ids; i++) { + dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); + tc = READ_ONCE(dc->tc); -+ tc |= ta & RISCV_IOMMU_DC_TC_V; ++ tc |= new_dc->ta & RISCV_IOMMU_DC_TC_V; + -+ WRITE_ONCE(dc->fsc, fsc); -+ WRITE_ONCE(dc->ta, ta & RISCV_IOMMU_PC_TA_PSCID); ++ WRITE_ONCE(dc->fsc, new_dc->fsc); ++ WRITE_ONCE(dc->ta, new_dc->ta & RISCV_IOMMU_PC_TA_PSCID); ++ WRITE_ONCE(dc->msiptp, new_dc->msiptp); ++ WRITE_ONCE(dc->msi_addr_mask, new_dc->msi_addr_mask); ++ WRITE_ONCE(dc->msi_addr_pattern, new_dc->msi_addr_pattern); + /* Update device context, write TC.V as the last step. 
*/ + dma_wmb(); + WRITE_ONCE(dc->tc, tc); @@ -383875,11 +394628,11 @@ index 000000000000..8a05def774bd + dma_addr_t iova) +{ + struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); -+ unsigned long pte_size; ++ size_t pte_size; + unsigned long *ptr; + + ptr = riscv_iommu_pte_fetch(domain, iova, &pte_size); -+ if (_io_pte_none(*ptr) || !_io_pte_present(*ptr)) ++ if (!ptr) + return 0; + + return pfn_to_phys(__page_val_to_pfn(*ptr)) | (iova & (pte_size - 1)); @@ -383892,6 +394645,8 @@ index 000000000000..8a05def774bd + + WARN_ON(!list_empty(&domain->bonds)); + ++ riscv_iommu_ir_free_paging_domain(domain); ++ + if ((int)domain->pscid > 0) + ida_free(&riscv_iommu_pscids, domain->pscid); + @@ -383920,20 +394675,33 @@ index 000000000000..8a05def774bd + struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain); + struct riscv_iommu_device *iommu = dev_to_iommu(dev); + struct riscv_iommu_info *info = dev_iommu_priv_get(dev); -+ u64 fsc, ta; ++ struct riscv_iommu_dc dc = {0}; ++ int ret; + + if (!riscv_iommu_pt_supported(iommu, domain->pgd_mode)) + return -ENODEV; + -+ fsc = FIELD_PREP(RISCV_IOMMU_PC_FSC_MODE, domain->pgd_mode) | -+ FIELD_PREP(RISCV_IOMMU_PC_FSC_PPN, virt_to_pfn(domain->pgd_root)); -+ ta = FIELD_PREP(RISCV_IOMMU_PC_TA_PSCID, domain->pscid) | -+ RISCV_IOMMU_PC_TA_V; ++ ret = riscv_iommu_ir_attach_paging_domain(domain, dev); ++ if (ret) ++ return ret; ++ ++ dc.fsc = FIELD_PREP(RISCV_IOMMU_PC_FSC_MODE, domain->pgd_mode) | ++ FIELD_PREP(RISCV_IOMMU_PC_FSC_PPN, virt_to_pfn(domain->pgd_root)); ++ dc.ta = FIELD_PREP(RISCV_IOMMU_PC_TA_PSCID, domain->pscid) | ++ RISCV_IOMMU_PC_TA_V; ++ ++ if (domain->msi_root) { ++ dc.msiptp = virt_to_pfn(domain->msi_root) | ++ FIELD_PREP(RISCV_IOMMU_DC_MSIPTP_MODE, ++ RISCV_IOMMU_DC_MSIPTP_MODE_FLAT); ++ dc.msi_addr_mask = domain->msi_addr_mask; ++ dc.msi_addr_pattern = domain->msi_addr_pattern; ++ } + + if (riscv_iommu_bond_link(domain, dev)) + return -ENOMEM; + -+ riscv_iommu_iodir_update(iommu, 
dev, fsc, ta); ++ riscv_iommu_iodir_update(iommu, dev, &dc); + riscv_iommu_bond_unlink(info->domain, dev); + info->domain = domain; + @@ -384019,14 +394787,22 @@ index 000000000000..8a05def774bd + return &domain->domain; +} + ++static void riscv_iommu_get_resv_regions(struct device *dev, struct list_head *head) ++{ ++ riscv_iommu_ir_get_resv_regions(dev, head); ++} ++ +static int riscv_iommu_attach_blocking_domain(struct iommu_domain *iommu_domain, + struct device *dev) +{ + struct riscv_iommu_device *iommu = dev_to_iommu(dev); + struct riscv_iommu_info *info = dev_iommu_priv_get(dev); ++ struct riscv_iommu_dc dc = {0}; ++ ++ dc.fsc = RISCV_IOMMU_FSC_BARE; + + /* Make device context invalid, translation requests will fault w/ #258 */ -+ riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, 0); ++ riscv_iommu_iodir_update(iommu, dev, &dc); + riscv_iommu_bond_unlink(info->domain, dev); + info->domain = NULL; + @@ -384045,8 +394821,12 @@ index 000000000000..8a05def774bd +{ + struct riscv_iommu_device *iommu = dev_to_iommu(dev); + struct riscv_iommu_info *info = dev_iommu_priv_get(dev); ++ struct riscv_iommu_dc dc = {0}; ++ ++ dc.fsc = RISCV_IOMMU_FSC_BARE; ++ dc.ta = RISCV_IOMMU_PC_TA_V; + -+ riscv_iommu_iodir_update(iommu, dev, RISCV_IOMMU_FSC_BARE, RISCV_IOMMU_PC_TA_V); ++ riscv_iommu_iodir_update(iommu, dev, &dc); + riscv_iommu_bond_unlink(info->domain, dev); + info->domain = NULL; + @@ -384067,6 +394847,17 @@ index 000000000000..8a05def774bd + return generic_device_group(dev); +} + ++static bool riscv_iommu_capable(struct device *dev, enum iommu_cap cap) ++{ ++ switch (cap) { ++ case IOMMU_CAP_CACHE_COHERENCY: ++ /* The RISC-V IOMMU is always DMA cache coherent. 
*/ ++ return true; ++ default: ++ return false; ++ } ++} ++ +static int riscv_iommu_of_xlate(struct device *dev, const struct of_phandle_args *args) +{ + return iommu_fwspec_add_ids(dev, args->args, 1); @@ -384075,6 +394866,8 @@ index 000000000000..8a05def774bd +static struct iommu_device *riscv_iommu_probe_device(struct device *dev) +{ + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); ++ const struct imsic_global_config *imsic_global; ++ struct irq_domain *irqdomain = NULL; + struct riscv_iommu_device *iommu; + struct riscv_iommu_info *info; + struct riscv_iommu_dc *dc; @@ -384098,6 +394891,18 @@ index 000000000000..8a05def774bd + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); ++ ++ imsic_global = imsic_get_global_config(); ++ if (imsic_global && imsic_global->nr_ids) { ++ irqdomain = riscv_iommu_ir_irq_domain_create(iommu, dev, info); ++ if (!irqdomain) { ++ kfree(info); ++ return ERR_PTR(-ENOMEM); ++ } ++ } ++ ++ info->irqdomain = irqdomain; ++ + /* + * Allocate and pre-configure device context entries in + * the device directory. Do not mark the context valid yet. 
@@ -384108,6 +394913,7 @@ index 000000000000..8a05def774bd + for (i = 0; i < fwspec->num_ids; i++) { + dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]); + if (!dc) { ++ riscv_iommu_ir_irq_domain_remove(info); + kfree(info); + return ERR_PTR(-ENODEV); + } @@ -384121,22 +394927,32 @@ index 000000000000..8a05def774bd + return &iommu->iommu; +} + ++static void riscv_iommu_probe_finalize(struct device *dev) ++{ ++ set_dma_ops(dev, NULL); ++ iommu_setup_dma_ops(dev, 0, U64_MAX); ++} ++ +static void riscv_iommu_release_device(struct device *dev) +{ + struct riscv_iommu_info *info = dev_iommu_priv_get(dev); + ++ riscv_iommu_ir_irq_domain_remove(info); + kfree_rcu_mightsleep(info); +} + +static const struct iommu_ops riscv_iommu_ops = { + .pgsize_bitmap = SZ_4K, + .of_xlate = riscv_iommu_of_xlate, ++ .capable = riscv_iommu_capable, + .identity_domain = &riscv_iommu_identity_domain, + .blocked_domain = &riscv_iommu_blocking_domain, + .release_domain = &riscv_iommu_blocking_domain, + .domain_alloc_paging = riscv_iommu_alloc_paging_domain, ++ .get_resv_regions = riscv_iommu_get_resv_regions, + .device_group = riscv_iommu_device_group, + .probe_device = riscv_iommu_probe_device, ++ .probe_finalize = riscv_iommu_probe_finalize, + .release_device = riscv_iommu_release_device, +}; + @@ -384247,6 +395063,14 @@ index 000000000000..8a05def774bd + goto err_iodir_off; + } + ++ if (!acpi_disabled) { ++ rc = rimt_iommu_register(iommu->dev); ++ if (rc) { ++ dev_err_probe(iommu->dev, rc, "cannot register iommu with RIMT\n"); ++ goto err_remove_sysfs; ++ } ++ } ++ + rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev); + if (rc) { + dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n"); @@ -384266,10 +395090,10 @@ index 000000000000..8a05def774bd +} diff --git a/drivers/iommu/riscv/iommu.h b/drivers/iommu/riscv/iommu.h new file mode 100644 -index 000000000000..b1c4664542b4 +index 000000000000..cd22e9f56031 --- /dev/null +++ b/drivers/iommu/riscv/iommu.h -@@ 
-0,0 +1,88 @@ +@@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright © 2022-2024 Rivos Inc. @@ -384289,8 +395113,45 @@ index 000000000000..b1c4664542b4 + +#include "iommu-bits.h" + ++/* Timeouts in [us] */ ++#define RISCV_IOMMU_QCSR_TIMEOUT 150000 ++#define RISCV_IOMMU_QUEUE_TIMEOUT 150000 ++#define RISCV_IOMMU_DDTP_TIMEOUT 10000000 ++#define RISCV_IOMMU_IOTINVAL_TIMEOUT 90000000 ++ ++/* This struct contains protection domain specific IOMMU driver data. */ ++struct riscv_iommu_domain { ++ struct iommu_domain domain; ++ struct list_head bonds; ++ spinlock_t lock; /* protect bonds list updates. */ ++ int pscid; ++ int amo_enabled; ++ int numa_node; ++ unsigned int pgd_mode; ++ unsigned long *pgd_root; ++ struct riscv_iommu_msipte *msi_root; ++ refcount_t *msi_pte_counts; ++ raw_spinlock_t msi_lock; ++ u32 msitbl_config; ++ u64 msi_addr_mask; ++ u64 msi_addr_pattern; ++ u32 group_index_bits; ++ u32 group_index_shift; ++ int msi_order; ++ size_t imsic_stride; ++}; ++ ++/* Private IOMMU data for managed devices, dev_iommu_priv_* */ ++struct riscv_iommu_info { ++ struct riscv_iommu_domain *domain; ++ struct irq_domain *irqdomain; ++}; ++ +struct riscv_iommu_device; + ++#define dev_to_iommu(dev) \ ++ iommu_get_iommu_dev(dev, struct riscv_iommu_device, iommu) ++ +struct riscv_iommu_queue { + atomic_t prod; /* unbounded producer allocation index */ + atomic_t head; /* unbounded shadow ring buffer consumer index */ @@ -384334,8 +395195,48 @@ index 000000000000..b1c4664542b4 + u64 *ddt_root; +}; + ++/* ++ * Linkage between an iommu_domain and attached devices. ++ * ++ * Protection domain requiring IOATC and DevATC translation cache invalidations, ++ * should be linked to attached devices using a riscv_iommu_bond structure. ++ * Devices should be linked to the domain before first use and unlinked after ++ * the translations from the referenced protection domain can no longer be used. 
++ * Blocking and identity domains are not tracked here, as the IOMMU hardware ++ * does not cache negative and/or identity (BARE mode) translations, and DevATC ++ * is disabled for those protection domains. ++ * ++ * The device pointer and IOMMU data remain stable in the bond struct after ++ * _probe_device() where it's attached to the managed IOMMU, up to the ++ * completion of the _release_device() call. The release of the bond structure ++ * is synchronized with the device release. ++ */ ++struct riscv_iommu_bond { ++ struct list_head list; ++ struct rcu_head rcu; ++ struct device *dev; ++}; ++ +int riscv_iommu_init(struct riscv_iommu_device *iommu); +void riscv_iommu_remove(struct riscv_iommu_device *iommu); ++void riscv_iommu_disable(struct riscv_iommu_device *iommu); ++ ++void riscv_iommu_iodir_update(struct riscv_iommu_device *iommu, ++ struct device *dev, ++ struct riscv_iommu_dc *new_dc); ++ ++void riscv_iommu_cmd_send(struct riscv_iommu_device *iommu, ++ struct riscv_iommu_command *cmd); ++void riscv_iommu_cmd_sync(struct riscv_iommu_device *iommu, unsigned int timeout_us); ++ ++struct irq_domain *riscv_iommu_ir_irq_domain_create(struct riscv_iommu_device *iommu, ++ struct device *dev, ++ struct riscv_iommu_info *info); ++void riscv_iommu_ir_irq_domain_remove(struct riscv_iommu_info *info); ++int riscv_iommu_ir_attach_paging_domain(struct riscv_iommu_domain *domain, ++ struct device *dev); ++void riscv_iommu_ir_free_paging_domain(struct riscv_iommu_domain *domain); ++void riscv_iommu_ir_get_resv_regions(struct device *dev, struct list_head *head); + +#define riscv_iommu_readl(iommu, addr) \ + readl_relaxed((iommu)->reg + (addr)) @@ -384399,10 +395300,10 @@ index 6954b0d5a8ea..9896b940df66 100644 struct platform_device *iommu_pdev = of_find_device_by_node(args->np); unsigned id = args->args[0]; diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c -index 310871728ab4..14e525bd0d9b 100644 +index 310871728ab4..aa1aee3a8d1d 100644 --- 
a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c -@@ -830,7 +830,7 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np) +@@ -830,12 +830,12 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np) } static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev, @@ -384411,7 +395312,21 @@ index 310871728ab4..14e525bd0d9b 100644 { const struct iommu_ops *ops = smmu->iommu.ops; int err; -@@ -959,7 +959,7 @@ static struct iommu_group *tegra_smmu_device_group(struct device *dev) + +- err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops); ++ err = iommu_fwspec_init(dev, of_fwnode_handle(dev->of_node)); + if (err < 0) { + dev_err(dev, "failed to initialize fwspec: %d\n", err); + return err; +@@ -844,7 +844,6 @@ static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev, + err = ops->of_xlate(dev, args); + if (err < 0) { + dev_err(dev, "failed to parse SW group ID: %d\n", err); +- iommu_fwspec_free(dev); + return err; + } + +@@ -959,7 +958,7 @@ static struct iommu_group *tegra_smmu_device_group(struct device *dev) } static int tegra_smmu_of_xlate(struct device *dev, @@ -385723,10 +396638,10 @@ index 000000000000..d586c579713d +#endif diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c new file mode 100644 -index 000000000000..c708780e8760 +index 000000000000..ec889f7b4d77 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-platform.c -@@ -0,0 +1,395 @@ +@@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
@@ -385977,6 +396892,8 @@ index 000000000000..c708780e8760 + info->chip->irq_set_affinity = imsic_irq_set_affinity; +#endif + break; ++ case DOMAIN_BUS_MSI_REMAP: ++ break; + default: + WARN_ON_ONCE(1); + return false; @@ -387774,7 +398691,7 @@ index 000000000000..176d66eb2615 + +arch_initcall(sg2044_msi_init); diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c -index 572899669154..8744f09cc99c 100644 +index 572899669154..e343663006b5 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -3,7 +3,8 @@ @@ -388307,7 +399224,7 @@ index 572899669154..8744f09cc99c 100644 done: for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { plic_toggle(handler, hwirq, 0); -@@ -538,52 +718,101 @@ static int __init __plic_init(struct device_node *node, +@@ -538,52 +718,102 @@ static int __init __plic_init(struct device_node *node, nr_handlers++; } @@ -388438,6 +399355,7 @@ index 572899669154..8744f09cc99c 100644 -IRQCHIP_DECLARE(andestech_nceplic100, "andestech,nceplic100", plic_edge_init); -IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_edge_init); +IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe); ++IRQCHIP_DECLARE(ultrarisc_dp1000_plic, "ultrarisc,dp1000-plic", plic_early_probe); diff --git a/drivers/irqchip/irq-thead-c900-aclint-sswi.c b/drivers/irqchip/irq-thead-c900-aclint-sswi.c new file mode 100644 index 000000000000..1f24faf9f652 @@ -389475,6 +400393,42 @@ index 000000000000..71c983bd631c +MODULE_AUTHOR("Fugang Duan "); +MODULE_DESCRIPTION("XuanTie TH1520 Mailbox IPC driver"); +MODULE_LICENSE("GPL v2"); +diff --git a/drivers/media/platform/nvidia/tegra-vde/iommu.c b/drivers/media/platform/nvidia/tegra-vde/iommu.c +index 5521ed3e465f..b1d9d841d944 100644 +--- a/drivers/media/platform/nvidia/tegra-vde/iommu.c ++++ b/drivers/media/platform/nvidia/tegra-vde/iommu.c +@@ -78,9 +78,10 @@ int tegra_vde_iommu_init(struct tegra_vde *vde) + arm_iommu_release_mapping(mapping); + } + #endif +- 
vde->domain = iommu_domain_alloc(&platform_bus_type); +- if (!vde->domain) { +- err = -ENOMEM; ++ vde->domain = iommu_paging_domain_alloc(dev); ++ if (IS_ERR(vde->domain)) { ++ err = PTR_ERR(vde->domain); ++ vde->domain = NULL; + goto put_group; + } + +diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c +index fe7da2b30482..66a18830e66d 100644 +--- a/drivers/media/platform/qcom/venus/firmware.c ++++ b/drivers/media/platform/qcom/venus/firmware.c +@@ -316,10 +316,10 @@ int venus_firmware_init(struct venus_core *core) + + core->fw.dev = &pdev->dev; + +- iommu_dom = iommu_domain_alloc(&platform_bus_type); +- if (!iommu_dom) { ++ iommu_dom = iommu_paging_domain_alloc(core->fw.dev); ++ if (IS_ERR(iommu_dom)) { + dev_err(core->fw.dev, "Failed to allocate iommu domain\n"); +- ret = -ENOMEM; ++ ret = PTR_ERR(iommu_dom); + goto err_unregister; + } + diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 96633e8d4a9c..7a4a2b83972d 100644 --- a/drivers/mfd/Kconfig @@ -396478,7 +407432,7 @@ index 000000000000..d12660124ea7 +MODULE_ALIAS("platform:spacemit_eth"); diff --git a/drivers/net/ethernet/spacemit/k1-emac.h b/drivers/net/ethernet/spacemit/k1-emac.h new file mode 100644 -index 000000000000..4217880f1be5 +index 000000000000..62bc5aed735e --- /dev/null +++ b/drivers/net/ethernet/spacemit/k1-emac.h @@ -0,0 +1,727 @@ @@ -397208,7 +408162,7 @@ index 000000000000..4217880f1be5 + return readl(priv->iobase + reg); +} + -+#endif /* _K1X_EMAC_H_ */ ++#endif /* _K1_EMAC_H_ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 92d7d5a00b84..c47253cda6f3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -483205,6 +494159,42 @@ index 000000000000..012b90c9770a +extern int aicwf_rwnx_usb_platform_init(struct aic_usb_dev *usbdev); + +#endif +diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c +index 
2c39bad7ebfb..bd3f956c44e1 100644 +--- a/drivers/net/wireless/ath/ath10k/snoc.c ++++ b/drivers/net/wireless/ath/ath10k/snoc.c +@@ -1632,10 +1632,10 @@ static int ath10k_fw_init(struct ath10k *ar) + + ar_snoc->fw.dev = &pdev->dev; + +- iommu_dom = iommu_domain_alloc(&platform_bus_type); +- if (!iommu_dom) { ++ iommu_dom = iommu_paging_domain_alloc(ar_snoc->fw.dev); ++ if (IS_ERR(iommu_dom)) { + ath10k_err(ar, "failed to allocate iommu domain\n"); +- ret = -ENOMEM; ++ ret = PTR_ERR(iommu_dom); + goto err_unregister; + } + +diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c +index ef11c138bf30..2451d0d9ccb9 100644 +--- a/drivers/net/wireless/ath/ath11k/ahb.c ++++ b/drivers/net/wireless/ath/ath11k/ahb.c +@@ -995,10 +995,10 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab) + + ab_ahb->fw.dev = &pdev->dev; + +- iommu_dom = iommu_domain_alloc(&platform_bus_type); +- if (!iommu_dom) { ++ iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev); ++ if (IS_ERR(iommu_dom)) { + ath11k_err(ab, "failed to allocate iommu domain\n"); +- ret = -ENOMEM; ++ ret = PTR_ERR(iommu_dom); + goto err_unregister; + } + diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index 5bc9c4874fe3..9d1a3f7175d3 100644 --- a/drivers/nvmem/Kconfig @@ -484443,19 +495433,29 @@ index 000000000000..5ab8469e3ec3 +MODULE_DESCRIPTION("XuanTie TH1520 nvmem driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/of/device.c b/drivers/of/device.c -index 873d933e8e6d..0681c220d114 100644 +index 873d933e8e6d..61bc78e1849d 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c -@@ -96,7 +96,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, +@@ -96,11 +96,15 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, const struct bus_dma_region *map = NULL; struct device_node *bus_np; u64 dma_start = 0; - u64 mask, end, size = 0; +- bool coherent; +- int iommu_ret; + u64 mask, end = 0; - bool coherent; - int iommu_ret; 
++ bool coherent, set_map = false; int ret; -@@ -117,34 +117,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, + ++ if (dev->dma_range_map) { ++ dev_dbg(dev, "dma_range_map already set\n"); ++ goto skip_map; ++ } ++ + if (np == dev->of_node) + bus_np = __of_get_dma_parent(np); + else +@@ -117,36 +121,12 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, if (!force_dma) return ret == -ENODEV ? 0 : ret; } else { @@ -484489,10 +495489,14 @@ index 873d933e8e6d..0681c220d114 100644 - } + dma_start = dma_range_map_min(map); + end = dma_range_map_max(map); ++ set_map = true; } - +- ++skip_map: /* -@@ -158,16 +133,15 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, + * If @dev is expected to be DMA-capable then the bus code that created + * it should have initialised its dma_mask pointer by this point. For +@@ -158,21 +138,20 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, dev->dma_mask = &dev->coherent_dma_mask; } @@ -484513,15 +495517,51 @@ index 873d933e8e6d..0681c220d114 100644 mask = DMA_BIT_MASK(ilog2(end) + 1); dev->coherent_dma_mask &= mask; *dev->dma_mask &= mask; -@@ -201,7 +175,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, - } else - dev_dbg(dev, "device is behind an iommu\n"); + /* ...but only set bus limit and range map if we found valid dma-ranges earlier */ +- if (!ret) { ++ if (set_map) { + dev->bus_dma_limit = end; + dev->dma_range_map = map; + } +@@ -181,29 +160,21 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, + dev_dbg(dev, "device is%sdma coherent\n", + coherent ? 
" " : " not "); + +- iommu_ret = of_iommu_configure(dev, np, id); +- if (iommu_ret == -EPROBE_DEFER) { ++ ret = of_iommu_configure(dev, np, id); ++ if (ret == -EPROBE_DEFER) { + /* Don't touch range map if it wasn't set from a valid dma-ranges */ +- if (!ret) ++ if (set_map) + dev->dma_range_map = NULL; + kfree(map); + return -EPROBE_DEFER; +- } else if (iommu_ret == -ENODEV) { +- dev_dbg(dev, "device is not behind an iommu\n"); +- } else if (iommu_ret) { +- dev_err(dev, "iommu configuration for device failed with %pe\n", +- ERR_PTR(iommu_ret)); +- +- /* +- * Historically this routine doesn't fail driver probing +- * due to errors in of_iommu_configure() +- */ +- } else +- dev_dbg(dev, "device is behind an iommu\n"); ++ } ++ /* Take all other IOMMU errors to mean we'll just carry on without it */ ++ dev_dbg(dev, "device is%sbehind an iommu\n", ++ !ret ? " " : " not "); - arch_setup_dma_ops(dev, dma_start, size, coherent); + arch_setup_dma_ops(dev, dma_start, end - dma_start + 1, coherent); - if (iommu_ret) +- if (iommu_ret) ++ if (ret) of_dma_set_restricted_buffer(dev, np); + + return 0; diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig index 291d12711363..25c768d5afb4 100644 --- a/drivers/pci/controller/cadence/Kconfig @@ -485569,68 +496609,69 @@ index bf5c311875a1..b8ce73d09446 100644 # The following drivers are for devices that use the generic ACPI # pci_root.c driver but don't support standard ECAM config access. 
+diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c +index bf9a961c9f27..0307e9b99631 100644 +--- a/drivers/pci/controller/dwc/pci-keystone.c ++++ b/drivers/pci/controller/dwc/pci-keystone.c +@@ -190,12 +190,6 @@ static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + (int)data->hwirq, msg->address_hi, msg->address_lo); + } + +-static int ks_pcie_msi_set_affinity(struct irq_data *irq_data, +- const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static void ks_pcie_msi_mask(struct irq_data *data) + { + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(data); +@@ -248,7 +242,6 @@ static struct irq_chip ks_pcie_msi_irq_chip = { + .name = "KEYSTONE-PCI-MSI", + .irq_ack = ks_pcie_msi_irq_ack, + .irq_compose_msi_msg = ks_pcie_compose_msi_msg, +- .irq_set_affinity = ks_pcie_msi_set_affinity, + .irq_mask = ks_pcie_msi_mask, + .irq_unmask = ks_pcie_msi_unmask, + }; diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c -index a7170fd0e847..5203dcc21128 100644 +index a7170fd0e847..fc819020d51d 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c -@@ -45,6 +45,9 @@ static struct irq_chip dw_pcie_msi_irq_chip = { - .irq_ack = dw_msi_ack_irq, - .irq_mask = dw_msi_mask_irq, - .irq_unmask = dw_msi_unmask_irq, -+#if defined CONFIG_SMP && defined CONFIG_PCIE_ULTRARISC -+ .irq_set_affinity = irq_chip_set_affinity_parent, -+#endif +@@ -48,8 +48,9 @@ static struct irq_chip dw_pcie_msi_irq_chip = { }; static struct msi_domain_info dw_pcie_msi_domain_info = { -@@ -116,6 +119,34 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY | 
MSI_FLAG_PCI_MSIX | ++ MSI_FLAG_MULTI_PCI_MSI, + .chip = &dw_pcie_msi_irq_chip, + }; + +@@ -116,12 +117,6 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) (int)d->hwirq, msg->address_hi, msg->address_lo); } -+/* -+ * Set affinity for DP1000 MSI interrupts -+ * This is used for platforms that require setting the affinity of MSI -+ * interrupts to a specific CPU. -+ * -+ * @d: irq_data structure for the MSI interrupt -+ * @mask: cpumask to set the affinity to -+ * @force: if true, force the affinity to be set, even if it is already set -+ * -+ * Return: 0 on success, -EINVAL on failure -+ */ -+static int dw_pci_msi_set_affinity_dp1000(struct irq_data *d, -+ const struct cpumask *mask, bool force) -+{ -+ struct irq_domain *domain = d->domain; -+ struct dw_pcie_rp *pp = domain->host_data; -+ struct irq_desc *desc; -+ struct irq_data *data; -+ -+ desc = irq_to_desc(pp->msi_irq[0]); -+ data = &(desc->irq_data); -+ -+ if (data->chip->irq_set_affinity) -+ return data->chip->irq_set_affinity(data, mask, force); -+ -+ return -EINVAL; -+} -+ - static int dw_pci_msi_set_affinity(struct irq_data *d, - const struct cpumask *mask, bool force) +-static int dw_pci_msi_set_affinity(struct irq_data *d, +- const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static void dw_pci_bottom_mask(struct irq_data *d) { -@@ -177,7 +208,11 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = { + struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d); +@@ -177,7 +172,6 @@ static struct irq_chip dw_pci_msi_bottom_irq_chip = { .name = "DWPCI-MSI", .irq_ack = dw_pci_bottom_ack, .irq_compose_msi_msg = dw_pci_setup_msi_msg, -+#if defined CONFIG_PCIE_ULTRARISC -+ .irq_set_affinity = dw_pci_msi_set_affinity_dp1000, -+#else - .irq_set_affinity = dw_pci_msi_set_affinity, -+#endif +- .irq_set_affinity = dw_pci_msi_set_affinity, .irq_mask = dw_pci_bottom_mask, .irq_unmask = dw_pci_bottom_unmask, }; -@@ -644,6 +679,48 @@ static struct pci_ops dw_pcie_ops = { 
+@@ -644,6 +638,48 @@ static struct pci_ops dw_pcie_ops = { .write = pci_generic_config_write, }; @@ -485679,7 +496720,7 @@ index a7170fd0e847..5203dcc21128 100644 static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); -@@ -674,10 +751,14 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) +@@ -674,10 +710,14 @@ static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) if (pci->num_ob_windows <= ++i) break; @@ -487732,10 +498773,10 @@ index 000000000000..000ac313bed6 +#endif diff --git a/drivers/pci/controller/dwc/pcie-ultrarisc.c b/drivers/pci/controller/dwc/pcie-ultrarisc.c new file mode 100644 -index 000000000000..9a11fc7ad7d7 +index 000000000000..73c9cd36ca90 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-ultrarisc.c -@@ -0,0 +1,156 @@ +@@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DWC PCIe RC driver for UltraRISC DP1000 SoC @@ -487767,6 +498808,7 @@ index 000000000000..9a11fc7ad7d7 + +struct ultrarisc_pcie { + struct dw_pcie *pci; ++ u32 irq_mask[MAX_MSI_CTRLS]; +}; + +static const struct of_device_id ultrarisc_pcie_of_match[]; @@ -487876,6 +498918,49 @@ index 000000000000..9a11fc7ad7d7 + return 0; +} + ++int ultrarisc_pcie_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++ struct ultrarisc_pcie *ultrarisc_pcie = platform_get_drvdata(pdev); ++ struct dw_pcie *pci = ultrarisc_pcie->pci; ++ struct dw_pcie_rp *pp = &pci->pp; ++ int num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; ++ unsigned long flags; ++ int ctrl; ++ ++ raw_spin_lock_irqsave(&pp->lock, flags); ++ ++ for (ctrl = 0; ctrl < num_ctrls; ctrl++) ++ ultrarisc_pcie->irq_mask[ctrl] = pp->irq_mask[ctrl]; ++ ++ raw_spin_unlock_irqrestore(&pp->lock, flags); ++ ++ return 0; ++} ++ ++int ultrarisc_pcie_resume(struct platform_device *pdev) ++{ ++ struct ultrarisc_pcie *ultrarisc_pcie = platform_get_drvdata(pdev); ++ struct dw_pcie *pci = ultrarisc_pcie->pci; ++ struct dw_pcie_rp *pp = &pci->pp; ++ int num_ctrls = 
pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; ++ unsigned long flags; ++ int ctrl; ++ ++ raw_spin_lock_irqsave(&pp->lock, flags); ++ ++ for (ctrl = 0; ctrl < num_ctrls; ctrl++) { ++ pp->irq_mask[ctrl] = ultrarisc_pcie->irq_mask[ctrl]; ++ dw_pcie_writel_dbi(pci, ++ PCIE_MSI_INTR0_MASK + ++ ctrl * MSI_REG_CTRL_BLOCK_SIZE, ++ pp->irq_mask[ctrl]); ++ } ++ ++ raw_spin_unlock_irqrestore(&pp->lock, flags); ++ ++ return 0; ++} ++ +static const struct of_device_id ultrarisc_pcie_of_match[] = { + { + .compatible = "ultrarisc,dw-pcie", @@ -487890,10 +498975,400 @@ index 000000000000..9a11fc7ad7d7 + .suppress_bind_attrs = true, + }, + .probe = ultrarisc_pcie_probe, ++ .suspend = ultrarisc_pcie_suspend, ++ .resume = ultrarisc_pcie_resume, +}; +builtin_platform_driver(ultrarisc_pcie_driver); +diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +index 45b97a4b14db..03fbd0b6bd2e 100644 +--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c ++++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +@@ -360,8 +360,8 @@ static struct irq_chip mobiveil_msi_irq_chip = { + }; + + static struct msi_domain_info mobiveil_msi_domain_info = { +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_PCI_MSIX), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, + .chip = &mobiveil_msi_irq_chip, + }; + +@@ -378,16 +378,9 @@ static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + (int)data->hwirq, msg->address_hi, msg->address_lo); + } + +-static int mobiveil_msi_set_affinity(struct irq_data *irq_data, +- const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static struct irq_chip mobiveil_msi_bottom_irq_chip = { + .name = "Mobiveil MSI", + .irq_compose_msi_msg = mobiveil_compose_msi_msg, +- .irq_set_affinity = mobiveil_msi_set_affinity, + }; + + static int mobiveil_irq_msi_domain_alloc(struct 
irq_domain *domain, +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c +index 71ecd7ddcc8a..d6fc4f1d0bea 100644 +--- a/drivers/pci/controller/pci-aardvark.c ++++ b/drivers/pci/controller/pci-aardvark.c +@@ -1305,12 +1305,6 @@ static void advk_msi_irq_compose_msi_msg(struct irq_data *data, + msg->data = data->hwirq; + } + +-static int advk_msi_set_affinity(struct irq_data *irq_data, +- const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static void advk_msi_irq_mask(struct irq_data *d) + { + struct advk_pcie *pcie = d->domain->host_data; +@@ -1354,7 +1348,6 @@ static void advk_msi_top_irq_unmask(struct irq_data *d) + static struct irq_chip advk_msi_bottom_irq_chip = { + .name = "MSI", + .irq_compose_msi_msg = advk_msi_irq_compose_msi_msg, +- .irq_set_affinity = advk_msi_set_affinity, + .irq_mask = advk_msi_irq_mask, + .irq_unmask = advk_msi_irq_unmask, + }; +@@ -1452,7 +1445,8 @@ static struct irq_chip advk_msi_irq_chip = { + + static struct msi_domain_info advk_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI | ++ MSI_FLAG_PCI_MSIX, + .chip = &advk_msi_irq_chip, + }; + +diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c +index 038d974a318e..d7517c3976e7 100644 +--- a/drivers/pci/controller/pci-tegra.c ++++ b/drivers/pci/controller/pci-tegra.c +@@ -1629,11 +1629,6 @@ static void tegra_msi_irq_unmask(struct irq_data *d) + spin_unlock_irqrestore(&msi->mask_lock, flags); + } + +-static int tegra_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + { + struct tegra_msi *msi = irq_data_get_irq_chip_data(data); +@@ -1648,7 +1643,6 @@ static struct irq_chip tegra_msi_bottom_chip = { + .irq_ack = tegra_msi_irq_ack, + 
.irq_mask = tegra_msi_irq_mask, + .irq_unmask = tegra_msi_irq_unmask, +- .irq_set_affinity = tegra_msi_set_affinity, + .irq_compose_msi_msg = tegra_compose_msi_msg, + }; + +@@ -1697,8 +1691,8 @@ static const struct irq_domain_ops tegra_msi_domain_ops = { + }; + + static struct msi_domain_info tegra_msi_info = { +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_PCI_MSIX), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, + .chip = &tegra_msi_top_chip, + }; + +diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c +index 6ad5427490b5..2fa2f91a5de3 100644 +--- a/drivers/pci/controller/pcie-altera-msi.c ++++ b/drivers/pci/controller/pcie-altera-msi.c +@@ -81,8 +81,8 @@ static struct irq_chip altera_msi_irq_chip = { + }; + + static struct msi_domain_info altera_msi_domain_info = { +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_PCI_MSIX), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, + .chip = &altera_msi_irq_chip, + }; + +@@ -99,16 +99,9 @@ static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + (int)data->hwirq, msg->address_hi, msg->address_lo); + } + +-static int altera_msi_set_affinity(struct irq_data *irq_data, +- const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static struct irq_chip altera_msi_bottom_irq_chip = { + .name = "Altera MSI", + .irq_compose_msi_msg = altera_compose_msi_msg, +- .irq_set_affinity = altera_msi_set_affinity, + }; + + static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, +diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c +index 940af934ce1b..52a39d649b94 100644 +--- a/drivers/pci/controller/pcie-brcmstb.c ++++ b/drivers/pci/controller/pcie-brcmstb.c +@@ -439,8 +439,8 @@ static struct irq_chip 
brcm_msi_irq_chip = { + }; + + static struct msi_domain_info brcm_msi_domain_info = { +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_MULTI_PCI_MSI), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, + .chip = &brcm_msi_irq_chip, + }; + +@@ -478,12 +478,6 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq; + } + +-static int brcm_msi_set_affinity(struct irq_data *irq_data, +- const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static void brcm_msi_ack_irq(struct irq_data *data) + { + struct brcm_msi *msi = irq_data_get_irq_chip_data(data); +@@ -496,7 +490,6 @@ static void brcm_msi_ack_irq(struct irq_data *data) + static struct irq_chip brcm_msi_bottom_irq_chip = { + .name = "BRCM STB MSI", + .irq_compose_msi_msg = brcm_msi_compose_msi_msg, +- .irq_set_affinity = brcm_msi_set_affinity, + .irq_ack = brcm_msi_ack_irq, + }; + +diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c +index 975b3024fb08..973415d3b21f 100644 +--- a/drivers/pci/controller/pcie-mediatek-gen3.c ++++ b/drivers/pci/controller/pcie-mediatek-gen3.c +@@ -424,12 +424,6 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie) + return 0; + } + +-static int mtk_pcie_set_affinity(struct irq_data *data, +- const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static void mtk_pcie_msi_irq_mask(struct irq_data *data) + { + pci_msi_mask_irq(data); +@@ -450,8 +444,9 @@ static struct irq_chip mtk_msi_irq_chip = { + }; + + static struct msi_domain_info mtk_msi_domain_info = { +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX | ++ 
MSI_FLAG_MULTI_PCI_MSI, + .chip = &mtk_msi_irq_chip, + }; + +@@ -517,7 +512,6 @@ static struct irq_chip mtk_msi_bottom_irq_chip = { + .irq_mask = mtk_msi_bottom_irq_mask, + .irq_unmask = mtk_msi_bottom_irq_unmask, + .irq_compose_msi_msg = mtk_compose_msi_msg, +- .irq_set_affinity = mtk_pcie_set_affinity, + .name = "MSI", + }; + +@@ -618,7 +612,6 @@ static struct irq_chip mtk_intx_irq_chip = { + .irq_mask = mtk_intx_mask, + .irq_unmask = mtk_intx_unmask, + .irq_eoi = mtk_intx_eoi, +- .irq_set_affinity = mtk_pcie_set_affinity, + .name = "INTx", + }; + +diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c +index 48372013f26d..0b9d9548c8e1 100644 +--- a/drivers/pci/controller/pcie-mediatek.c ++++ b/drivers/pci/controller/pcie-mediatek.c +@@ -407,12 +407,6 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + (int)data->hwirq, msg->address_hi, msg->address_lo); + } + +-static int mtk_msi_set_affinity(struct irq_data *irq_data, +- const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static void mtk_msi_ack_irq(struct irq_data *data) + { + struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); +@@ -424,7 +418,6 @@ static void mtk_msi_ack_irq(struct irq_data *data) + static struct irq_chip mtk_msi_bottom_irq_chip = { + .name = "MTK MSI", + .irq_compose_msi_msg = mtk_compose_msi_msg, +- .irq_set_affinity = mtk_msi_set_affinity, + .irq_ack = mtk_msi_ack_irq, + }; + +@@ -486,8 +479,8 @@ static struct irq_chip mtk_msi_irq_chip = { + }; + + static struct msi_domain_info mtk_msi_domain_info = { +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_PCI_MSIX), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, + .chip = &mtk_msi_irq_chip, + }; + +diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c +index 704ab5d723a9..e0cf79e92cf5 100644 +--- 
a/drivers/pci/controller/pcie-rcar-host.c ++++ b/drivers/pci/controller/pcie-rcar-host.c +@@ -657,11 +657,6 @@ static void rcar_msi_irq_unmask(struct irq_data *d) + spin_unlock_irqrestore(&msi->mask_lock, flags); + } + +-static int rcar_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + { + struct rcar_msi *msi = irq_data_get_irq_chip_data(data); +@@ -677,7 +672,6 @@ static struct irq_chip rcar_msi_bottom_chip = { + .irq_ack = rcar_msi_irq_ack, + .irq_mask = rcar_msi_irq_mask, + .irq_unmask = rcar_msi_irq_unmask, +- .irq_set_affinity = rcar_msi_set_affinity, + .irq_compose_msi_msg = rcar_compose_msi_msg, + }; + +@@ -724,8 +718,8 @@ static const struct irq_domain_ops rcar_msi_domain_ops = { + }; + + static struct msi_domain_info rcar_msi_info = { +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_MULTI_PCI_MSI), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, + .chip = &rcar_msi_top_chip, + }; + +diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c +index 5b82098f32b7..0c66815d719b 100644 +--- a/drivers/pci/controller/pcie-xilinx-nwl.c ++++ b/drivers/pci/controller/pcie-xilinx-nwl.c +@@ -427,8 +427,8 @@ static struct irq_chip nwl_msi_irq_chip = { + }; + + static struct msi_domain_info nwl_msi_domain_info = { +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_MULTI_PCI_MSI), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI, + .chip = &nwl_msi_irq_chip, + }; + #endif +@@ -443,16 +443,9 @@ static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + msg->data = data->hwirq; + } + +-static int nwl_msi_set_affinity(struct irq_data *irq_data, +- const struct cpumask *mask, bool force) 
+-{ +- return -EINVAL; +-} +- + static struct irq_chip nwl_irq_chip = { + .name = "Xilinx MSI", + .irq_compose_msi_msg = nwl_compose_msi_msg, +- .irq_set_affinity = nwl_msi_set_affinity, + }; + + static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, +diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c +index cb6e9f7b0152..0b534f73a942 100644 +--- a/drivers/pci/controller/pcie-xilinx.c ++++ b/drivers/pci/controller/pcie-xilinx.c +@@ -208,11 +208,6 @@ static struct irq_chip xilinx_msi_top_chip = { + .irq_ack = xilinx_msi_top_irq_ack, + }; + +-static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) +-{ +- return -EINVAL; +-} +- + static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + { + struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data); +@@ -225,7 +220,6 @@ static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) + + static struct irq_chip xilinx_msi_bottom_chip = { + .name = "Xilinx MSI", +- .irq_set_affinity = xilinx_msi_set_affinity, + .irq_compose_msi_msg = xilinx_compose_msi_msg, + }; + +@@ -271,7 +265,8 @@ static const struct irq_domain_ops xilinx_msi_domain_ops = { + }; + + static struct msi_domain_info xilinx_msi_info = { +- .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS), ++ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | ++ MSI_FLAG_NO_AFFINITY, + .chip = &xilinx_msi_top_chip, + }; + +diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c +index dfa222e02c4d..333c23a7f0a0 100644 +--- a/drivers/pci/controller/vmd.c ++++ b/drivers/pci/controller/vmd.c +@@ -204,22 +204,11 @@ static void vmd_irq_disable(struct irq_data *data) + raw_spin_unlock_irqrestore(&list_lock, flags); + } + +-/* +- * XXX: Stubbed until we develop acceptable way to not create conflicts with +- * other devices sharing the same vector. 
+- */ +-static int vmd_irq_set_affinity(struct irq_data *data, +- const struct cpumask *dest, bool force) +-{ +- return -EINVAL; +-} +- + static struct irq_chip vmd_msi_controller = { + .name = "VMD-MSI", + .irq_enable = vmd_irq_enable, + .irq_disable = vmd_irq_disable, + .irq_compose_msi_msg = vmd_compose_msi_msg, +- .irq_set_affinity = vmd_irq_set_affinity, + }; + + static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, +@@ -326,7 +315,7 @@ static struct msi_domain_ops vmd_msi_domain_ops = { + + static struct msi_domain_info vmd_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | +- MSI_FLAG_PCI_MSIX, ++ MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, + .ops = &vmd_msi_domain_ops, + .chip = &vmd_msi_controller, + }; diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c -index 4eea161663b1..6a36d630e101 100644 +index 930ba5a9d7e1..f48fd1a9282b 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -9,6 +9,7 @@ @@ -487939,7 +499414,7 @@ index 4eea161663b1..6a36d630e101 100644 int pci_msi_enable = 1; int pci_msi_ignore_mask; -@@ -836,10 +865,41 @@ static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *en +@@ -840,10 +869,41 @@ static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *en return true; } @@ -487981,7 +499456,7 @@ index 4eea161663b1..6a36d630e101 100644 #ifdef CONFIG_LOONGARCH if (!disable_pci_irq_limit) { -@@ -849,7 +909,6 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int +@@ -853,7 +913,6 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int } } #endif @@ -488273,6 +499748,20 @@ index 05b7357bd258..cc6cac9395c8 100644 +} + +#endif +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c +index b699839a7d4f..578f6f1565e2 100644 +--- a/drivers/pci/pci-driver.c ++++ b/drivers/pci/pci-driver.c +@@ -1669,7 +1669,8 @@ static int pci_dma_configure(struct device *dev) + + 
pci_put_host_bridge_device(bridge); + +- if (!ret && !driver->driver_managed_dma) { ++ /* @driver may not be valid when we're called from the IOMMU layer */ ++ if (!ret && dev->driver && !driver->driver_managed_dma) { + ret = iommu_device_use_default_domain(dev); + if (ret) + arch_teardown_dma_ops(dev); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f6869fc8f49b..e70fdb1bb006 100644 --- a/drivers/pci/pci.h @@ -488291,10 +499780,10 @@ index f6869fc8f49b..e70fdb1bb006 100644 static inline int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment, struct resource *res) diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c -index 46fad0d813b2..560b3a236d84 100644 +index d6e5fef54c3b..5c3032fe475b 100644 --- a/drivers/pci/pcie/portdrv.c +++ b/drivers/pci/pcie/portdrv.c -@@ -598,7 +598,7 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv) +@@ -600,7 +600,7 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv) } /* If this switch is set, PCIe port native services should not be enabled. */ @@ -488304,13 +499793,23 @@ index 46fad0d813b2..560b3a236d84 100644 /* * If the user specified "pcie_ports=native", use the PCIe services regardless diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig -index f608c2e66235..dad83faccf44 100644 +index f608c2e66235..5cea1e4ce77d 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig -@@ -86,6 +86,20 @@ config RISCV_PMU_SBI +@@ -86,6 +86,30 @@ config RISCV_PMU_SBI full perf feature support i.e. counter overflow, privilege mode filtering, counter configuration. ++config RISCV_PMU_SSE ++ depends on RISCV_PMU && RISCV_SSE ++ bool "RISC-V PMU SSE events" ++ default n ++ help ++ Say y if you want to use SSE events to deliver PMU interrupts. This ++ provides a way to profile the kernel at any level by using NMI-like ++ SSE events. SSE events being really intrusive, this option allows ++ to select it only if needed. 
++ +config ANDES_CUSTOM_PMU + bool "Andes custom PMU support" + depends on ARCH_RENESAS && RISCV_ALTERNATIVE && RISCV_PMU_SBI @@ -488328,6 +499827,37 @@ index f608c2e66235..dad83faccf44 100644 config ARM_PMU_ACPI depends on ARM_PMU && ACPI def_bool y +@@ -253,4 +277,21 @@ config HISILICON_HW_METRIC + Support hardware metric that allows filter of sampling for specific + sampling period ratio. + ++config LRW_DDR_PMU ++ tristate "Enable LRW DDR sub-system performance monitoring" ++ depends on ACPI ++ help ++ This option enables the PMU driver for the LRW DDR sub-system, which allows ++ performance monitoring of DDR memory operations using the perf framework. ++ ++ Say Y here if you want to include this driver directly in the kernel image, ++ or M to build it as a module named lrw_ddr_pmu.ko. ++ ++ This driver provides basic counter support for DDR performance metrics, such as ++ memory access counts. It does not support advanced features like counter overflow ++ handling or privilege mode filtering. ++ ++ If you are not using LRW DDR hardware or do not require performance monitoring, ++ say N. 
++ + endmenu +diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile +index a8b7bc22e3d6..8bb2752f47d5 100644 +--- a/drivers/perf/Makefile ++++ b/drivers/perf/Makefile +@@ -27,3 +27,4 @@ obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o + obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/ + obj-$(CONFIG_MESON_DDR_PMU) += amlogic/ + obj-$(CONFIG_CXL_PMU) += cxl_pmu.o ++obj-$(CONFIG_LRW_DDR_PMU) += lrw_ddr_pmu.o diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 31e491e7f206..2946422539fb 100644 --- a/drivers/perf/arm_smmuv3_pmu.c @@ -488350,12 +499880,824 @@ index 31e491e7f206..2946422539fb 100644 if (ret) { dev_warn(dev, "failed to allocate MSIs\n"); return; +diff --git a/drivers/perf/lrw_ddr_pmu.c b/drivers/perf/lrw_ddr_pmu.c +new file mode 100644 +index 000000000000..9b4522dc0573 +--- /dev/null ++++ b/drivers/perf/lrw_ddr_pmu.c +@@ -0,0 +1,759 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * DDR Sub-system PMU Events Monitoring Support ++ * ++ * Copyright (C) 2025 LRW Corporation or its affiliates. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define LRW_DDR_PMU_MAX_COUNTERS 16 ++#define LRW_DDR_PMU_MAX_CHB_EVENTS 0x40 ++#define LRW_DDR_PMU_MAX_DDRC_EVENTS 0x44 ++ ++#define LRW_DDR_PMU_PA_SHIFT 24 ++#define LRW_DDR_PMU_CNT_INIT 0 ++ ++#define LRW_DDR_PMU_CHB_DOMAIN_OFFSET 0x0 ++#define LRW_DDR_PMU_DDRC_DOMAIN_OFFSET 0x400 ++ ++#define LRW_DDR_PMU_REG_CFG0 0x04 ++#define LRW_DDR_PMU_CNT_CLEAR_MASK GENMASK(31, 16) ++#define LRW_DDR_PMU_CNT_ENABLE_MASK GENMASK(15, 0) ++ ++#define LRW_DDR_PMU_CNT_CLEAR_BIT(n) BIT((n) + 16) ++#define LRW_DDR_PMU_CNT_ENABLE_BIT(n) BIT(n) ++#define LRW_DDR_PMU_CNT_DISABLE_BIT(n) (~BIT(n)) ++ ++#define LRW_DDR_PMU_REG_CFG1 0x08 ++#define LRW_DDR_PMU_EVENT_CFG1_DCH_SEL GENMASK(13, 13) ++#define LRW_DDR_PMU_EVENT_CFG1_ID GENMASK(12, 6) ++#define LRW_DDR_PMU_EVENT_CFG1_CNT_ID GENMASK(5, 2) ++#define LRW_DDR_PMU_EVENT_CFG1_BOUND_EN GENMASK(0, 0) ++ ++#define LRW_DDR_PMU_REG_CNT_L(x) (0x0C + ((x) * 0x08)) ++#define LRW_DDR_PMU_REG_CNT_H(x) (0x10 + ((x) * 0x08)) ++ ++#define LRW_DDR_PMU_REG_CYCLE_L 0x8C ++#define LRW_DDR_PMU_REG_CYCLE_H 0x90 ++ ++#define LRW_DDR_PMU_REG_OCCUPY_MON_CTL 0x100 ++#define LRW_DDR_PMU_REG_OCCUPY_MON_CNT_MASK 0xFFFFFF ++ ++/* fixed cycle event */ ++#define LRW_DDR_PMU_FIXED_EVT_ID_CHB_CYCLE 0x200 ++#define LRW_DDR_PMU_FIXED_EVT_ID_DDRC_CYCLE 0x300 ++ ++#define LRW_DDR_PMU_EVENT_DCH0 0 ++#define LRW_DDR_PMU_EVENT_DCH1 1 ++#define LRW_DDR_PMU_EVENT_DOMAIN_CHB 0 ++#define LRW_DDR_PMU_EVENT_DOMAIN_DDRC 1 ++#define LRW_DDR_PMU_EVENT_TYPE_GENERAL 0 ++#define LRW_DDR_PMU_EVENT_TYPE_FIXED 1 ++ ++#define LRW_DDR_PMU_EVENT_CONFIG_MASK GENMASK(9, 0) ++#define LRW_DDR_PMU_EVENT_ID_MASK GENMASK(6, 0) ++ ++#define LRW_DDR_PMU_EVENT_DCH_SHIFT 7 ++#define LRW_DDR_PMU_EVENT_DOMAIN_SHIFT 8 ++#define LRW_DDR_PMU_EVENT_TYPE_SHIFT 9 ++ ++#define LRW_DDR_PMU_EVENT_DCH_MASK BIT(LRW_DDR_PMU_EVENT_DCH_SHIFT) ++#define LRW_DDR_PMU_EVENT_DOMAIN_MASK 
BIT(LRW_DDR_PMU_EVENT_DOMAIN_SHIFT) ++#define LRW_DDR_PMU_EVENT_TYPE_MASK BIT(LRW_DDR_PMU_EVENT_TYPE_SHIFT) ++ ++#define LRW_DDR_PMU_GET_EVENT_CONFIG(event) \ ++ FIELD_GET(LRW_DDR_PMU_EVENT_CONFIG_MASK, (event)->attr.config) ++ ++#define LRW_DDR_PMU_GET_EVENT_ID(event) \ ++ FIELD_GET(LRW_DDR_PMU_EVENT_ID_MASK, (event)->attr.config) ++ ++#define LRW_DDR_PMU_GET_EVENT_DCH(event) \ ++ FIELD_GET(LRW_DDR_PMU_EVENT_DCH_MASK, (event)->attr.config) ++ ++#define LRW_DDR_PMU_GET_EVENT_DOMAIN(event) \ ++ FIELD_GET(LRW_DDR_PMU_EVENT_DOMAIN_MASK, (event)->attr.config) ++ ++#define LRW_DDR_PMU_GET_EVENT_TYPE(event) \ ++ FIELD_GET(LRW_DDR_PMU_EVENT_TYPE_MASK, (event)->attr.config) ++ ++#define LRW_DDR_PMU_GET_DOMAIN_OFFSET(event) \ ++ ((LRW_DDR_PMU_GET_EVENT_DOMAIN(event) == \ ++ LRW_DDR_PMU_EVENT_DOMAIN_CHB) ? \ ++ LRW_DDR_PMU_CHB_DOMAIN_OFFSET : \ ++ LRW_DDR_PMU_DDRC_DOMAIN_OFFSET) ++ ++#define LRW_DDR_PMU_GET_PMU_DOMAIN(event, pmu) \ ++ ((LRW_DDR_PMU_GET_EVENT_DOMAIN(event) == \ ++ LRW_DDR_PMU_EVENT_DOMAIN_CHB) ? 
\ ++ &(pmu)->chb : \ ++ &(pmu)->ddrc) ++ ++#define to_lrw_ddr_pmu(p) (container_of(p, struct lrw_ddr_pmu, pmu)) ++ ++static int lrw_ddr_pmu_cpuhp_state_num; ++ ++static const struct acpi_device_id lrw_ddr_acpi_match[] = { ++ { ++ "LRWX0001", ++ }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(acpi, lrw_ddr_acpi_match); ++ ++struct lrw_ddr_pmu_domain { ++ int num_counters; ++ unsigned long *used_mask; ++ struct perf_event **events; ++ int *config; ++}; ++ ++struct lrw_ddr_pmu { ++ void __iomem *cfg_base; ++ struct device *dev; ++ struct pmu pmu; ++ int cpu; ++ ++ struct lrw_ddr_pmu_domain ddrc; ++ struct lrw_ddr_pmu_domain chb; ++ struct hlist_node node; ++}; ++ ++static ssize_t lrw_ddr_pmu_event_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct dev_ext_attribute *ext = ++ container_of(attr, struct dev_ext_attribute, attr); ++ return sprintf(buf, "config=0x%llx\n", (u64)ext->var); ++} ++ ++static ssize_t lrw_ddr_pmu_cpumask_show(struct device *dev, ++ struct device_attribute *attr, ++ char *buf) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = to_lrw_ddr_pmu(dev_get_drvdata(dev)); ++ ++ return cpumap_print_to_pagebuf(true, buf, cpumask_of(ddr_pmu->cpu)); ++} ++ ++static ssize_t lrw_ddr_pmu_format_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct dev_ext_attribute *ext = ++ container_of(attr, struct dev_ext_attribute, attr); ++ return sprintf(buf, "%s\n", (char *)ext->var); ++} ++ ++#define LRW_DDR_PMU_ATTR(_name, _show_func, _config) \ ++ (&((struct dev_ext_attribute[]){ \ ++ { __ATTR(_name, 0444, _show_func, NULL), \ ++ (void *)(_config) } })[0] \ ++ .attr.attr) ++ ++#define LRW_DDR_PMU_FORMAT_ATTR(_name, _config) \ ++ LRW_DDR_PMU_ATTR(_name, lrw_ddr_pmu_format_show, (void *)(_config)) ++ ++#define LRW_DDR_PMU_EVENT_ATTR(_name, _config) \ ++ LRW_DDR_PMU_ATTR(_name, lrw_ddr_pmu_event_show, \ ++ (unsigned long)(_config)) ++ ++#define LRW_DDR_PMU_EVENT_DCH0_SHIFTED \ ++ ((LRW_DDR_PMU_EVENT_DCH0) << 
(LRW_DDR_PMU_EVENT_DCH_SHIFT)) ++#define LRW_DDR_PMU_EVENT_DCH1_SHIFTED \ ++ ((LRW_DDR_PMU_EVENT_DCH1) << (LRW_DDR_PMU_EVENT_DCH_SHIFT)) ++#define LRW_DDR_PMU_EVENT_DOMAIN_CHB_SHIFTED \ ++ ((LRW_DDR_PMU_EVENT_DOMAIN_CHB) << (LRW_DDR_PMU_EVENT_DOMAIN_SHIFT)) ++#define LRW_DDR_PMU_EVENT_DOMAIN_DDRC_SHIFTED \ ++ ((LRW_DDR_PMU_EVENT_DOMAIN_DDRC) << (LRW_DDR_PMU_EVENT_DOMAIN_SHIFT)) ++ ++#define LRW_DDR_PMU_GEN_DDRC_EVENTS_0(prefix, cfg) \ ++ (LRW_DDR_PMU_EVENT_ATTR(ddrc_##prefix##_sub0, \ ++ ((cfg) | \ ++ LRW_DDR_PMU_EVENT_DOMAIN_DDRC_SHIFTED | \ ++ LRW_DDR_PMU_EVENT_DCH0_SHIFTED))) ++ ++#define LRW_DDR_PMU_GEN_DDRC_EVENTS_1(prefix, cfg) \ ++ (LRW_DDR_PMU_EVENT_ATTR(ddrc_##prefix##_sub1, \ ++ ((cfg) | \ ++ LRW_DDR_PMU_EVENT_DOMAIN_DDRC_SHIFTED | \ ++ LRW_DDR_PMU_EVENT_DCH1_SHIFTED))) ++ ++#define LRW_DDR_PMU_GEN_FIXED_EVENT(prefix, cfg) \ ++ LRW_DDR_PMU_EVENT_ATTR(fixed_##prefix, (cfg)) ++ ++static struct attribute *lrw_ddr_pmu_events_attrs[] = { ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_rd_or_wr_ps0, 0), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_rd_or_wr_ps1, 1), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_wr_ps0, 2), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_wr_ps1, 3), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_rd_ps0, 4), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_rd_ps1, 5), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_rmw_ps0, 6), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_rmw_ps1, 7), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_hi_pri_rd_ps0, 8), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hif_hi_pri_rd_ps1, 9), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(dfi_wr_data_cycles_ps0, 10), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(dfi_wr_data_cycles_ps1, 11), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(dfi_rd_data_cycles_ps0, 12), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(dfi_rd_data_cycles_ps1, 13), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(hpr_xact_when_critical, 14), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(lpr_xact_when_critical, 15), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(wr_xact_when_critical, 16), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_activate_ps0, 17), ++ 
LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_activate_ps1, 18), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_rd_or_wr_ps0, 19), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_rd_or_wr_ps1, 20), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_rd_activate_ps0, 21), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_rd_activate_ps1, 22), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_rd_ps0, 23), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_rd_ps1, 24), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_wr_ps0, 25), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_wr_ps1, 26), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_mwr_ps0, 27), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_mwr_ps1, 28), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_precharge_ps0, 29), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_precharge_ps1, 30), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(precharge_for_rdwr_ps0, 31), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(precharge_for_rdwr_ps1, 32), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(precharge_for_other_ps0, 33), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(precharge_for_other_ps1, 34), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(rdwr_transitions, 35), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(write_combine_ps0, 36), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(write_combine_ps1, 37), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(war_hazard, 38), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(raw_hazard, 39), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(waw_hazard, 40), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_enter_selfref_rank0, 41), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_enter_selfref_rank1, 42), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_enter_selfref_rank2, 43), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_enter_selfref_rank3, 44), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_enter_powerdown_rank0, 45), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_enter_powerdown_rank1, 46), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_enter_powerdown_rank2, 47), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_enter_powerdown_rank3, 48), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_refresh_ps0, 53), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_refresh_ps1, 54), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_crit_ref_ps0, 55), ++ 
LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_crit_ref_ps1, 56), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_spec_ref_ps0, 57), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_spec_ref_ps1, 58), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_load_mode, 59), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(bsm_alloc, 60), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(bsm_starvation, 61), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(visible_window_limit_reached_rd, 62), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(visible_window_limit_reached_wr, 63), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_dqsosc_mpc, 64), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_dqsosc_mrr, 65), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_tcr_mrr, 66), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_zqstart, 67), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_0(op_is_zqlatch, 68), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_rd_or_wr_ps0, 0), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_rd_or_wr_ps1, 1), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_wr_ps0, 2), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_wr_ps1, 3), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_rd_ps0, 4), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_rd_ps1, 5), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_rmw_ps0, 6), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_rmw_ps1, 7), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_hi_pri_rd_ps0, 8), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hif_hi_pri_rd_ps1, 9), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(dfi_wr_data_cycles_ps0, 10), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(dfi_wr_data_cycles_ps1, 11), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(dfi_rd_data_cycles_ps0, 12), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(dfi_rd_data_cycles_ps1, 13), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(hpr_xact_when_critical, 14), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(lpr_xact_when_critical, 15), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(wr_xact_when_critical, 16), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_activate_ps0, 17), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_activate_ps1, 18), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_rd_or_wr_ps0, 19), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_rd_or_wr_ps1, 20), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_rd_activate_ps0, 21), ++ 
LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_rd_activate_ps1, 22), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_rd_ps0, 23), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_rd_ps1, 24), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_wr_ps0, 25), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_wr_ps1, 26), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_mwr_ps0, 27), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_mwr_ps1, 28), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_precharge_ps0, 29), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_precharge_ps1, 30), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(precharge_for_rdwr_ps0, 31), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(precharge_for_rdwr_ps1, 32), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(precharge_for_other_ps0, 33), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(precharge_for_other_ps1, 34), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(rdwr_transitions, 35), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(write_combine_ps0, 36), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(write_combine_ps1, 37), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(war_hazard, 38), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(raw_hazard, 39), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(waw_hazard, 40), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_enter_selfref_rank0, 41), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_enter_selfref_rank1, 42), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_enter_selfref_rank2, 43), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_enter_selfref_rank3, 44), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_enter_powerdown_rank0, 45), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_enter_powerdown_rank1, 46), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_enter_powerdown_rank2, 47), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_enter_powerdown_rank3, 48), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_refresh_ps0, 53), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_refresh_ps1, 54), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_crit_ref_ps0, 55), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_crit_ref_ps1, 56), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_spec_ref_ps0, 57), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_spec_ref_ps1, 58), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_load_mode, 59), ++ 
LRW_DDR_PMU_GEN_DDRC_EVENTS_1(bsm_alloc, 60), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(bsm_starvation, 61), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(visible_window_limit_reached_rd, 62), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(visible_window_limit_reached_wr, 63), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_dqsosc_mpc, 64), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_dqsosc_mrr, 65), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_tcr_mrr, 66), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_zqstart, 67), ++ LRW_DDR_PMU_GEN_DDRC_EVENTS_1(op_is_zqlatch, 68), ++ LRW_DDR_PMU_GEN_FIXED_EVENT(chb_cycle, ++ LRW_DDR_PMU_FIXED_EVT_ID_CHB_CYCLE), ++ LRW_DDR_PMU_GEN_FIXED_EVENT(ddrc_cycle, ++ LRW_DDR_PMU_FIXED_EVT_ID_DDRC_CYCLE), ++ NULL, ++}; ++ ++static struct attribute_group lrw_ddr_pmu_events_attr_group = { ++ .name = "events", ++ .attrs = lrw_ddr_pmu_events_attrs, ++}; ++ ++static struct device_attribute lrw_ddr_pmu_cpumask_attr = ++ __ATTR(cpumask, 0444, lrw_ddr_pmu_cpumask_show, NULL); ++ ++static struct attribute *lrw_ddr_pmu_cpumask_attrs[] = { ++ &lrw_ddr_pmu_cpumask_attr.attr, ++ NULL, ++}; ++ ++static const struct attribute_group lrw_ddr_pmu_cpumask_attr_group = { ++ .attrs = lrw_ddr_pmu_cpumask_attrs, ++}; ++ ++static struct attribute *lrw_ddr_pmu_format_attrs[] = { ++ LRW_DDR_PMU_FORMAT_ATTR(event, "config:0-6"), ++ LRW_DDR_PMU_FORMAT_ATTR(dch, "config:7"), ++ LRW_DDR_PMU_FORMAT_ATTR(domain, "config:8"), ++ LRW_DDR_PMU_FORMAT_ATTR(type, "config:9"), ++ NULL, ++}; ++ ++static const struct attribute_group lrw_ddr_pmu_format_group = { ++ .name = "format", ++ .attrs = lrw_ddr_pmu_format_attrs, ++}; ++ ++static const struct attribute_group *lrw_ddr_pmu_attr_groups[] = { ++ &lrw_ddr_pmu_events_attr_group, ++ &lrw_ddr_pmu_cpumask_attr_group, ++ &lrw_ddr_pmu_format_group, ++ NULL, ++}; ++ ++static bool lrw_ddr_pmu_is_support_event(struct perf_event *event) ++{ ++ u32 id = LRW_DDR_PMU_GET_EVENT_ID(event); ++ u32 domain = LRW_DDR_PMU_GET_EVENT_DOMAIN(event); ++ ++ switch (domain) { ++ case LRW_DDR_PMU_EVENT_DOMAIN_CHB: 
++ return id <= LRW_DDR_PMU_MAX_CHB_EVENTS; ++ case LRW_DDR_PMU_EVENT_DOMAIN_DDRC: ++ return id <= LRW_DDR_PMU_MAX_DDRC_EVENTS; ++ default: ++ return false; ++ } ++} ++ ++static int lrw_ddr_pmu_get_event_idx(struct perf_event *event) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = to_lrw_ddr_pmu(event->pmu); ++ struct lrw_ddr_pmu_domain *domain; ++ int idx; ++ ++ domain = LRW_DDR_PMU_GET_PMU_DOMAIN(event, ddr_pmu); ++ for (idx = 0; idx < domain->num_counters; ++idx) { ++ if (!test_and_set_bit(idx, domain->used_mask)) ++ return idx; ++ } ++ ++ return -EBUSY; ++} ++ ++static int lrw_ddr_pmu_event_init(struct perf_event *event) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = to_lrw_ddr_pmu(event->pmu); ++ struct hw_perf_event *hwc = &event->hw; ++ ++ if (event->attr.type != event->pmu->type) ++ return -ENOENT; ++ ++ if (is_sampling_event(event)) ++ return -EOPNOTSUPP; ++ ++ event->cpu = ddr_pmu->cpu; ++ if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) ++ return -EOPNOTSUPP; ++ ++ if (!lrw_ddr_pmu_is_support_event(event)) ++ return -EOPNOTSUPP; ++ ++ hwc->idx = -1; ++ ++ return 0; ++} ++ ++static void lrw_ddr_pmu_bind_counter(struct perf_event *event) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = to_lrw_ddr_pmu(event->pmu); ++ struct hw_perf_event *hwc = &event->hw; ++ u32 cfg = LRW_DDR_PMU_GET_DOMAIN_OFFSET(event) + LRW_DDR_PMU_REG_CFG1; ++ u32 val = 0; ++ ++ val = FIELD_PREP(LRW_DDR_PMU_EVENT_CFG1_BOUND_EN, 1) | ++ FIELD_PREP(LRW_DDR_PMU_EVENT_CFG1_CNT_ID, hwc->idx) | ++ FIELD_PREP(LRW_DDR_PMU_EVENT_CFG1_ID, ++ LRW_DDR_PMU_GET_EVENT_ID(event)) | ++ FIELD_PREP(LRW_DDR_PMU_EVENT_CFG1_DCH_SEL, ++ LRW_DDR_PMU_GET_EVENT_DCH(event)); ++ writel(val, ddr_pmu->cfg_base + cfg); ++} ++ ++static void lrw_ddr_pmu_enable_counter(struct perf_event *event) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = to_lrw_ddr_pmu(event->pmu); ++ struct hw_perf_event *hwc = &event->hw; ++ u32 reg, val; ++ ++ reg = LRW_DDR_PMU_GET_DOMAIN_OFFSET(event) + LRW_DDR_PMU_REG_CFG0; ++ val = readl(ddr_pmu->cfg_base + reg); ++ val |= 
LRW_DDR_PMU_CNT_ENABLE_BIT(hwc->idx); ++ val |= LRW_DDR_PMU_CNT_CLEAR_BIT(hwc->idx); ++ writel(val, ddr_pmu->cfg_base + reg); ++} ++ ++static void lrw_ddr_pmu_disable_counter(struct perf_event *event) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = to_lrw_ddr_pmu(event->pmu); ++ struct hw_perf_event *hwc = &event->hw; ++ u32 reg, val; ++ ++ reg = LRW_DDR_PMU_GET_DOMAIN_OFFSET(event) + LRW_DDR_PMU_REG_CFG0; ++ val = readl(ddr_pmu->cfg_base + reg); ++ ++ val &= LRW_DDR_PMU_CNT_DISABLE_BIT(hwc->idx); ++ writel(val, ddr_pmu->cfg_base + reg); ++} ++ ++static u64 lrw_ddr_pmu_read_counter(struct perf_event *event) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = to_lrw_ddr_pmu(event->pmu); ++ struct hw_perf_event *hwc = &event->hw; ++ u64 cycle_high, cycle_low; ++ ++ if (LRW_DDR_PMU_GET_EVENT_TYPE(event) == LRW_DDR_PMU_EVENT_TYPE_FIXED) { ++ cycle_high = readl(ddr_pmu->cfg_base + ++ LRW_DDR_PMU_GET_DOMAIN_OFFSET(event) + ++ LRW_DDR_PMU_REG_CYCLE_H); ++ cycle_low = readl(ddr_pmu->cfg_base + ++ LRW_DDR_PMU_GET_DOMAIN_OFFSET(event) + ++ LRW_DDR_PMU_REG_CYCLE_L); ++ } else { ++ cycle_high = readl(ddr_pmu->cfg_base + ++ LRW_DDR_PMU_GET_DOMAIN_OFFSET(event) + ++ LRW_DDR_PMU_REG_CNT_H(hwc->idx)); ++ cycle_low = readl(ddr_pmu->cfg_base + ++ LRW_DDR_PMU_GET_DOMAIN_OFFSET(event) + ++ LRW_DDR_PMU_REG_CNT_L(hwc->idx)); ++ } ++ ++ return (u64)(cycle_high << 32 | cycle_low); ++} ++ ++static void lrw_ddr_pmu_event_update(struct perf_event *event) ++{ ++ struct hw_perf_event *hwc = &event->hw; ++ u64 delta, prev, now; ++ ++ do { ++ prev = local64_read(&hwc->prev_count); ++ now = lrw_ddr_pmu_read_counter(event); ++ } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); ++ ++ delta = now - prev; ++ local64_add(delta, &event->count); ++} ++ ++static void lrw_ddr_pmu_start(struct perf_event *event, int flags) ++{ ++ struct hw_perf_event *hwc = &event->hw; ++ u64 pre_val; ++ ++ hwc->state = 0; ++ ++ if (LRW_DDR_PMU_GET_EVENT_TYPE(event) == LRW_DDR_PMU_EVENT_TYPE_FIXED) { ++ pre_val = 
lrw_ddr_pmu_read_counter(event); ++ local64_set(&hwc->prev_count, pre_val); ++ return; ++ } ++ ++ pre_val = LRW_DDR_PMU_CNT_INIT; ++ local64_set(&hwc->prev_count, pre_val); ++ ++ lrw_ddr_pmu_bind_counter(event); ++ lrw_ddr_pmu_enable_counter(event); ++} ++ ++static int lrw_ddr_pmu_add(struct perf_event *event, int flags) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = to_lrw_ddr_pmu(event->pmu); ++ struct lrw_ddr_pmu_domain *domain; ++ struct hw_perf_event *hwc = &event->hw; ++ int idx = -1; ++ ++ if (LRW_DDR_PMU_EVENT_TYPE_GENERAL == ++ LRW_DDR_PMU_GET_EVENT_TYPE(event)) { ++ idx = lrw_ddr_pmu_get_event_idx(event); ++ if (idx < 0) ++ return idx; ++ ++ /* allocate resources for general events */ ++ domain = LRW_DDR_PMU_GET_PMU_DOMAIN(event, ddr_pmu); ++ domain->events[idx] = event; ++ domain->config[idx] = LRW_DDR_PMU_GET_EVENT_CONFIG(event); ++ } ++ ++ hwc->idx = idx; ++ ++ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; ++ ++ if (flags & PERF_EF_START) ++ lrw_ddr_pmu_start(event, PERF_EF_RELOAD); ++ ++ perf_event_update_userpage(event); ++ ++ return 0; ++} ++ ++static void lrw_ddr_pmu_stop(struct perf_event *event, int flags) ++{ ++ struct hw_perf_event *hwc = &event->hw; ++ ++ if (hwc->state & PERF_HES_STOPPED) ++ return; ++ ++ if (LRW_DDR_PMU_EVENT_TYPE_GENERAL == ++ LRW_DDR_PMU_GET_EVENT_TYPE(event)) { ++ lrw_ddr_pmu_disable_counter(event); ++ } ++ ++ lrw_ddr_pmu_event_update(event); ++ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; ++} ++ ++static void lrw_ddr_pmu_del(struct perf_event *event, int flags) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = to_lrw_ddr_pmu(event->pmu); ++ struct lrw_ddr_pmu_domain *domain; ++ struct hw_perf_event *hwc = &event->hw; ++ int idx = hwc->idx; ++ ++ lrw_ddr_pmu_stop(event, PERF_EF_UPDATE); ++ ++ domain = LRW_DDR_PMU_GET_PMU_DOMAIN(event, ddr_pmu); ++ /* release resources for general events */ ++ if (idx >= 0 && idx < domain->num_counters) { ++ domain->events[idx] = NULL; ++ domain->config[idx] = -1; ++ clear_bit(idx, 
domain->used_mask); ++ hwc->idx = -1; ++ } ++ perf_event_update_userpage(event); ++} ++ ++static void lrw_ddr_pmu_read(struct perf_event *event) ++{ ++ lrw_ddr_pmu_event_update(event); ++} ++ ++static int lrw_ddr_pmu_domain_init(struct lrw_ddr_pmu_domain *domain, ++ struct device *dev, int num_counters) ++{ ++ if (!domain || !dev || num_counters <= 0 || ++ num_counters > LRW_DDR_PMU_MAX_COUNTERS) ++ return -EINVAL; ++ ++ domain->num_counters = num_counters; ++ ++ domain->used_mask = devm_kzalloc( ++ dev, BITS_TO_LONGS(num_counters) * sizeof(u64), GFP_KERNEL); ++ if (!domain->used_mask) ++ return -ENOMEM; ++ ++ domain->events = devm_kzalloc( ++ dev, num_counters * sizeof(struct perf_event *), GFP_KERNEL); ++ if (!domain->events) ++ return -ENOMEM; ++ ++ domain->config = ++ devm_kzalloc(dev, num_counters * sizeof(int), GFP_KERNEL); ++ if (!domain->config) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static int lrw_ddr_pmu_probe(struct platform_device *pdev) ++{ ++ struct lrw_ddr_pmu *ddr_pmu; ++ struct resource *res; ++ char *name; ++ int ret; ++ ++ ddr_pmu = devm_kzalloc(&pdev->dev, sizeof(*ddr_pmu), GFP_KERNEL); ++ if (!ddr_pmu) ++ return -ENOMEM; ++ ++ ddr_pmu->dev = &pdev->dev; ++ ++ platform_set_drvdata(pdev, ddr_pmu); ++ ++ ddr_pmu->cfg_base = ++ devm_platform_get_and_ioremap_resource(pdev, 0, &res); ++ ++ if (IS_ERR(ddr_pmu->cfg_base)) ++ return PTR_ERR(ddr_pmu->cfg_base); ++ ++ name = devm_kasprintf(ddr_pmu->dev, GFP_KERNEL, "lrw_ddr_%llx", ++ (u64)(res->start >> LRW_DDR_PMU_PA_SHIFT)); ++ ++ if (!name) ++ return -ENOMEM; ++ ++ ddr_pmu->cpu = smp_processor_id(); ++ ++ ret = lrw_ddr_pmu_domain_init(&ddr_pmu->ddrc, ddr_pmu->dev, ++ LRW_DDR_PMU_MAX_COUNTERS); ++ if (ret) ++ return ret; ++ ++ ret = lrw_ddr_pmu_domain_init(&ddr_pmu->chb, ddr_pmu->dev, ++ LRW_DDR_PMU_MAX_COUNTERS); ++ if (ret) ++ return ret; ++ ++ /* clear all chb counters */ ++ writel(LRW_DDR_PMU_CNT_CLEAR_MASK, ++ ddr_pmu->cfg_base + LRW_DDR_PMU_CHB_DOMAIN_OFFSET + ++ LRW_DDR_PMU_REG_CFG0); ++ /* 
clear all ddrc counters */ ++ writel(LRW_DDR_PMU_CNT_CLEAR_MASK, ++ ddr_pmu->cfg_base + LRW_DDR_PMU_DDRC_DOMAIN_OFFSET + ++ LRW_DDR_PMU_REG_CFG0); ++ ++ ddr_pmu->pmu = (struct pmu){ ++ .module = THIS_MODULE, ++ .parent = &pdev->dev, ++ .task_ctx_nr = perf_invalid_context, ++ .event_init = lrw_ddr_pmu_event_init, ++ .add = lrw_ddr_pmu_add, ++ .del = lrw_ddr_pmu_del, ++ .start = lrw_ddr_pmu_start, ++ .stop = lrw_ddr_pmu_stop, ++ .read = lrw_ddr_pmu_read, ++ .attr_groups = lrw_ddr_pmu_attr_groups, ++ .capabilities = PERF_PMU_CAP_NO_EXCLUDE, ++ }; ++ ++ ret = perf_pmu_register(&ddr_pmu->pmu, name, -1); ++ if (ret) ++ dev_err(ddr_pmu->dev, "LRW DDR PMU register failed!\n"); ++ ++ return ret; ++} ++ ++static int lrw_ddr_pmu_remove(struct platform_device *pdev) ++{ ++ struct lrw_ddr_pmu *ddr_pmu = platform_get_drvdata(pdev); ++ ++ perf_pmu_unregister(&ddr_pmu->pmu); ++ ++ return 0; ++} ++ ++static struct platform_driver lrw_ddr_pmu_driver = { ++ .driver = { ++ .name = "lrw_ddr_pmu", ++ .acpi_match_table = lrw_ddr_acpi_match, ++ }, ++ .probe = lrw_ddr_pmu_probe, ++ .remove = lrw_ddr_pmu_remove, ++}; ++ ++static int lrw_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) ++{ ++ struct lrw_ddr_pmu *pmu = ++ hlist_entry_safe(node, struct lrw_ddr_pmu, node); ++ unsigned int target; ++ ++ if (cpu != pmu->cpu) ++ return 0; ++ ++ target = cpumask_any_but(cpu_online_mask, cpu); ++ if (target >= nr_cpu_ids) ++ return 0; ++ ++ perf_pmu_migrate_context(&pmu->pmu, cpu, target); ++ ++ pmu->cpu = target; ++ return 0; ++} ++ ++static int __init lrw_ddr_pmu_init(void) ++{ ++ int ret; ++ ++ ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lrw_ddr_pmu:online", ++ NULL, lrw_ddr_pmu_offline_cpu); ++ ++ if (ret < 0) { ++ pr_err("LRW DDR PMU: setup hotplug failed, ret = %d\n", ret); ++ return ret; ++ } ++ lrw_ddr_pmu_cpuhp_state_num = ret; ++ ++ ret = platform_driver_register(&lrw_ddr_pmu_driver); ++ if (ret) ++ cpuhp_remove_multi_state(lrw_ddr_pmu_cpuhp_state_num); ++ ++ return 
ret; ++} ++module_init(lrw_ddr_pmu_init); ++ ++static void __exit lrw_ddr_pmu_exit(void) ++{ ++ platform_driver_unregister(&lrw_ddr_pmu_driver); ++ cpuhp_remove_multi_state(lrw_ddr_pmu_cpuhp_state_num); ++} ++module_exit(lrw_ddr_pmu_exit); ++ ++MODULE_AUTHOR("Wenjia Guo"); ++MODULE_AUTHOR("Yong Ma"); ++MODULE_AUTHOR("Jie Feng"); ++MODULE_DESCRIPTION("LRW DDR PMU driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c +index b4efdddb2ad9..0390febba1ea 100644 +--- a/drivers/perf/riscv_pmu.c ++++ b/drivers/perf/riscv_pmu.c +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -258,6 +259,22 @@ void riscv_pmu_start(struct perf_event *event, int flags) + perf_event_update_userpage(event); + } + ++static void riscv_pmu_disable(struct pmu *pmu) ++{ ++ struct riscv_pmu *rvpmu = to_riscv_pmu(pmu); ++ ++ if (rvpmu->sse_evt) ++ sse_event_disable_local(rvpmu->sse_evt); ++} ++ ++static void riscv_pmu_enable(struct pmu *pmu) ++{ ++ struct riscv_pmu *rvpmu = to_riscv_pmu(pmu); ++ ++ if (rvpmu->sse_evt) ++ sse_event_enable_local(rvpmu->sse_evt); ++} ++ + static int riscv_pmu_add(struct perf_event *event, int flags) + { + struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); +@@ -414,6 +431,8 @@ struct riscv_pmu *riscv_pmu_alloc(void) + .event_mapped = riscv_pmu_event_mapped, + .event_unmapped = riscv_pmu_event_unmapped, + .event_idx = riscv_pmu_event_idx, ++ .pmu_enable = riscv_pmu_enable, ++ .pmu_disable = riscv_pmu_disable, + .add = riscv_pmu_add, + .del = riscv_pmu_del, + .start = riscv_pmu_start, diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c -index 901da688ea3f..685938868681 100644 +index 901da688ea3f..f4768999ca02 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c -@@ -19,10 +19,36 @@ +@@ -17,12 +17,39 @@ + #include + #include #include ++#include #include #include +#include @@ -488392,7 +500734,7 @@ index 901da688ea3f..685938868681 100644 
#define SYSCTL_NO_USER_ACCESS 0 #define SYSCTL_USER_ACCESS 1 -@@ -61,6 +87,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS; +@@ -61,6 +88,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS; static union sbi_pmu_ctr_info *pmu_ctr_list; static bool riscv_pmu_use_irq; static unsigned int riscv_pmu_irq_num; @@ -488400,25 +500742,119 @@ index 901da688ea3f..685938868681 100644 static unsigned int riscv_pmu_irq; /* Cache the available counters in a bitmask */ -@@ -694,7 +721,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) +@@ -668,10 +696,10 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu, + } + } + +-static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) ++static irqreturn_t pmu_sbi_ovf_handler(struct cpu_hw_events *cpu_hw_evt, ++ struct pt_regs *regs, bool from_sse) + { + struct perf_sample_data data; +- struct pt_regs *regs; + struct hw_perf_event *hw_evt; + union sbi_pmu_ctr_info *info; + int lidx, hidx, fidx; +@@ -679,7 +707,6 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) + struct perf_event *event; + unsigned long overflow; + unsigned long overflowed_ctrs = 0; +- struct cpu_hw_events *cpu_hw_evt = dev; + u64 start_clock = sched_clock(); + + if (WARN_ON_ONCE(!cpu_hw_evt)) +@@ -688,13 +715,15 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) + /* Firmware counter don't support overflow yet */ + fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); + if (fidx == RISCV_MAX_COUNTERS) { +- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); ++ if (!from_sse) ++ csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); + return IRQ_NONE; + } event = cpu_hw_evt->events[fidx]; if (!event) { - csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); -+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); ++ if (!from_sse) ++ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); return IRQ_NONE; } -@@ -708,7 +735,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) +@@ 
-706,16 +735,16 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) + + /* * Overflow interrupt pending bit should only be cleared after stopping - * all the counters to avoid any race condition. +- * all the counters to avoid any race condition. ++ * all the counters to avoid any race condition. When using SSE, ++ * interrupt is cleared when stopping counters. */ - csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); -+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); ++ if (!from_sse) ++ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask); /* No overflow bit is set */ if (!overflow) -@@ -780,8 +807,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) + return IRQ_NONE; + +- regs = get_irq_regs(); +- + for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { + struct perf_event *event = cpu_hw_evt->events[lidx]; + +@@ -761,6 +790,51 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) + return IRQ_HANDLED; + } + ++static irqreturn_t pmu_sbi_ovf_irq_handler(int irq, void *dev) ++{ ++ return pmu_sbi_ovf_handler(dev, get_irq_regs(), false); ++} ++ ++#ifdef CONFIG_RISCV_PMU_SSE ++static int pmu_sbi_ovf_sse_handler(u32 evt, void *arg, struct pt_regs *regs) ++{ ++ struct cpu_hw_events __percpu *hw_events = arg; ++ struct cpu_hw_events *hw_event = raw_cpu_ptr(hw_events); ++ ++ pmu_sbi_ovf_handler(hw_event, regs, true); ++ ++ return 0; ++} ++ ++static int pmu_sbi_setup_sse(struct riscv_pmu *pmu) ++{ ++ int ret; ++ struct sse_event *evt; ++ struct cpu_hw_events __percpu *hw_events = pmu->hw_events; ++ ++ evt = sse_event_register(SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, 0, ++ pmu_sbi_ovf_sse_handler, hw_events); ++ if (IS_ERR(evt)) ++ return PTR_ERR(evt); ++ ++ ret = sse_event_enable(evt); ++ if (ret) { ++ sse_event_unregister(evt); ++ return ret; ++ } ++ ++ pr_info("using SSE for PMU event delivery\n"); ++ pmu->sse_evt = evt; ++ ++ return ret; ++} ++#else ++static int pmu_sbi_setup_sse(struct riscv_pmu *pmu) ++{ ++ return 
-EOPNOTSUPP; ++} ++#endif ++ + static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) + { + struct riscv_pmu *pmu = hlist_entry_safe(node, struct riscv_pmu, node); +@@ -780,8 +854,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) if (riscv_pmu_use_irq) { cpu_hw_evt->irq = riscv_pmu_irq; @@ -488428,7 +500864,7 @@ index 901da688ea3f..685938868681 100644 enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE); } -@@ -792,7 +818,6 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) +@@ -792,7 +865,6 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) { if (riscv_pmu_use_irq) { disable_percpu_irq(riscv_pmu_irq); @@ -488436,7 +500872,18 @@ index 901da688ea3f..685938868681 100644 } /* Disable all counters access for user mode now */ -@@ -816,8 +841,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde +@@ -807,6 +879,10 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde + struct cpu_hw_events __percpu *hw_events = pmu->hw_events; + struct irq_domain *domain = NULL; + ++ ret = pmu_sbi_setup_sse(pmu); ++ if (!ret) ++ return 0; ++ + if (riscv_isa_extension_available(NULL, SSCOFPMF)) { + riscv_pmu_irq_num = RV_IRQ_PMU; + riscv_pmu_use_irq = true; +@@ -816,8 +892,15 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde riscv_cached_mimpid(0) == 0) { riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU; riscv_pmu_use_irq = true; @@ -488452,6 +500899,15 @@ index 901da688ea3f..685938868681 100644 if (!riscv_pmu_use_irq) return -EOPNOTSUPP; +@@ -834,7 +917,7 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde + return -ENODEV; + } + +- ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events); ++ ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_irq_handler, "riscv-pmu", hw_events); + if (ret) { + pr_err("registering percpu irq failed [%d]\n", ret); + 
return ret; diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index e4502958fd62..50f729360df1 100644 --- a/drivers/phy/Kconfig @@ -489333,7 +501789,7 @@ index 000000000000..931c9d9124e9 +MODULE_DESCRIPTION("Synopsys DesignWare MIPI DPHY driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig -index 79753411b778..7dff1019211b 100644 +index 79753411b778..710248039ed3 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -22,7 +22,7 @@ config PINCONF @@ -489386,16 +501842,16 @@ index 79753411b778..7dff1019211b 100644 each pin. This driver can also be built as a module called pinctrl-mlxbf3. -+config PINCTRL_SPACEMIT_K1X -+ bool "Spacemit k1x pinctrl driver" -+ depends on SOC_SPACEMIT_K1X ++config PINCTRL_SPACEMIT_K1 ++ bool "Spacemit k1 pinctrl driver" ++ depends on SOC_SPACEMIT_K1 + depends on OF + depends on HAS_IOMEM + select GENERIC_PINCTRL_GROUPS + select GENERIC_PINMUX_FUNCTIONS + select GENERIC_PINCONF + help -+ This support pinctrl driver for Spacemit k1x SoC. ++ This support pinctrl driver for Spacemit k1 SoC. 
+ source "drivers/pinctrl/actions/Kconfig" source "drivers/pinctrl/aspeed/Kconfig" @@ -489408,14 +501864,14 @@ index 79753411b778..7dff1019211b 100644 endif diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile -index 4275eca92488..6ad6de6a67ca 100644 +index 4275eca92488..bc5959808ee2 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -44,10 +44,13 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o -+obj-$(CONFIG_PINCTRL_SPACEMIT_K1X) += pinctrl-spacemit-k1x.o ++obj-$(CONFIG_PINCTRL_SPACEMIT_K1) += pinctrl-spacemit-k1.o +obj-$(CONFIG_PINCTRL_SPACEMIT_P1) += pinctrl-spacemit-p1.o obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o @@ -489436,11 +501892,11 @@ index 4275eca92488..6ad6de6a67ca 100644 obj-$(CONFIG_PINCTRL_VISCONTI) += visconti/ obj-$(CONFIG_ARCH_VT8500) += vt8500/ +obj-$(CONFIG_ARCH_ULTRARISC) += ultrarisc/ -diff --git a/drivers/pinctrl/pinctrl-spacemit-k1x.c b/drivers/pinctrl/pinctrl-spacemit-k1x.c +diff --git a/drivers/pinctrl/pinctrl-spacemit-k1.c b/drivers/pinctrl/pinctrl-spacemit-k1.c new file mode 100644 -index 000000000000..feed95179f33 +index 000000000000..59cfc2f74741 --- /dev/null -+++ b/drivers/pinctrl/pinctrl-spacemit-k1x.c ++++ b/drivers/pinctrl/pinctrl-spacemit-k1.c @@ -0,0 +1,2101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* @@ -491511,14 +503967,14 @@ index 000000000000..feed95179f33 + return 0; +} + -+static const struct pcs_soc_data pinctrl_spacemit_k1x = { ++static const struct pcs_soc_data pinctrl_spacemit_k1 = { + .flags = PCS_QUIRK_SHARED_IRQ | PCS_FEAT_PINCONF, + .irq_enable_mask = (1 << EDGE_CLEAR), /* WAKEUPENABLE */ + .irq_status_mask = (1 << EDGE_CLEAR), /* WAKEUPENABLE */ +}; + +static const struct of_device_id pcs_of_match[] = { -+ { .compatible = "pinctrl-spacemit-k1x", .data = &pinctrl_spacemit_k1x }, ++ 
{ .compatible = "pinctrl-spacemit-k1", .data = &pinctrl_spacemit_k1 }, + { }, +}; +MODULE_DEVICE_TABLE(of, pcs_of_match); @@ -494248,10 +506704,10 @@ index 000000000000..78f03c450e66 +obj-$(CONFIG_PINCTRL_ULTRARISC_DP1000) += pinctrl-ultrarisc-dp1000.o diff --git a/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc-dp1000.c b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc-dp1000.c new file mode 100644 -index 000000000000..78970c154018 +index 000000000000..7db11142cfbe --- /dev/null +++ b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc-dp1000.c -@@ -0,0 +1,122 @@ +@@ -0,0 +1,123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* UltraRISC DP1000 pinctrl driver + * @@ -494340,6 +506796,7 @@ index 000000000000..78970c154018 + .pins = ur_dp1000_pins, + .npins = ARRAY_SIZE(ur_dp1000_pins), + .offset = 0x2c0, ++ .num_ports = 5, + .ports = { + {"A", 16, 0x2c0, 0x310}, + {"B", 8, 0x2c4, 0x318}, @@ -494376,10 +506833,10 @@ index 000000000000..78970c154018 +module_platform_driver(ur_pinctrl_driver); diff --git a/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c new file mode 100644 -index 000000000000..7caae71a0ae8 +index 000000000000..1a1d118de148 --- /dev/null +++ b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.c -@@ -0,0 +1,499 @@ +@@ -0,0 +1,566 @@ +// SPDX-License-Identifier: GPL-2.0 +/* UltraRISC pinctrl driver + * @@ -494705,6 +507162,58 @@ index 000000000000..7caae71a0ae8 + +#define UR_CONF_BIT_PER_PIN (4) +#define UR_CONF_PIN_PER_REG (32/UR_CONF_BIT_PER_PIN) ++ ++static int ur_pin_num_to_port_pin(const struct ur_pinctrl_match_data *match_data, ++ struct ur_pin_val *pin_val, u32 pin_num) ++{ ++ const struct ur_port_desc *port_desc; ++ ++ for (int i = 0; i < match_data->num_ports; i++) { ++ port_desc = &match_data->ports[i]; ++ if (pin_num < port_desc->npins) { ++ pin_val->port = i; ++ pin_val->pin = pin_num; ++ pin_val->conf = 0; ++ return 0; ++ } ++ pin_num -= port_desc->npins; ++ } ++ return -EINVAL; ++} ++ ++static int 
ur_config_to_pin_val(struct ur_pinctrl *pin_ctrl, ++ struct ur_pin_val *pin_vals, ++ unsigned long *config) ++{ ++ enum pin_config_param param = pinconf_to_config_param(*config); ++ u32 arg = pinconf_to_config_argument(*config); ++ ++ dev_dbg(pin_ctrl->dev, "%s(%d): config_to_pin_val: param=%d, arg=0x%x\n", ++ __func__, __LINE__, param, arg); ++ ++ switch (param) { ++ case PIN_CONFIG_BIAS_DISABLE: ++ pin_vals->conf &= ~UR_BIAS_MASK; ++ break; ++ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: ++ pin_vals->conf &= ~(UR_PULL_DOWN | UR_PULL_UP); ++ break; ++ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT: ++ case PIN_CONFIG_BIAS_PULL_DOWN: ++ pin_vals->conf |= UR_PULL_DOWN; ++ break; ++ case PIN_CONFIG_BIAS_PULL_UP: ++ pin_vals->conf |= UR_PULL_UP; ++ break; ++ case PIN_CONFIG_DRIVE_PUSH_PULL: ++ case PIN_CONFIG_PERSIST_STATE: ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ return 0; ++} ++ +static int ur_set_pin_conf(struct ur_pinctrl *pin_ctrl, struct ur_pin_val *pin_vals) +{ + const struct ur_port_desc *port_desc; @@ -494713,8 +507222,11 @@ index 000000000000..7caae71a0ae8 + u32 val, conf; + + port_desc = &pin_ctrl->match_data->ports[pin_vals->port]; ++ dev_dbg(pin_ctrl->dev, "set pinconf port=%d pin=%d conf=0x%x\n", ++ pin_vals->port, pin_vals->pin, pin_vals->conf); + reg = pin_ctrl->base + port_desc->conf_offset; -+ dev_dbg(pin_ctrl->dev, "pinconf base=0x%llx, reg=0x%llx\n", (u64)pin_ctrl->base, (u64)reg); ++ dev_dbg(pin_ctrl->dev, "pinconf base=0x%llx, conf_offset=0x%x, reg=0x%llx\n", ++ (u64)pin_ctrl->base, port_desc->conf_offset, (u64)reg); + reg += (pin_vals->pin / UR_CONF_PIN_PER_REG) * UR_CONF_BIT_PER_PIN; + dev_dbg(pin_ctrl->dev, "pinconf pin=0x%llx\n", (u64)reg); + @@ -494746,16 +507258,28 @@ index 000000000000..7caae71a0ae8 + unsigned long *configs, + unsigned int num_configs) +{ -+ struct ur_pin_val *pin_conf; ++ struct ur_pin_val pin_val; + struct ur_pinctrl *ur_pinctrl = pinctrl_dev_get_drvdata(pctldev); ++ int ret; ++ ++ ret = 
ur_pin_num_to_port_pin(ur_pinctrl->match_data, &pin_val, pin); ++ if (ret < 0) { ++ dev_err(pctldev->dev, "invalid pin number %d\n", pin); ++ return ret; ++ } ++ dev_dbg(pctldev->dev, "%s(%d): pin=%d, num_configs=%d, port=%d, pin=%d\n", ++ __func__, __LINE__, pin, num_configs, pin_val.port, pin_val.pin); + -+ dev_dbg(pctldev->dev, "%s(%d): pin=%d, num_configs=%d\n", -+ __func__, __LINE__, pin, num_configs); -+ pin_conf = (struct ur_pin_val *)configs; + for (int i = 0; i < num_configs; i++) { -+ dev_dbg(pctldev->dev, "pinconf[%d], port=%d, pin=%d, conf=0x%x\n", -+ i, pin_conf[i].port, pin_conf[i].pin, pin_conf[i].conf); -+ ur_set_pin_conf(ur_pinctrl, &pin_conf[i]); ++ ret = ur_config_to_pin_val(ur_pinctrl, &pin_val, &configs[i]); ++ if (ret < 0) { ++ dev_err(pctldev->dev, "invalid config 0x%lx\n", configs[i]); ++ return ret; ++ } ++ ++ dev_dbg(pctldev->dev, "%s(%d): port=%d, pin=%d, conf=0x%x\n", ++ __func__, __LINE__, pin_val.port, pin_val.pin, pin_val.conf); ++ ur_set_pin_conf(ur_pinctrl, &pin_val); + } + return 0; +} @@ -494881,10 +507405,10 @@ index 000000000000..7caae71a0ae8 +} diff --git a/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h new file mode 100644 -index 000000000000..ff30ddca9564 +index 000000000000..621cf4dd9002 --- /dev/null +++ b/drivers/pinctrl/ultrarisc/pinctrl-ultrarisc.h -@@ -0,0 +1,77 @@ +@@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* UltraRISC pinctrl driver + * @@ -494937,6 +507461,7 @@ index 000000000000..ff30ddca9564 + const struct pinctrl_pin_desc *pins; + u32 npins; + u32 offset; ++ u32 num_ports; + struct ur_port_desc ports[]; +}; + @@ -496780,8 +509305,26 @@ index 000000000000..dea23e7d127e +MODULE_AUTHOR("linghui.zlh "); +MODULE_DESCRIPTION("XuanTie TH1520 Aon regulator virtual driver"); +MODULE_LICENSE("GPL"); +diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c +index 0c363ca566ff..748eeb966c06 100644 +--- 
a/drivers/remoteproc/remoteproc_core.c ++++ b/drivers/remoteproc/remoteproc_core.c +@@ -108,10 +108,10 @@ static int rproc_enable_iommu(struct rproc *rproc) + return 0; + } + +- domain = iommu_domain_alloc(dev->bus); +- if (!domain) { ++ domain = iommu_paging_domain_alloc(dev); ++ if (IS_ERR(domain)) { + dev_err(dev, "can't alloc iommu domain\n"); +- return -ENOMEM; ++ return PTR_ERR(domain); + } + + iommu_set_fault_handler(domain, rproc_iommu_fault, rproc); diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig -index ccd59ddd7610..663427a27d87 100644 +index ccd59ddd7610..8eff8840516c 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -253,6 +253,16 @@ config RESET_SUNXI @@ -496805,17 +509348,17 @@ index ccd59ddd7610..663427a27d87 100644 help This enables the reset controller driver for Xilinx Zynq SoCs. -+config RESET_K1X_SPACEMIT -+ tristate "Reset controller driver for Spacemit K1X SoCs" -+ depends on SOC_SPACEMIT_K1X ++config RESET_K1_SPACEMIT ++ tristate "Reset controller driver for Spacemit K1 SoC" ++ depends on SOC_SPACEMIT_K1 + help -+ Support for reset controllers on Spacemit K1X SoCs. ++ Support for reset controllers on Spacemit K1 SoC. 
+ source "drivers/reset/starfive/Kconfig" source "drivers/reset/sti/Kconfig" source "drivers/reset/hisilicon/Kconfig" diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile -index 8270da8a4baa..4d61ad1991f2 100644 +index 8270da8a4baa..36dd9534c11c 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -33,6 +33,8 @@ obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o @@ -496831,7 +509374,7 @@ index 8270da8a4baa..4d61ad1991f2 100644 obj-$(CONFIG_RESET_UNIPHIER_GLUE) += reset-uniphier-glue.o obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o obj-$(CONFIG_ARCH_ZYNQMP) += reset-zynqmp.o -+obj-$(CONFIG_RESET_K1X_SPACEMIT) += reset-spacemit-k1x.o ++obj-$(CONFIG_RESET_K1_SPACEMIT) += reset-spacemit-k1.o diff --git a/drivers/reset/reset-sophgo.c b/drivers/reset/reset-sophgo.c new file mode 100644 index 000000000000..3c46a43e24ba @@ -497001,11 +509544,11 @@ index 000000000000..3c46a43e24ba +MODULE_AUTHOR("Wei Huang"); +MODULE_DESCRIPTION("Bitmain SoC Reset Controoler Driver"); +MODULE_LICENSE("GPL"); -diff --git a/drivers/reset/reset-spacemit-k1x.c b/drivers/reset/reset-spacemit-k1x.c +diff --git a/drivers/reset/reset-spacemit-k1.c b/drivers/reset/reset-spacemit-k1.c new file mode 100644 -index 000000000000..d2ea9c9f196c +index 000000000000..ac25f3249d00 --- /dev/null -+++ b/drivers/reset/reset-spacemit-k1x.c ++++ b/drivers/reset/reset-spacemit-k1.c @@ -0,0 +1,669 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* @@ -497022,7 +509565,7 @@ index 000000000000..d2ea9c9f196c +#include +#include +#include -+#include ++#include +#include +#include +#include @@ -497190,10 +509733,10 @@ index 000000000000..d2ea9c9f196c + +/* for register access protection */ +extern spinlock_t g_cru_lock; -+struct spacemit_reset k1x_reset_controller; ++struct spacemit_reset k1_reset_controller; + +static const struct spacemit_reset_signal -+ k1x_reset_signals[RESET_NUMBER] = { ++ k1_reset_signals[RESET_NUMBER] = { + [RESET_UART1] = { APBC_UART1_CLK_RST, BIT(2), 0, + BIT(2), RST_BASE_TYPE_APBC }, + 
[RESET_UART2] = { APBC_UART2_CLK_RST, BIT(2), 0, @@ -497584,9 +510127,9 @@ index 000000000000..d2ea9c9f196c + return spacemit_reset_update(rcdev, id, false); +} + -+static const struct spacemit_reset_variant k1x_reset_data = { -+ .signals = k1x_reset_signals, -+ .signals_num = ARRAY_SIZE(k1x_reset_signals), ++static const struct spacemit_reset_variant k1_reset_data = { ++ .signals = k1_reset_signals, ++ .signals_num = ARRAY_SIZE(k1_reset_signals), + .ops = { + .assert = spacemit_reset_assert, + .deassert = spacemit_reset_deassert, @@ -497597,8 +510140,8 @@ index 000000000000..d2ea9c9f196c +{ + struct spacemit_reset *reset; + -+ if (of_device_is_compatible(np, "spacemit,k1x-reset")) { -+ reset = &k1x_reset_controller; ++ if (of_device_is_compatible(np, "spacemit,k1-reset")) { ++ reset = &k1_reset_controller; + reset->mpmu_base = of_iomap(np, 0); + if (!reset->mpmu_base) { + pr_err("failed to map mpmu registers\n"); @@ -497659,22 +510202,22 @@ index 000000000000..d2ea9c9f196c + goto out; + } + } else { -+ pr_err("not spacemit,k1x-reset\n"); ++ pr_err("not spacemit,k1-reset\n"); + goto out; + } + + reset->lock = &g_cru_lock; -+ reset->signals = k1x_reset_data.signals; ++ reset->signals = k1_reset_data.signals; + reset->rcdev.owner = THIS_MODULE; -+ reset->rcdev.nr_resets = k1x_reset_data.signals_num; -+ reset->rcdev.ops = &k1x_reset_data.ops; ++ reset->rcdev.nr_resets = k1_reset_data.signals_num; ++ reset->rcdev.ops = &k1_reset_data.ops; + reset->rcdev.of_node = np; + reset_controller_register(&reset->rcdev); +out: + return; +} + -+CLK_OF_DECLARE(k1x_reset, "spacemit,k1x-reset", spacemit_reset_init); ++CLK_OF_DECLARE(k1_reset, "spacemit,k1-reset", spacemit_reset_init); + diff --git a/drivers/reset/reset-th1520.c b/drivers/reset/reset-th1520.c new file mode 100644 @@ -500276,6 +512819,23 @@ index 0706a27d13be..e58b1adf9e0b 100644 obj-y += xilinx/ +obj-y += xuantie/ +obj-$(CONFIG_SOC_SPACEMIT) += spacemit/ +diff --git a/drivers/soc/fsl/qbman/qman_portal.c 
b/drivers/soc/fsl/qbman/qman_portal.c +index e23b60618c1a..456ef5d5c199 100644 +--- a/drivers/soc/fsl/qbman/qman_portal.c ++++ b/drivers/soc/fsl/qbman/qman_portal.c +@@ -48,9 +48,10 @@ static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu) + struct device *dev = pcfg->dev; + int ret; + +- pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type); +- if (!pcfg->iommu_domain) { ++ pcfg->iommu_domain = iommu_paging_domain_alloc(dev); ++ if (IS_ERR(pcfg->iommu_domain)) { + dev_err(dev, "%s(): iommu_domain_alloc() failed", __func__); ++ pcfg->iommu_domain = NULL; + goto no_iommu; + } + ret = fsl_pamu_configure_l1_stash(pcfg->iommu_domain, cpu); diff --git a/drivers/soc/sophgo/Makefile b/drivers/soc/sophgo/Makefile new file mode 100644 index 000000000000..1e143d85aa17 @@ -502683,13 +515243,14 @@ index 000000000000..ca442d313d86 +Public License instead of this License. diff --git a/drivers/soc/xuantie/nna/Kconfig b/drivers/soc/xuantie/nna/Kconfig new file mode 100644 -index 000000000000..d9abb52c6a21 +index 000000000000..704340ba02de --- /dev/null +++ b/drivers/soc/xuantie/nna/Kconfig -@@ -0,0 +1,64 @@ +@@ -0,0 +1,65 @@ +menuconfig VHA + tristate "IMG neural network accelerator" + default n ++ select SYNC_FILE + +if VHA +choice @@ -607224,7 +619785,7 @@ index 000000000000..bf30d17ce373 +/* This part must be outside protection */ +#include diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig -index e3217ce5a3f6..cce525a1bb28 100644 +index e3217ce5a3f6..06343050e33e 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -295,6 +295,12 @@ config SPI_DAVINCI @@ -607246,14 +619807,14 @@ index e3217ce5a3f6..cce525a1bb28 100644 +config SPI_SPACEMIT_K1 + tristate "Spacemit K1 SPI Controller Platform Driver Support" -+ depends on SOC_SPACEMIT_K1X ++ depends on SOC_SPACEMIT_K1 + help + This enables support for the SPI master controller in the Spacemit + k1 SOC. 
+ +config SPI_SPACEMIT_K1_QSPI + tristate "Spacemit K1 QuadSPI Controller Platform Driver Support" -+ depends on SOC_SPACEMIT_K1X && SPI_MEM ++ depends on SOC_SPACEMIT_K1 && SPI_MEM + help + This enables support for the Spacemit K1 QuadSPI controller in master mode. + This controller does only support the high-level SPI memory interface @@ -612101,8 +624662,115 @@ index 31f53fa77e4a..2f3571f17ecd 100644 } -console_initcall(hvc_sbi_console_init); +device_initcall(hvc_sbi_init); +diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c +index c536028e92dc..59b350f0a28c 100644 +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -825,6 +825,65 @@ void serial8250_resume_port(int line) + } + EXPORT_SYMBOL(serial8250_resume_port); + ++/* ++ * Generic 16550A platform devices ++ */ ++static int serial8250_platform_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct uart_8250_port uart = { 0 }; ++ struct resource *regs; ++ unsigned char iotype; ++ int ret, line; ++ ++ regs = platform_get_resource(pdev, IORESOURCE_IO, 0); ++ if (regs) { ++ uart.port.iobase = regs->start; ++ iotype = UPIO_PORT; ++ } else { ++ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!regs) { ++ dev_err(dev, "no registers defined\n"); ++ return -EINVAL; ++ } ++ ++ uart.port.mapbase = regs->start; ++ uart.port.mapsize = resource_size(regs); ++ uart.port.flags = UPF_IOREMAP; ++ iotype = UPIO_MEM; ++ } ++ ++ /* Default clock frequency*/ ++ uart.port.uartclk = 1843200; ++ uart.port.type = PORT_16550A; ++ uart.port.dev = &pdev->dev; ++ uart.port.flags |= UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; ++ ret = uart_read_and_validate_port_properties(&uart.port); ++ /* no interrupt -> fall back to polling */ ++ if (ret == -ENXIO) ++ ret = 0; ++ if (ret) ++ return ret; ++ ++ if (uart.port.mapbase) { ++ uart.port.membase = devm_ioremap(dev, uart.port.mapbase, uart.port.mapsize); ++ if (!uart.port.membase) ++ 
return -ENOMEM; ++ } ++ ++ /* ++ * The previous call may not set iotype correctly when reg-io-width ++ * property is absent and it doesn't support IO port resource. ++ */ ++ uart.port.iotype = iotype; ++ ++ line = serial8250_register_8250_port(&uart); ++ if (line < 0) ++ return -ENODEV; ++ ++ return 0; ++} ++ + /* + * Register a set of serial devices attached to a platform device. The + * list is terminated with a zero flags entry, which means we expect +@@ -835,7 +894,15 @@ static int serial8250_probe(struct platform_device *dev) + struct plat_serial8250_port *p = dev_get_platdata(&dev->dev); + struct uart_8250_port uart; + int ret, i, irqflag = 0; ++ struct fwnode_handle *fwnode = dev_fwnode(&dev->dev); + ++ /* ++ * Probe platform UART devices defined using standard hardware ++ * discovery mechanism like ACPI or DT. Support only ACPI based ++ * serial device for now. ++ */ ++ if (!p && is_acpi_node(fwnode)) ++ return serial8250_platform_probe(dev); + memset(&uart, 0, sizeof(uart)); + + if (share_irqs) +@@ -924,6 +991,12 @@ static int serial8250_resume(struct platform_device *dev) + return 0; + } + ++static const struct acpi_device_id acpi_platform_serial_table[] = { ++ { "RSCV0003", 0 }, // RISC-V Generic 16550A UART ++ { } ++}; ++MODULE_DEVICE_TABLE(acpi, acpi_platform_serial_table); ++ + static struct platform_driver serial8250_isa_driver = { + .probe = serial8250_probe, + .remove = serial8250_remove, +@@ -931,6 +1004,7 @@ static struct platform_driver serial8250_isa_driver = { + .resume = serial8250_resume, + .driver = { + .name = "serial8250", ++ .acpi_match_table = ACPI_PTR(acpi_platform_serial_table), + }, + }; + diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c -index 7f23037813bc..eaa808e16861 100644 +index 62492cf10bc9..6ca8ee2caede 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -221,66 +221,68 @@ int serial8250_request_dma(struct uart_8250_port *p) @@ -612635,11 +625303,366 
@@ index f13e91f2cace..7dd2a8e7b780 100644 static inline u32 dw8250_readl_ext(struct uart_port *p, int offset) { if (p->iotype == UPIO_MEM32BE) +diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c +index 51329625c48a..1b07d89c4398 100644 +--- a/drivers/tty/serial/8250/8250_of.c ++++ b/drivers/tty/serial/8250/8250_of.c +@@ -4,7 +4,10 @@ + * + * Copyright (C) 2006 Arnd Bergmann , IBM Corp. + */ ++ ++#include + #include ++#include + #include + #include + #include +@@ -15,16 +18,69 @@ + #include + #include + #include ++#include + + #include "8250.h" + + struct of_serial_info { + struct clk *clk; ++ struct clk *bus_clk; + struct reset_control *rst; + int type; + int line; ++ struct notifier_block clk_notifier; + }; + ++/* Nuvoton NPCM timeout register */ ++#define UART_NPCM_TOR 7 ++#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */ ++ ++static int npcm_startup(struct uart_port *port) ++{ ++ /* ++ * Nuvoton calls the scratch register 'UART_TOR' (timeout ++ * register). Enable it, and set TIOC (timeout interrupt ++ * comparator) to be 0x20 for correct operation. 
++ */ ++ serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20); ++ ++ return serial8250_do_startup(port); ++} ++ ++/* Nuvoton NPCM UARTs have a custom divisor calculation */ ++static unsigned int npcm_get_divisor(struct uart_port *port, unsigned int baud, ++ unsigned int *frac) ++{ ++ return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2; ++} ++ ++static int npcm_setup(struct uart_port *port) ++{ ++ port->get_divisor = npcm_get_divisor; ++ port->startup = npcm_startup; ++ return 0; ++} ++ ++static inline struct of_serial_info *clk_nb_to_info(struct notifier_block *nb) ++{ ++ return container_of(nb, struct of_serial_info, clk_notifier); ++} ++ ++static int of_platform_serial_clk_notifier_cb(struct notifier_block *nb, unsigned long event, ++ void *data) ++{ ++ struct of_serial_info *info = clk_nb_to_info(nb); ++ struct uart_8250_port *port8250 = serial8250_get_port(info->line); ++ struct clk_notifier_data *ndata = data; ++ ++ if (event == POST_RATE_CHANGE) { ++ serial8250_update_uartclk(&port8250->port, ndata->new_rate); ++ return NOTIFY_OK; ++ } ++ ++ return NOTIFY_DONE; ++} ++ + /* + * Fill a struct uart_port for a given device node + */ +@@ -33,137 +89,78 @@ static int of_platform_serial_setup(struct platform_device *ofdev, + struct of_serial_info *info) + { + struct resource resource; +- struct device_node *np = ofdev->dev.of_node; ++ struct device *dev = &ofdev->dev; ++ struct device_node *np = dev->of_node; + struct uart_port *port = &up->port; +- u32 clk, spd, prop; +- int ret, irq; ++ u32 spd; ++ int ret; + + memset(port, 0, sizeof *port); + + pm_runtime_enable(&ofdev->dev); + pm_runtime_get_sync(&ofdev->dev); + +- if (of_property_read_u32(np, "clock-frequency", &clk)) { +- +- /* Get clk rate through clk driver if present */ +- info->clk = devm_clk_get(&ofdev->dev, NULL); +- if (IS_ERR(info->clk)) { +- ret = PTR_ERR(info->clk); +- if (ret != -EPROBE_DEFER) +- dev_warn(&ofdev->dev, +- "failed to get clock: %d\n", ret); +- goto err_pmruntime; +- } +- 
+- ret = clk_prepare_enable(info->clk); +- if (ret < 0) +- goto err_pmruntime; +- +- clk = clk_get_rate(info->clk); +- } +- /* If current-speed was set, then try not to change it. */ +- if (of_property_read_u32(np, "current-speed", &spd) == 0) +- port->custom_divisor = clk / (16 * spd); +- + ret = of_address_to_resource(np, 0, &resource); + if (ret) { +- dev_warn(&ofdev->dev, "invalid address\n"); +- goto err_unprepare; ++ dev_err_probe(dev, ret, "invalid address\n"); ++ goto err_pmruntime; + } + +- port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | +- UPF_FIXED_TYPE; ++ port->dev = &ofdev->dev; ++ port->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE; + spin_lock_init(&port->lock); + + if (resource_type(&resource) == IORESOURCE_IO) { +- port->iotype = UPIO_PORT; + port->iobase = resource.start; + } else { + port->mapbase = resource.start; + port->mapsize = resource_size(&resource); +- +- /* Check for shifted address mapping */ +- if (of_property_read_u32(np, "reg-offset", &prop) == 0) { +- if (prop >= port->mapsize) { +- dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n", +- prop, &port->mapsize); +- ret = -EINVAL; +- goto err_unprepare; +- } +- +- port->mapbase += prop; +- port->mapsize -= prop; +- } +- +- port->iotype = UPIO_MEM; +- if (of_property_read_u32(np, "reg-io-width", &prop) == 0) { +- switch (prop) { +- case 1: +- port->iotype = UPIO_MEM; +- break; +- case 2: +- port->iotype = UPIO_MEM16; +- break; +- case 4: +- port->iotype = of_device_is_big_endian(np) ? +- UPIO_MEM32BE : UPIO_MEM32; +- break; +- default: +- dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n", +- prop); +- ret = -EINVAL; +- goto err_unprepare; +- } +- } + port->flags |= UPF_IOREMAP; + } + +- /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. 
*/ +- if (of_device_is_compatible(np, "mrvl,mmp-uart")) +- port->regshift = 2; +- +- /* Check for registers offset within the devices address range */ +- if (of_property_read_u32(np, "reg-shift", &prop) == 0) +- port->regshift = prop; ++ ret = uart_read_and_validate_port_properties(port); ++ if (ret) ++ goto err_pmruntime; + +- /* Check for fifo size */ +- if (of_property_read_u32(np, "fifo-size", &prop) == 0) +- port->fifosize = prop; ++ /* Get clk rate through clk driver if present */ ++ if (!port->uartclk) { ++ struct clk *bus_clk; + +- /* Check for a fixed line number */ +- ret = of_alias_get_id(np, "serial"); +- if (ret >= 0) +- port->line = ret; ++ bus_clk = devm_clk_get_optional_enabled(dev, "bus"); ++ if (IS_ERR(bus_clk)) { ++ ret = dev_err_probe(dev, PTR_ERR(bus_clk), "failed to get bus clock\n"); ++ goto err_pmruntime; ++ } + +- irq = of_irq_get(np, 0); +- if (irq < 0) { +- if (irq == -EPROBE_DEFER) { +- ret = -EPROBE_DEFER; +- goto err_unprepare; ++ /* If the bus clock is required, core clock must be named */ ++ info->clk = devm_clk_get_enabled(dev, bus_clk ? "core" : NULL); ++ if (IS_ERR(info->clk)) { ++ ret = dev_err_probe(dev, PTR_ERR(info->clk), "failed to get clock\n"); ++ goto err_pmruntime; + } +- /* IRQ support not mandatory */ +- irq = 0; ++ ++ info->bus_clk = bus_clk; ++ port->uartclk = clk_get_rate(info->clk); + } ++ /* If current-speed was set, then try not to change it. */ ++ if (of_property_read_u32(np, "current-speed", &spd) == 0) ++ port->custom_divisor = port->uartclk / (16 * spd); + +- port->irq = irq; ++ /* Compatibility with the deprecated pxa driver and 8250_pxa drivers. 
*/ ++ if (of_device_is_compatible(np, "mrvl,mmp-uart")) ++ port->regshift = 2; + + info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL); + if (IS_ERR(info->rst)) { + ret = PTR_ERR(info->rst); +- goto err_unprepare; ++ goto err_pmruntime; + } + + ret = reset_control_deassert(info->rst); + if (ret) +- goto err_unprepare; ++ goto err_pmruntime; + + port->type = type; +- port->uartclk = clk; +- +- if (of_property_read_bool(np, "no-loopback-test")) +- port->flags |= UPF_SKIP_TEST; +- +- port->dev = &ofdev->dev; + port->rs485_config = serial8250_em485_config; + port->rs485_supported = serial8250_em485_supported; + up->rs485_start_tx = serial8250_em485_start_tx; +@@ -172,10 +169,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev, + switch (type) { + case PORT_RT2880: + ret = rt288x_setup(port); +- if (ret) +- goto err_unprepare; ++ break; ++ case PORT_NPCM: ++ ret = npcm_setup(port); ++ break; ++ default: ++ /* Nothing to do */ ++ ret = 0; + break; + } ++ if (ret) ++ goto err_pmruntime; + + if (IS_REACHABLE(CONFIG_SERIAL_8250_FSL) && + (of_device_is_compatible(np, "fsl,ns16550") || +@@ -185,8 +189,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev, + } + + return 0; +-err_unprepare: +- clk_disable_unprepare(info->clk); + err_pmruntime: + pm_runtime_put_sync(&ofdev->dev); + pm_runtime_disable(&ofdev->dev); +@@ -248,12 +250,22 @@ static int of_platform_serial_probe(struct platform_device *ofdev) + info->type = port_type; + info->line = ret; + platform_set_drvdata(ofdev, info); ++ ++ if (info->clk) { ++ info->clk_notifier.notifier_call = of_platform_serial_clk_notifier_cb; ++ ret = clk_notifier_register(info->clk, &info->clk_notifier); ++ if (ret) { ++ dev_err_probe(port8250.port.dev, ret, "Failed to set the clock notifier\n"); ++ goto err_unregister; ++ } ++ } ++ + return 0; ++err_unregister: ++ serial8250_unregister_port(info->line); + err_dispose: +- irq_dispose_mapping(port8250.port.irq); + 
pm_runtime_put_sync(&ofdev->dev); + pm_runtime_disable(&ofdev->dev); +- clk_disable_unprepare(info->clk); + err_free: + kfree(info); + return ret; +@@ -266,12 +278,14 @@ static int of_platform_serial_remove(struct platform_device *ofdev) + { + struct of_serial_info *info = platform_get_drvdata(ofdev); + ++ if (info->clk) ++ clk_notifier_unregister(info->clk, &info->clk_notifier); ++ + serial8250_unregister_port(info->line); + + reset_control_assert(info->rst); + pm_runtime_put_sync(&ofdev->dev); + pm_runtime_disable(&ofdev->dev); +- clk_disable_unprepare(info->clk); + kfree(info); + return 0; + } +@@ -288,6 +302,7 @@ static int of_serial_suspend(struct device *dev) + if (!uart_console(port) || console_suspend_enabled) { + pm_runtime_put_sync(dev); + clk_disable_unprepare(info->clk); ++ clk_disable_unprepare(info->bus_clk); + } + return 0; + } +@@ -300,6 +315,7 @@ static int of_serial_resume(struct device *dev) + + if (!uart_console(port) || console_suspend_enabled) { + pm_runtime_get_sync(dev); ++ clk_prepare_enable(info->bus_clk); + clk_prepare_enable(info->clk); + } + diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c -index 23aed9e89e30..45729185fc3c 100644 +index 23aed9e89e30..ca1b8463eba9 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c -@@ -1535,7 +1535,7 @@ static inline void __start_tx(struct uart_port *port) +@@ -38,10 +38,6 @@ + + #include "8250.h" + +-/* Nuvoton NPCM timeout register */ +-#define UART_NPCM_TOR 7 +-#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */ +- + /* + * Debugging. 
+ */ +@@ -1535,7 +1531,7 @@ static inline void __start_tx(struct uart_port *port) { struct uart_8250_port *up = up_to_u8250p(port); @@ -612648,7 +625671,7 @@ index 23aed9e89e30..45729185fc3c 100644 return; if (serial8250_set_THRI(up)) { -@@ -1935,7 +1935,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) +@@ -1935,7 +1931,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir) d = irq_get_irq_data(port->irq); if (d && irqd_is_wakeup_set(d)) pm_wakeup_event(tport->tty->dev, 0); @@ -612657,7 +625680,23 @@ index 23aed9e89e30..45729185fc3c 100644 status = serial8250_rx_chars(up, status); } serial8250_modem_status(up); -@@ -2450,6 +2450,14 @@ int serial8250_do_startup(struct uart_port *port) +@@ -2238,15 +2234,6 @@ int serial8250_do_startup(struct uart_port *port) + UART_DA830_PWREMU_MGMT_FREE); + } + +- if (port->type == PORT_NPCM) { +- /* +- * Nuvoton calls the scratch register 'UART_TOR' (timeout +- * register). Enable it, and set TIOC (timeout interrupt +- * comparator) to be 0x20 for correct operation. 
+- */ +- serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20); +- } +- + #ifdef CONFIG_SERIAL_8250_RSA + /* + * If this is an RSA port, see if we can kick it up to the +@@ -2450,6 +2437,14 @@ int serial8250_do_startup(struct uart_port *port) dev_warn_ratelimited(port->dev, "%s\n", msg); up->dma = NULL; } @@ -612672,8 +625711,33 @@ index 23aed9e89e30..45729185fc3c 100644 } /* +@@ -2547,15 +2542,6 @@ static void serial8250_shutdown(struct uart_port *port) + serial8250_do_shutdown(port); + } + +-/* Nuvoton NPCM UARTs have a custom divisor calculation */ +-static unsigned int npcm_get_divisor(struct uart_8250_port *up, +- unsigned int baud) +-{ +- struct uart_port *port = &up->port; +- +- return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2; +-} +- + static void serial8250_flush_buffer(struct uart_port *port) + { + struct uart_8250_port *up = up_to_u8250p(port); +@@ -2608,8 +2594,6 @@ static unsigned int serial8250_do_get_divisor(struct uart_port *port, + quot = 0x8001; + else if (magic_multiplier && baud >= port->uartclk / 12) + quot = 0x8002; +- else if (up->port.type == PORT_NPCM) +- quot = npcm_get_divisor(up, baud); + else + quot = uart_get_divisor(port, baud); + diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig -index bdc568a4ab66..2df72d126498 100644 +index bdc568a4ab66..81819eb04585 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -87,7 +87,7 @@ config SERIAL_EARLYCON_SEMIHOST @@ -612685,51 +625749,58 @@ index bdc568a4ab66..2df72d126498 100644 select SERIAL_CORE select SERIAL_CORE_CONSOLE select SERIAL_EARLYCON -@@ -411,7 +411,8 @@ config SERIAL_PXA - depends on ARCH_PXA || ARCH_MMP - select SERIAL_CORE - select SERIAL_8250_PXA if SERIAL_8250=y -- select SERIAL_PXA_NON8250 if !SERIAL_8250=y -+ select SERIAL_PXA_NON8250 if !SERIAL_8250=y && !SOC_SPACEMIT_K1X -+ select SERIAL_PXA_K1X if !SERIAL_8250=y && SOC_SPACEMIT_K1X - help - If you have a machine based on an Intel XScale PXA2xx CPU you - can enable 
its onboard serial ports by enabling this option. -@@ -442,6 +443,22 @@ config SERIAL_PXA_CONSOLE - Unless you have a specific need, you should use SERIAL_8250_PXA - and SERIAL_8250_CONSOLE instead of this. - -+config SERIAL_SPACEMIT_K1X -+ bool "Spacemit k1x serial port support" -+ depends on SOC_SPACEMIT_K1X -+ depends on SERIAL_CORE +@@ -1578,6 +1578,39 @@ config SERIAL_NUVOTON_MA35D1_CONSOLE + but you can alter that using a kernel command line option such as + "console=ttyNVTx". + ++config SERIAL_LRW_UART ++ tristate "LRW UART support" ++ select SERIAL_CORE + help -+ If you have a machine based on Spacemit k1x soc, -+ can enable its onboard serial port by enabling this option. ++ This option enables support for the LRW Universal Asynchronous ++ Receiver/Transmitter (UART) serial controller. ++ ++ Select this option if you are building a kernel for a device that ++ contains a LRW UART IP block. ++ ++ This driver can be built as a module; if so, the module will be ++ called lrw_uart. + -+config SERIAL_SPACEMIT_K1X_CONSOLE -+ bool "Console on spacemit k1x serial port" -+ depends on SERIAL_SPACEMIT_K1X -+ depends on SERIAL_CORE_CONSOLE ++ If you are using a system with an LRW UART controller, say Y or M here. ++ If unsure, say N. ++ ++config SERIAL_LRW_UART_CONSOLE ++ bool "Console on LRW UART" ++ depends on SERIAL_LRW_UART=y ++ select SERIAL_CORE_CONSOLE ++ select SERIAL_EARLYCON + help -+ If you have enabled the serial port on the Spacemit k1 chip, -+ you can make it the console by answering Y to this option. ++ Say Y here if you wish to use an LRW UART as the system ++ console (the system console is the device which receives all kernel ++ messages and warnings and which allows logins in single user mode). 
+ - config SERIAL_SA1100 - bool "SA1100 serial port support" - depends on ARCH_SA1100 ++ Even if you say Y here, the currently visible framebuffer console ++ (/dev/tty0) will still be used as the system console by default, but ++ you can alter that using a kernel command line option such as ++ "console=ttyLRW0". (Try "man bootparam" or see the documentation of ++ your boot loader (lilo or loadlin) about how to pass options to the ++ kernel at boot time.) ++ + endmenu + + config SERIAL_MCTRL_GPIO diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile -index 138abbc89738..0feb268753fb 100644 +index 138abbc89738..6146cc3faf39 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile -@@ -28,6 +28,7 @@ obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o - obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o - obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o - obj-$(CONFIG_SERIAL_PXA_NON8250) += pxa.o -+obj-$(CONFIG_SERIAL_SPACEMIT_K1X) += spacemit_k1x_uart.o - obj-$(CONFIG_SERIAL_SA1100) += sa1100.o - obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o - obj-$(CONFIG_SERIAL_SAMSUNG) += samsung_tty.o +@@ -88,6 +88,7 @@ obj-$(CONFIG_SERIAL_MILBEAUT_USIO) += milbeaut_usio.o + obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o + obj-$(CONFIG_SERIAL_LITEUART) += liteuart.o + obj-$(CONFIG_SERIAL_SUNPLUS) += sunplus-uart.o ++obj-$(CONFIG_SERIAL_LRW_UART) += lrw_uart.o + + # GPIOLIB helpers for modem control lines + obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c index 27afb0b74ea7..0162155f0c83 100644 --- a/drivers/tty/serial/earlycon-riscv-sbi.c @@ -612776,2157 +625847,3017 @@ index 27afb0b74ea7..0162155f0c83 100644 return 0; } EARLYCON_DECLARE(sbi, early_sbi_setup); -diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c -index ed3953bd0407..469ad26cde48 100644 ---- a/drivers/tty/serial/serial_port.c -+++ b/drivers/tty/serial/serial_port.c -@@ 
-8,7 +8,10 @@ - - #include - #include -+#include -+#include - #include -+#include - #include - #include - -@@ -146,6 +149,148 @@ void uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) - } - EXPORT_SYMBOL(uart_remove_one_port); - -+/** -+ * __uart_read_properties - read firmware properties of the given UART port -+ * @port: corresponding port -+ * @use_defaults: apply defaults (when %true) or validate the values (when %false) -+ * -+ * The following device properties are supported: -+ * - clock-frequency (optional) -+ * - fifo-size (optional) -+ * - no-loopback-test (optional) -+ * - reg-shift (defaults may apply) -+ * - reg-offset (value may be validated) -+ * - reg-io-width (defaults may apply or value may be validated) -+ * - interrupts (OF only) -+ * - serial [alias ID] (OF only) -+ * -+ * If the port->dev is of struct platform_device type the interrupt line -+ * will be retrieved via platform_get_irq() call against that device. -+ * Otherwise it will be assigned by fwnode_irq_get() call. In both cases -+ * the index 0 of the resource is used. -+ * -+ * The caller is responsible to initialize the following fields of the @port -+ * ->dev (must be valid) -+ * ->flags -+ * ->mapbase -+ * ->mapsize -+ * ->regshift (if @use_defaults is false) -+ * before calling this function. Alternatively the above mentioned fields -+ * may be zeroed, in such case the only ones, that have associated properties -+ * found, will be set to the respective values. -+ * -+ * If no error happened, the ->irq, ->mapbase, ->mapsize will be altered. -+ * The ->iotype is always altered. -+ * -+ * When @use_defaults is true and the respective property is not found -+ * the following values will be applied: -+ * ->regshift = 0 -+ * In this case IRQ must be provided, otherwise an error will be returned. 
-+ * -+ * When @use_defaults is false and the respective property is found -+ * the following values will be validated: -+ * - reg-io-width (->iotype) -+ * - reg-offset (->mapsize against ->mapbase) -+ * -+ * Returns: 0 on success or negative errno on failure -+ */ -+static int __uart_read_properties(struct uart_port *port, bool use_defaults) -+{ -+ struct device *dev = port->dev; -+ u32 value; -+ int ret; -+ -+ /* Read optional UART functional clock frequency */ -+ device_property_read_u32(dev, "clock-frequency", &port->uartclk); -+ -+ /* Read the registers alignment (default: 8-bit) */ -+ ret = device_property_read_u32(dev, "reg-shift", &value); -+ if (ret) -+ port->regshift = use_defaults ? 0 : port->regshift; -+ else -+ port->regshift = value; -+ -+ /* Read the registers I/O access type (default: MMIO 8-bit) */ -+ ret = device_property_read_u32(dev, "reg-io-width", &value); -+ if (ret) { -+ port->iotype = UPIO_MEM; -+ } else { -+ switch (value) { -+ case 1: -+ port->iotype = UPIO_MEM; -+ break; -+ case 2: -+ port->iotype = UPIO_MEM16; -+ break; -+ case 4: -+ port->iotype = device_is_big_endian(dev) ? 
UPIO_MEM32BE : UPIO_MEM32; -+ break; -+ default: -+ if (!use_defaults) { -+ dev_err(dev, "Unsupported reg-io-width (%u)\n", value); -+ return -EINVAL; -+ } -+ port->iotype = UPIO_UNKNOWN; -+ break; -+ } -+ } -+ -+ /* Read the address mapping base offset (default: no offset) */ -+ ret = device_property_read_u32(dev, "reg-offset", &value); -+ if (ret) -+ value = 0; -+ -+ /* Check for shifted address mapping overflow */ -+ if (!use_defaults && port->mapsize < value) { -+ dev_err(dev, "reg-offset %u exceeds region size %pa\n", value, &port->mapsize); -+ return -EINVAL; -+ } -+ -+ port->mapbase += value; -+ port->mapsize -= value; -+ -+ /* Read optional FIFO size */ -+ device_property_read_u32(dev, "fifo-size", &port->fifosize); -+ -+ if (device_property_read_bool(dev, "no-loopback-test")) -+ port->flags |= UPF_SKIP_TEST; -+ -+ /* Get index of serial line, if found in DT aliases */ -+ ret = of_alias_get_id(dev_of_node(dev), "serial"); -+ if (ret >= 0) -+ port->line = ret; -+ -+ if (dev_is_platform(dev)) -+ ret = platform_get_irq(to_platform_device(dev), 0); -+ else -+ ret = fwnode_irq_get(dev_fwnode(dev), 0); -+ if (ret == -EPROBE_DEFER) -+ return ret; -+ if (ret > 0) -+ port->irq = ret; -+ else if (use_defaults) -+ /* By default IRQ support is mandatory */ -+ return ret; -+ else -+ port->irq = 0; -+ -+ port->flags |= UPF_SHARE_IRQ; -+ -+ return 0; -+} -+ -+int uart_read_port_properties(struct uart_port *port) -+{ -+ return __uart_read_properties(port, true); -+} -+EXPORT_SYMBOL_GPL(uart_read_port_properties); -+ -+int uart_read_and_validate_port_properties(struct uart_port *port) -+{ -+ return __uart_read_properties(port, false); -+} -+EXPORT_SYMBOL_GPL(uart_read_and_validate_port_properties); -+ - static struct device_driver serial_port_driver = { - .name = "port", - .suppress_bind_attrs = true, -diff --git a/drivers/tty/serial/spacemit_k1x_uart.c b/drivers/tty/serial/spacemit_k1x_uart.c +diff --git a/drivers/tty/serial/lrw_uart.c b/drivers/tty/serial/lrw_uart.c new 
file mode 100644 -index 000000000000..6fa51bc4be80 +index 000000000000..491ef782a2a1 --- /dev/null -+++ b/drivers/tty/serial/spacemit_k1x_uart.c -@@ -0,0 +1,1979 @@ ++++ b/drivers/tty/serial/lrw_uart.c +@@ -0,0 +1,2839 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* -+ * Based on drivers/serial/8250.c by Russell King. -+ * -+ * Author: Nicolas Pitre -+ * Created: Feb 20, 2003 -+ * Copyright: (C) 2003 Monta Vista Software, Inc. -+ * Copyright: (C) 2023 Spacemit Co., Ltd. -+ * Note 1: This driver is made separate from the already too overloaded -+ * 8250.c because it needs some kirks of its own and that'll make it -+ * easier to add DMA support. ++ * Serial Port driver for LRW + * -+ * Note 2: I'm too sick of device allocation policies for serial ports. -+ * If someone else wants to request an "official" allocation of major/minor -+ * for this driver please be my guest. And don't forget that new hardware -+ * to come from Intel might have more than 3 or 4 of those UARTs. Let's -+ * hope for a better port registration and dynamic device allocation scheme -+ * with the serial core maintainer satisfaction to appear soon. ++ * Copyright (c) 2025, LRW CORPORATION. All rights reserved. 
+ */ + ++#include +#include +#include +#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include +#include ++#include ++#include +#include +#include +#include ++#include ++#include ++#include +#include -+#include -+#include +#include -+#include -+#include -+#include +#include -+#include -+#include -+#include -+#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + -+#define DMA_BLOCK UART_XMIT_SIZE -+#define DMA_BURST_SIZE (8) -+#define DMA_FIFO_THRESHOLD (32) -+#define DMA_RX_BLOCK_SIZE DMA_BLOCK -+#define DMA_BUF_POLLING_SWITCH (1) -+ -+#define PXA_UART_TX (0) -+#define PXA_UART_RX (1) -+ -+#define NUM_UART_PORTS (10) -+#define BT_UART_PORT (2) -+ -+#define UART_FCR_PXA_BUS32 (0x20) -+#define UART_FCR_PXA_TRAIL (0x10) -+#define UART_FOR (9) -+ -+#define PXA_NAME_LEN (8) -+ -+#define SUPPORT_POWER_QOS (1) -+ -+#define TX_DMA_RUNNING BIT(0) -+#define RX_DMA_RUNNING BIT(1) -+ -+#define PXA_TIMER_TIMEOUT (3 * HZ) -+#define BLOCK_SUSPEND_TIMEOUT (3000) -+ -+struct uart_pxa_dma { -+ unsigned int dma_status; -+ struct dma_chan *txdma_chan; -+ struct dma_chan *rxdma_chan; -+ struct dma_async_tx_descriptor *rx_desc; -+ struct dma_async_tx_descriptor *tx_desc; -+ void *txdma_addr; -+ void *rxdma_addr; -+ dma_addr_t txdma_addr_phys; -+ dma_addr_t rxdma_addr_phys; -+ int tx_stop; -+ int rx_stop; -+ dma_cookie_t rx_cookie; -+ dma_cookie_t tx_cookie; -+ int tx_size; -+ struct tasklet_struct tklet; ++#define UART_NR 14 ++ ++#define ISR_PASS_LIMIT 256 ++ ++#define LRW_UART_NAME "lrw-uart" ++ ++#define LRW_UART_TTY_PREFIX "ttyLRW" ++ ++/* LRW_UART_TX_FIFO_DEPTH: depth of the TX FIFO (in bytes) */ ++#define LRW_UART_TX_FIFO_DEPTH 16 ++ ++/* LRW_UART_RX_FIFO_DEPTH: depth of the RX FIFO (in bytes) */ ++#define LRW_UART_RX_FIFO_DEPTH 16 ++ ++/* LRW UART register offsets */ ++#define UARTDR 0x00 /* Data register */ ++#define UARTRSR 0x04 /* Receive status register */ ++#define UARTECR 0x04 /* 
Error clear register */ ++#define UARTSC 0x08 /* Special character register */ ++#define UARTMDR 0x0C /* RS485 Muti-drop register */ ++#define UARTTAT 0x10 /* RS485 turn-around time register */ ++#define UARTFCR 0x14 /* FIFO control register */ ++#define UARTFR 0x18 /* Flag register */ ++#define UARTIND 0x1C /* Integer baud rate register */ ++#define UARTFD 0x20 /* Fractional baud rate register */ ++#define UARTBSR 0x24 /* Baud sample rate register */ ++#define UARTFRCR 0x28 /* Frame control register */ ++#define UARTMCFG 0x2C /* config register */ ++#define UARTMCR 0x30 /* Modem control register */ ++#define UARTIRCR 0x34 /* IrDA mode control register */ ++#define UARTIMSC 0x38 /* Interrupt mask set/clear register */ ++#define UARTRIS 0x3C /* Raw interrupt status register */ ++#define UARTMIS 0x40 /* Masked interrupt states register */ ++#define UARTICR 0x44 /* Interrupt clear register */ ++#define UARTFCCR 0x48 /* Flow control register */ ++#define UARTRVS 0x58 /* Version register */ ++ ++#define UARTDR_OE BIT(11) ++#define UARTDR_BE BIT(10) ++#define UARTDR_PE BIT(9) ++#define UARTDR_FE BIT(8) ++ ++#define UARTRSR_OE BIT(3) ++#define UARTRSR_BE BIT(2) ++#define UARTRSR_PE BIT(1) ++#define UARTRSR_FE BIT(0) ++ ++#define UARTFCR_RXFTRS GENMASK(7, 5) ++#define UARTFCR_RXFTRS_RX1_8 FIELD_PREP_CONST(UARTFCR_RXFTRS, 0) ++#define UARTFCR_RXFTRS_RX2_8 FIELD_PREP_CONST(UARTFCR_RXFTRS, 1) ++#define UARTFCR_RXFTRS_RX4_8 FIELD_PREP_CONST(UARTFCR_RXFTRS, 2) ++#define UARTFCR_RXFTRS_RX6_8 FIELD_PREP_CONST(UARTFCR_RXFTRS, 3) ++#define UARTFCR_RXFTRS_RX7_8 FIELD_PREP_CONST(UARTFCR_RXFTRS, 4) ++#define UARTFCR_TXFTRS GENMASK(4, 2) ++#define UARTFCR_TXFTRS_TX1_8 FIELD_PREP_CONST(UARTFCR_TXFTRS, 0) ++#define UARTFCR_TXFTRS_TX2_8 FIELD_PREP_CONST(UARTFCR_TXFTRS, 1) ++#define UARTFCR_TXFTRS_TX4_8 FIELD_PREP_CONST(UARTFCR_TXFTRS, 2) ++#define UARTFCR_TXFTRS_TX6_8 FIELD_PREP_CONST(UARTFCR_TXFTRS, 3) ++#define UARTFCR_TXFTRS_TX7_8 FIELD_PREP_CONST(UARTFCR_TXFTRS, 4) ++#define 
UARTFCR_FEN BIT(0) ++ ++#define UARTFR_RI BIT(8) ++#define UARTFR_TXFE BIT(7) ++#define UARTFR_RXFF BIT(6) ++#define UARTFR_TXFF (1 << 5) /* used in ASM */ ++#define UARTFR_RXFE BIT(4) ++#define UARTFR_BUSY (1 << 3) /* used in ASM */ ++#define UARTFR_DCD BIT(2) ++#define UARTFR_DSR BIT(1) ++#define UARTFR_CTS BIT(0) ++#define UARTFR_TMSK (UARTFR_TXFF + UARTFR_BUSY) ++ ++#define UARTFRCR_STP2 BIT(5) ++#define UARTFRCR_SPS BIT(4) ++#define UARTFRCR_EOP BIT(3) ++#define UARTFRCR_PEN BIT(2) ++#define UARTFRCR_WLEN_8 0x3 ++#define UARTFRCR_WLEN_7 0x2 ++#define UARTFRCR_WLEN_6 0x1 ++#define UARTFRCR_WLEN_5 0x0 ++ ++#define UARTMCFG_LBE BIT(7) /* loopback enable */ ++#define UARTMCFG_RXE BIT(3) /* receive enable */ ++#define UARTMCFG_TXE BIT(2) /* transmit enable */ ++#define UARTMCFG_BRK BIT(1) /* send break */ ++#define UARTMCFG_UARTEN BIT(0) /* UART enable */ ++ ++#define UARTMCR_OUT2 BIT(3) /* OUT2 */ ++#define UARTMCR_OUT1 BIT(2) /* OUT1 */ ++#define UARTMCR_RTS BIT(1) /* RTS */ ++#define UARTMCR_DTR BIT(0) /* DTR */ ++ ++#define UARTIMSC_OEIM BIT(10) /* overrun error interrupt mask */ ++#define UARTIMSC_BEIM BIT(9) /* break error interrupt mask */ ++#define UARTIMSC_PEIM BIT(8) /* parity error interrupt mask */ ++#define UARTIMSC_FEIM BIT(7) /* framing error interrupt mask */ ++#define UARTIMSC_RTIM BIT(6) /* receive timeout interrupt mask */ ++#define UARTIMSC_TXIM BIT(5) /* transmit interrupt mask */ ++#define UARTIMSC_RXIM BIT(4) /* receive interrupt mask */ ++#define UARTIMSC_DSRMIM BIT(3) /* DSR interrupt mask */ ++#define UARTIMSC_DCDMIM BIT(2) /* DCD interrupt mask */ ++#define UARTIMSC_CTSMIM BIT(1) /* CTS interrupt mask */ ++#define UARTIMSC_RIMIM BIT(0) /* RI interrupt mask */ ++ ++#define UARTICR_OEIC BIT(10) /* overrun error interrupt clear */ ++#define UARTICR_BEIC BIT(9) /* break error interrupt clear */ ++#define UARTICR_PEIC BIT(8) /* parity error interrupt clear */ ++#define UARTICR_FEIC BIT(7) /* framing error interrupt clear */ ++#define 
UARTICR_RTIC BIT(6) /* receive timeout interrupt clear */ ++#define UARTICR_TXIC BIT(5) /* transmit interrupt clear */ ++#define UARTICR_RXIC BIT(4) /* receive interrupt clear */ ++#define UARTICR_DSRMIC BIT(3) /* DSR interrupt clear */ ++#define UARTICR_DCDMIC BIT(2) /* DCD interrupt clear */ ++#define UARTICR_CTSMIC BIT(1) /* CTS interrupt clear */ ++#define UARTICR_RIMIC BIT(0) /* RI interrupt clear */ ++ ++#define UARTFCCR_CTSEN BIT(5) /* CTS hardware flow control */ ++#define UARTFCCR_RTSEN BIT(4) /* RTS hardware flow control */ ++#define UARTFCCR_DMAONERR BIT(2) /* disable dma on error */ ++#define UARTFCCR_TXDMAE BIT(1) /* enable transmit dma */ ++#define UARTFCCR_RXDMAE BIT(0) /* enable receive dma */ ++ ++#define UARTRSR_ANY (UARTRSR_OE | UARTRSR_BE | UARTRSR_PE | UARTRSR_FE) ++#define UARTFR_MODEM_ANY (UARTFR_DCD | UARTFR_DSR | UARTFR_CTS) ++ ++#define UART_DR_ERROR (UARTDR_OE | UARTDR_BE | UARTDR_PE | UARTDR_FE) ++#define UART_DUMMY_DR_RX BIT(16) + -+#ifdef CONFIG_PM -+ void *tx_buf_save; -+ int tx_saved_len; -+#endif ++enum { ++ REG_DR, ++ REG_FCR, ++ REG_FR, ++ REG_IND, ++ REG_FD, ++ REG_BSR, ++ REG_FRCR, ++ REG_MCFG, ++ REG_MCR, ++ REG_IMSC, ++ REG_RIS, ++ REG_MIS, ++ REG_ICR, ++ REG_FCCR, ++ ++ /* The size of the array - must be last */ ++ REG_ARRAY_SIZE, ++}; ++ ++static u16 lrw_uart_std_offsets[REG_ARRAY_SIZE] = { ++ [REG_DR] = UARTDR, ++ [REG_FCR] = UARTFCR, ++ [REG_FR] = UARTFR, ++ [REG_IND] = UARTIND, ++ [REG_FD] = UARTFD, ++ [REG_BSR] = UARTBSR, ++ [REG_FRCR] = UARTFRCR, ++ [REG_MCFG] = UARTMCFG, ++ [REG_MCR] = UARTMCR, ++ [REG_IMSC] = UARTIMSC, ++ [REG_RIS] = UARTRIS, ++ [REG_MIS] = UARTMIS, ++ [REG_ICR] = UARTICR, ++ [REG_FCCR] = UARTFCCR, ++}; ++ ++/* There is by now at least one vendor with differing details, so handle it */ ++struct vendor_data { ++ const u16 *reg_offset; ++ unsigned int fcr; ++ unsigned int fr_busy; ++ unsigned int fr_dsr; ++ unsigned int fr_cts; ++ unsigned int fr_ri; ++ unsigned int inv_fr; ++ bool access_32b; ++ bool 
oversampling; ++ bool dma_threshold; ++ bool cts_event_workaround; ++ bool always_enabled; ++ bool fixed_options; ++}; ++ ++static struct vendor_data vendor_lrw = { ++ .reg_offset = lrw_uart_std_offsets, ++ .fcr = UARTFCR_RXFTRS_RX4_8 | UARTFCR_TXFTRS_TX4_8 | UARTFCR_FEN, ++ .fr_busy = UARTFR_BUSY, ++ .fr_dsr = UARTFR_DSR, ++ .fr_cts = UARTFR_CTS, ++ .fr_ri = UARTFR_RI, ++ .access_32b = true, ++ .oversampling = false, ++ .dma_threshold = false, ++ .cts_event_workaround = false, ++ .always_enabled = true, ++ .fixed_options = true, ++}; ++ ++/* Deals with DMA transactions */ ++ ++struct lrw_uart_dmabuf { ++ dma_addr_t dma; ++ size_t len; ++ char *buf; ++}; + -+ bool dma_init; ++struct lrw_uart_dmarx_data { ++ struct dma_chan *chan; ++ struct completion complete; ++ bool use_buf_b; ++ struct lrw_uart_dmabuf dbuf_a; ++ struct lrw_uart_dmabuf dbuf_b; ++ dma_cookie_t cookie; ++ bool running; ++ struct timer_list timer; ++ unsigned int last_residue; ++ unsigned long last_jiffies; ++ bool auto_poll_rate; ++ unsigned int poll_rate; ++ unsigned int poll_timeout; ++}; ++ ++struct lrw_uart_dmatx_data { ++ struct dma_chan *chan; ++ dma_addr_t dma; ++ size_t len; ++ char *buf; ++ bool queued; ++}; + -+#if (DMA_BUF_POLLING_SWITCH == 1) -+ int dma_poll_timeout; -+ int dma_poll_max_time; -+#endif ++struct lrw_uart_data { ++ bool (*dma_filter)(struct dma_chan *chan, void *filter_param); ++ void *dma_rx_param; ++ void *dma_tx_param; ++ bool dma_rx_poll_enable; ++ unsigned int dma_rx_poll_rate; ++ unsigned int dma_rx_poll_timeout; ++ void (*init)(void); ++ void (*exit)(void); +}; + -+struct uart_pxa_port { ++/* ++ * We wrap our port structure around the generic uart_port. 
++ */ ++struct lrw_uart_port { + struct uart_port port; -+ unsigned char ier; -+ unsigned char lcr; -+ unsigned char mcr; -+ unsigned int lsr_break_flag; -+ struct clk *fclk; -+ struct clk *gclk; -+ struct reset_control *resets; -+ char name[PXA_NAME_LEN]; -+ -+ struct timer_list pxa_timer; -+ int edge_wakeup_gpio; -+ struct work_struct uart_tx_lpm_work; -+ int dma_enable; -+ struct uart_pxa_dma uart_dma; -+ unsigned long flags; -+ unsigned int cons_udelay; -+ bool from_resume; -+ bool device_ctrl_rts; -+ bool in_resume; -+ unsigned int current_baud; -+}; -+ -+static void pxa_uart_transmit_dma_cb(void *data); -+static void pxa_uart_receive_dma_cb(void *data); -+static void pxa_uart_transmit_dma_start(struct uart_pxa_port *up, int count); -+static void pxa_uart_receive_dma_start(struct uart_pxa_port *up); -+static inline void wait_for_xmitr(struct uart_pxa_port *up); -+static unsigned int serial_pxa_tx_empty(struct uart_port *port); -+#ifdef CONFIG_PM -+static void _pxa_timer_handler(struct uart_pxa_port *up); ++ const u16 *reg_offset; ++ struct clk *clk; ++ const struct vendor_data *vendor; ++ unsigned int im; /* interrupt mask */ ++ unsigned int old_status; ++ unsigned int fifosize; /* vendor-specific */ ++ unsigned int fixed_baud; /* vendor-set fixed baud rate */ ++ char type[12]; ++ bool rs485_tx_started; ++ unsigned int rs485_tx_drain_interval; /* usecs */ ++#ifdef CONFIG_DMA_ENGINE ++ /* DMA stuff */ ++ unsigned int dmacr; /* dma control reg */ ++ bool using_tx_dma; ++ bool using_rx_dma; ++ struct lrw_uart_dmarx_data dmarx; ++ struct lrw_uart_dmatx_data dmatx; ++ bool dma_probed; +#endif ++}; + -+static inline void stop_dma(struct uart_pxa_port *up, int read) -+{ -+ unsigned long flags; -+ struct uart_pxa_dma *pxa_dma = &up->uart_dma; -+ struct dma_chan *channel; -+ -+ if (!pxa_dma->dma_init) -+ return; -+ -+ channel = read ? 
pxa_dma->rxdma_chan : pxa_dma->txdma_chan; -+ -+ dmaengine_terminate_all(channel); -+ spin_lock_irqsave(&up->port.lock, flags); -+ if (read) -+ pxa_dma->dma_status &= ~RX_DMA_RUNNING; -+ else -+ pxa_dma->dma_status &= ~TX_DMA_RUNNING; -+ spin_unlock_irqrestore(&up->port.lock, flags); -+} ++static unsigned int lrw_uart_tx_empty(struct uart_port *port); + -+static inline unsigned int serial_in(struct uart_pxa_port *up, int offset) ++static unsigned int lrw_uart_reg_to_offset(const struct lrw_uart_port *sup, ++ unsigned int reg) +{ -+ offset <<= 2; -+ return readl(up->port.membase + offset); ++ return sup->reg_offset[reg]; +} + -+static inline void serial_out(struct uart_pxa_port *up, int offset, int value) ++static unsigned int lrw_uart_read(const struct lrw_uart_port *sup, ++ unsigned int reg) +{ -+ offset <<= 2; -+ writel(value, up->port.membase + offset); ++ void __iomem *addr = sup->port.membase + lrw_uart_reg_to_offset(sup, reg); ++ ++ return (sup->port.iotype == UPIO_MEM32) ? ++ readl_relaxed(addr) : readw_relaxed(addr); +} + -+static void serial_pxa_enable_ms(struct uart_port *port) ++static void lrw_uart_write(unsigned int val, const struct lrw_uart_port *sup, ++ unsigned int reg) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ -+ if (up->dma_enable) -+ return; ++ void __iomem *addr = sup->port.membase + lrw_uart_reg_to_offset(sup, reg); + -+ up->ier |= UART_IER_MSI; -+ serial_out(up, UART_IER, up->ier); ++ if (sup->port.iotype == UPIO_MEM32) ++ writel_relaxed(val, addr); ++ else ++ writew_relaxed(val, addr); +} + -+static void serial_pxa_stop_tx(struct uart_port *port) ++/* ++ * Reads up to 256 characters from the FIFO or until it's empty and ++ * inserts them into the TTY layer. Returns the number of characters ++ * read from the FIFO. 
++ */ ++static int lrw_uart_fifo_to_tty(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ unsigned int timeout = 0x100000 / up->cons_udelay; -+ unsigned long flags; ++ unsigned int ch, fifotaken; ++ int sysrq; ++ u16 status; ++ u8 flag; + -+ if (up->dma_enable) { -+ up->uart_dma.tx_stop = 1; ++ for (fifotaken = 0; fifotaken != 256; fifotaken++) { ++ status = lrw_uart_read(sup, REG_FR); ++ if (status & UARTFR_RXFE) ++ break; + -+ if (up->ier & UART_IER_DMAE && up->uart_dma.dma_init) { -+ while (dma_async_is_tx_complete(up->uart_dma.txdma_chan, -+ up->uart_dma.tx_cookie, NULL, NULL) -+ != DMA_COMPLETE && (timeout-- > 0)) { -+ spin_unlock(&up->port.lock); -+ local_irq_save(flags); -+ local_irq_enable(); -+ udelay(up->cons_udelay); -+ local_irq_disable(); -+ local_irq_restore(flags); -+ spin_lock(&up->port.lock); ++ /* Take chars from the FIFO and update status */ ++ ch = lrw_uart_read(sup, REG_DR) | UART_DUMMY_DR_RX; ++ flag = TTY_NORMAL; ++ sup->port.icount.rx++; ++ ++ if (unlikely(ch & UART_DR_ERROR)) { ++ if (ch & UARTDR_BE) { ++ ch &= ~(UARTDR_FE | UARTDR_PE); ++ sup->port.icount.brk++; ++ if (uart_handle_break(&sup->port)) ++ continue; ++ } else if (ch & UARTDR_PE) { ++ sup->port.icount.parity++; ++ } else if (ch & UARTDR_FE) { ++ sup->port.icount.frame++; + } ++ if (ch & UARTDR_OE) ++ sup->port.icount.overrun++; + -+ WARN_ON_ONCE(timeout == 0); -+ } -+ } else { -+ if (up->ier & UART_IER_THRI) { -+ up->ier &= ~UART_IER_THRI; -+ serial_out(up, UART_IER, up->ier); ++ ch &= sup->port.read_status_mask; ++ ++ if (ch & UARTDR_BE) ++ flag = TTY_BREAK; ++ else if (ch & UARTDR_PE) ++ flag = TTY_PARITY; ++ else if (ch & UARTDR_FE) ++ flag = TTY_FRAME; + } ++ ++ uart_port_unlock(&sup->port); ++ sysrq = uart_prepare_sysrq_char(&sup->port, ch & 255); ++ uart_port_lock(&sup->port); ++ ++ if (!sysrq) ++ uart_insert_char(&sup->port, ch, UARTDR_OE, ch, flag); + } ++ ++ return fifotaken; +} + -+static void serial_pxa_stop_rx(struct 
uart_port *port) ++/* ++ * All the DMA operation mode stuff goes inside this ifdef. ++ * This assumes that you have a generic DMA device interface, ++ * no custom DMA interfaces are supported. ++ */ ++#ifdef CONFIG_DMA_ENGINE ++ ++#define LRW_UART_DMA_BUFFER_SIZE PAGE_SIZE ++ ++static int lrw_uart_dmabuf_init(struct dma_chan *chan, struct lrw_uart_dmabuf *db, ++ enum dma_data_direction dir) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ db->buf = dma_alloc_coherent(chan->device->dev, LRW_UART_DMA_BUFFER_SIZE, ++ &db->dma, GFP_KERNEL); ++ if (!db->buf) ++ return -ENOMEM; ++ db->len = LRW_UART_DMA_BUFFER_SIZE; + -+ if (up->dma_enable) { -+ if (up->ier & UART_IER_DMAE) { -+ spin_unlock_irqrestore(&up->port.lock, up->flags); -+ stop_dma(up, PXA_UART_RX); -+ spin_lock_irqsave(&up->port.lock, up->flags); -+ } -+ up->uart_dma.rx_stop = 1; -+ } else { -+ up->ier &= ~UART_IER_RLSI; -+ up->port.read_status_mask &= ~UART_LSR_DR; -+ serial_out(up, UART_IER, up->ier); ++ return 0; ++} ++ ++static void lrw_uart_dmabuf_free(struct dma_chan *chan, struct lrw_uart_dmabuf *db, ++ enum dma_data_direction dir) ++{ ++ if (db->buf) { ++ dma_free_coherent(chan->device->dev, ++ LRW_UART_DMA_BUFFER_SIZE, db->buf, db->dma); + } +} + -+static inline void receive_chars(struct uart_pxa_port *up, int *status) ++static void lrw_uart_dma_probe(struct lrw_uart_port *sup) +{ -+ unsigned int ch, flag; -+ int max_count = 256; ++ /* DMA is the sole user of the platform data right now */ ++ struct lrw_uart_data *plat = dev_get_platdata(sup->port.dev); ++ struct device *dev = sup->port.dev; ++ struct dma_slave_config tx_conf = { ++ .dst_addr = sup->port.mapbase + ++ lrw_uart_reg_to_offset(sup, REG_DR), ++ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, ++ .direction = DMA_MEM_TO_DEV, ++ .dst_maxburst = sup->fifosize >> 1, ++ .device_fc = false, ++ }; ++ struct dma_chan *chan; ++ dma_cap_mask_t mask; + -+ do { -+ spin_lock_irqsave(&up->port.lock, up->flags); -+ up->ier &= ~UART_IER_RTOIE; 
-+ serial_out(up, UART_IER, up->ier); -+ spin_unlock_irqrestore(&up->port.lock, up->flags); ++ sup->dma_probed = true; ++ chan = dma_request_chan(dev, "tx"); ++ if (IS_ERR(chan)) { ++ if (PTR_ERR(chan) == -EPROBE_DEFER) { ++ sup->dma_probed = false; ++ return; ++ } + -+ ch = serial_in(up, UART_RX); -+ flag = TTY_NORMAL; -+ up->port.icount.rx++; ++ /* We need platform data */ ++ if (!plat || !plat->dma_filter) { ++ dev_dbg(sup->port.dev, "no DMA platform data\n"); ++ return; ++ } + -+ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | -+ UART_LSR_FE | UART_LSR_OE))) { -+ if (*status & UART_LSR_BI) { -+ *status &= ~(UART_LSR_FE | UART_LSR_PE); -+ up->port.icount.brk++; -+ if (uart_handle_break(&up->port)) -+ goto ignore_char; -+ } else if (*status & UART_LSR_PE) { -+ up->port.icount.parity++; -+ } else if (*status & UART_LSR_FE) { -+ up->port.icount.frame++; -+ } ++ /* Try to acquire a generic DMA engine slave TX channel */ ++ dma_cap_zero(mask); ++ dma_cap_set(DMA_SLAVE, mask); ++ ++ chan = dma_request_channel(mask, plat->dma_filter, ++ plat->dma_tx_param); ++ if (!chan) { ++ dev_err(sup->port.dev, "no TX DMA channel!\n"); ++ return; ++ } ++ } ++ ++ dmaengine_slave_config(chan, &tx_conf); ++ sup->dmatx.chan = chan; + -+ if (*status & UART_LSR_OE) -+ up->port.icount.overrun++; ++ dev_info(sup->port.dev, "DMA channel TX %s\n", ++ dma_chan_name(sup->dmatx.chan)); ++ ++ /* Optionally make use of an RX channel as well */ ++ chan = dma_request_slave_channel(dev, "rx"); ++ ++ if (IS_ERR(chan) && plat && plat->dma_rx_param) { ++ chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); ++ ++ if (!chan) { ++ dev_err(sup->port.dev, "no RX DMA channel!\n"); ++ return; ++ } ++ } + -+ *status &= up->port.read_status_mask; ++ if (!IS_ERR(chan)) { ++ struct dma_slave_config rx_conf = { ++ .src_addr = sup->port.mapbase + ++ lrw_uart_reg_to_offset(sup, REG_DR), ++ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE, ++ .direction = DMA_DEV_TO_MEM, ++ .src_maxburst = 
sup->fifosize >> 2, ++ .device_fc = false, ++ }; ++ struct dma_slave_caps caps; + -+#ifdef CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE -+ if (up->port.line == up->port.cons->index) { -+ *status |= up->lsr_break_flag; -+ up->lsr_break_flag = 0; ++ /* ++ * Some DMA controllers provide information on their capabilities. ++ * If the controller does, check for suitable residue processing ++ * otherwise assime all is well. ++ */ ++ if (dma_get_slave_caps(chan, &caps) == 0) { ++ if (caps.residue_granularity == ++ DMA_RESIDUE_GRANULARITY_DESCRIPTOR) { ++ dma_release_channel(chan); ++ dev_info(sup->port.dev, ++ "RX DMA disabled - no residue processing\n"); ++ return; + } -+#endif -+ if (*status & UART_LSR_BI) -+ flag = TTY_BREAK; -+ else if (*status & UART_LSR_PE) -+ flag = TTY_PARITY; -+ else if (*status & UART_LSR_FE) -+ flag = TTY_FRAME; + } -+ if (!uart_handle_sysrq_char(&up->port, ch)) -+ uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag); ++ dmaengine_slave_config(chan, &rx_conf); ++ sup->dmarx.chan = chan; + -+ignore_char: -+ *status = serial_in(up, UART_LSR); -+ } while ((*status & UART_LSR_DR) && (max_count-- > 0)); -+ tty_flip_buffer_push(&up->port.state->port); ++ sup->dmarx.auto_poll_rate = false; ++ if (plat && plat->dma_rx_poll_enable) { ++ /* Set poll rate if specified. */ ++ if (plat->dma_rx_poll_rate) { ++ sup->dmarx.auto_poll_rate = false; ++ sup->dmarx.poll_rate = plat->dma_rx_poll_rate; ++ } else { ++ /* ++ * 100 ms defaults to poll rate if not ++ * specified. This will be adjusted with ++ * the baud rate at set_termios. ++ */ ++ sup->dmarx.auto_poll_rate = true; ++ sup->dmarx.poll_rate = 100; ++ } ++ /* 3 secs defaults poll_timeout if not specified. 
*/ ++ if (plat->dma_rx_poll_timeout) ++ sup->dmarx.poll_timeout = ++ plat->dma_rx_poll_timeout; ++ else ++ sup->dmarx.poll_timeout = 3000; ++ } else if (!plat && dev->of_node) { ++ sup->dmarx.auto_poll_rate = ++ of_property_read_bool(dev->of_node, "auto-poll"); ++ if (sup->dmarx.auto_poll_rate) { ++ u32 x; ++ ++ if (of_property_read_u32(dev->of_node, "poll-rate-ms", &x) == 0) ++ sup->dmarx.poll_rate = x; ++ else ++ sup->dmarx.poll_rate = 100; ++ if (of_property_read_u32(dev->of_node, "poll-timeout-ms", &x) == 0) ++ sup->dmarx.poll_timeout = x; ++ else ++ sup->dmarx.poll_timeout = 3000; ++ } ++ } ++ dev_info(sup->port.dev, "DMA channel RX %s\n", ++ dma_chan_name(sup->dmarx.chan)); ++ } ++} + -+ spin_lock_irqsave(&up->port.lock, up->flags); -+ up->ier |= UART_IER_RTOIE; -+ serial_out(up, UART_IER, up->ier); -+ spin_unlock_irqrestore(&up->port.lock, up->flags); ++static void lrw_uart_dma_remove(struct lrw_uart_port *sup) ++{ ++ if (sup->dmatx.chan) ++ dma_release_channel(sup->dmatx.chan); ++ if (sup->dmarx.chan) ++ dma_release_channel(sup->dmarx.chan); +} + -+static void transmit_chars(struct uart_pxa_port *up) ++/* Forward declare these for the refill routine */ ++static int lrw_uart_dma_tx_refill(struct lrw_uart_port *sup); ++static void lrw_uart_start_tx_pio(struct lrw_uart_port *sup); ++ ++/* ++ * The current DMA TX buffer has been sent. ++ * Try to queue up another DMA buffer. 
++ */ ++static void lrw_uart_dma_tx_callback(void *data) +{ -+ struct circ_buf *xmit = &up->port.state->xmit; -+ int count; ++ struct lrw_uart_port *sup = data; ++ struct lrw_uart_dmatx_data *dmatx = &sup->dmatx; ++ unsigned long flags; ++ u16 dmacr; ++ ++ uart_port_lock_irqsave(&sup->port, &flags); ++ if (sup->dmatx.queued) ++ dma_unmap_single(dmatx->chan->device->dev, dmatx->dma, ++ dmatx->len, DMA_TO_DEVICE); + -+ if (up->port.x_char) { -+ serial_out(up, UART_TX, up->port.x_char); -+ up->port.icount.tx++; -+ up->port.x_char = 0; ++ dmacr = sup->dmacr; ++ sup->dmacr = dmacr & ~UARTFCCR_TXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); ++ ++ /* ++ * If TX DMA was disabled, it means that we've stopped the DMA for ++ * some reason (eg, XOFF received, or we want to send an X-char.) ++ * ++ * Note: we need to be careful here of a potential race between DMA ++ * and the rest of the driver - if the driver disables TX DMA while ++ * a TX buffer completing, we must update the tx queued status to ++ * get further refills (hence we check dmacr). ++ */ ++ if (!(dmacr & UARTFCCR_TXDMAE) || uart_tx_stopped(&sup->port) || ++ uart_circ_empty(&sup->port.state->xmit)) { ++ sup->dmatx.queued = false; ++ uart_port_unlock_irqrestore(&sup->port, flags); + return; + } -+ if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { -+ spin_lock_irqsave(&up->port.lock, up->flags); -+ serial_pxa_stop_tx(&up->port); -+ spin_unlock_irqrestore(&up->port.lock, up->flags); -+ return; ++ ++ if (lrw_uart_dma_tx_refill(sup) <= 0) ++ /* ++ * We didn't queue a DMA buffer for some reason, but we ++ * have data pending to be sent. Re-enable the TX IRQ. ++ */ ++ lrw_uart_start_tx_pio(sup); ++ ++ uart_port_unlock_irqrestore(&sup->port, flags); ++} ++ ++/* ++ * Try to refill the TX DMA buffer. ++ * Locking: called with port lock held and IRQs disabled. ++ * Returns: ++ * 1 if we queued up a TX DMA buffer. 
++ * 0 if we didn't want to handle this by DMA ++ * <0 on error ++ */ ++static int lrw_uart_dma_tx_refill(struct lrw_uart_port *sup) ++{ ++ struct lrw_uart_dmatx_data *dmatx = &sup->dmatx; ++ struct dma_chan *chan = dmatx->chan; ++ struct dma_device *dma_dev = chan->device; ++ struct dma_async_tx_descriptor *desc; ++ struct circ_buf *xmit = &sup->port.state->xmit; ++ unsigned int count; ++ ++ /* ++ * Try to avoid the overhead involved in using DMA if the ++ * transaction fits in the first half of the FIFO, by using ++ * the standard interrupt handling. This ensures that we ++ * issue a uart_write_wakeup() at the appropriate time. ++ */ ++ count = uart_circ_chars_pending(xmit); ++ if (count < (sup->fifosize >> 1)) { ++ sup->dmatx.queued = false; ++ return 0; + } + -+ count = up->port.fifosize / 2; -+ do { -+ serial_out(up, UART_TX, xmit->buf[xmit->tail]); -+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); -+ up->port.icount.tx++; -+ if (uart_circ_empty(xmit)) -+ break; -+ } while (--count > 0); ++ /* ++ * Bodge: don't send the last character by DMA, as this ++ * will prevent XON from notifying us to restart DMA. 
++ */ ++ count -= 1; + -+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) -+ uart_write_wakeup(&up->port); ++ /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */ ++ if (count > LRW_UART_DMA_BUFFER_SIZE) ++ count = LRW_UART_DMA_BUFFER_SIZE; + -+ if (uart_circ_empty(xmit)) { -+ spin_lock_irqsave(&up->port.lock, up->flags); -+ serial_pxa_stop_tx(&up->port); -+ spin_unlock_irqrestore(&up->port.lock, up->flags); ++ if (xmit->tail < xmit->head) ++ memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count); ++ else { ++ size_t first = UART_XMIT_SIZE - xmit->tail; ++ size_t second; ++ ++ if (first > count) ++ first = count; ++ second = count - first; ++ ++ memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first); ++ if (second) ++ memcpy(&dmatx->buf[first], &xmit->buf[0], second); + } ++ ++ dmatx->len = count; ++ dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count, ++ DMA_TO_DEVICE); ++ if (dmatx->dma == DMA_MAPPING_ERROR) { ++ sup->dmatx.queued = false; ++ dev_dbg(sup->port.dev, "unable to map TX DMA\n"); ++ return -EBUSY; ++ } ++ ++ desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV, ++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++ if (!desc) { ++ dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE); ++ sup->dmatx.queued = false; ++ /* ++ * If DMA cannot be used right now, we complete this ++ * transaction via IRQ and let the TTY layer retry. 
++ */ ++ dev_dbg(sup->port.dev, "TX DMA busy\n"); ++ return -EBUSY; ++ } ++ ++ /* Some data to go along to the callback */ ++ desc->callback = lrw_uart_dma_tx_callback; ++ desc->callback_param = sup; ++ ++ /* All errors should happen at prepare time */ ++ dmaengine_submit(desc); ++ ++ /* Fire the DMA transaction */ ++ dma_dev->device_issue_pending(chan); ++ ++ sup->dmacr |= UARTFCCR_TXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); ++ sup->dmatx.queued = true; ++ ++ /* ++ * Now we know that DMA will fire, so advance the ring buffer ++ * with the stuff we just dispatched. ++ */ ++ uart_xmit_advance(&sup->port, count); ++ ++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) ++ uart_write_wakeup(&sup->port); ++ ++ return 1; +} + -+static inline void dma_receive_chars(struct uart_pxa_port *up, int *status) ++/* ++ * We received a transmit interrupt without a pending X-char but with ++ * pending characters. ++ * Locking: called with port lock held and IRQs disabled. ++ * Returns: ++ * false if we want to use PIO to transmit ++ * true if we queued a DMA buffer ++ */ ++static bool lrw_uart_dma_tx_irq(struct lrw_uart_port *sup) +{ -+ struct tty_port *port = &up->port.state->port; -+ unsigned char ch; -+ int max_count = 256; -+ int count = 0; -+ unsigned char *tmp; -+ unsigned int flag = TTY_NORMAL; -+ struct uart_pxa_dma *pxa_dma = &up->uart_dma; -+ struct dma_tx_state dma_state; ++ if (!sup->using_tx_dma) ++ return false; + -+ if (!pxa_dma->dma_init) -+ return; ++ /* ++ * If we already have a TX buffer queued, but received a ++ * TX interrupt, it will be because we've just sent an X-char. ++ * Ensure the TX DMA is enabled and the TX IRQ is disabled. 
++ */ ++ if (sup->dmatx.queued) { ++ sup->dmacr |= UARTFCCR_TXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); ++ sup->im &= ~UARTIMSC_TXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ return true; ++ } + -+ dmaengine_pause(pxa_dma->rxdma_chan); -+ dmaengine_tx_status(pxa_dma->rxdma_chan, pxa_dma->rx_cookie, -+ &dma_state); -+ count = DMA_RX_BLOCK_SIZE - dma_state.residue; -+ tmp = pxa_dma->rxdma_addr; -+ if (up->port.sysrq) { -+ while (count > 0) { -+ if (!uart_handle_sysrq_char(&up->port, *tmp)) { -+ uart_insert_char(&up->port, *status, 0, *tmp, flag); -+ up->port.icount.rx++; -+ } -+ tmp++; -+ count--; -+ } -+ } else { -+ tty_insert_flip_string(port, tmp, count); -+ up->port.icount.rx += count; ++ /* ++ * We don't have a TX buffer queued, so try to queue one. ++ * If we successfully queued a buffer, mask the TX IRQ. ++ */ ++ if (lrw_uart_dma_tx_refill(sup) > 0) { ++ sup->im &= ~UARTIMSC_TXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ return true; + } ++ return false; ++} + -+ do { -+ ch = serial_in(up, UART_RX); -+ flag = TTY_NORMAL; -+ up->port.icount.rx++; ++/* ++ * Stop the DMA transmit (eg, due to received XOFF). ++ * Locking: called with port lock held and IRQs disabled. ++ */ ++static inline void lrw_uart_dma_tx_stop(struct lrw_uart_port *sup) ++{ ++ if (sup->dmatx.queued) { ++ sup->dmacr &= ~UARTFCCR_TXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); ++ } ++} + -+ if (unlikely(*status & (UART_LSR_BI | UART_LSR_PE | -+ UART_LSR_FE | UART_LSR_OE))) { -+ if (*status & UART_LSR_BI) { -+ *status &= ~(UART_LSR_FE | UART_LSR_PE); -+ up->port.icount.brk++; -+ if (uart_handle_break(&up->port)) -+ goto ignore_char2; -+ } else if (*status & UART_LSR_PE) { -+ up->port.icount.parity++; -+ } else if (*status & UART_LSR_FE) { -+ up->port.icount.frame++; -+ } ++/* ++ * Try to start a DMA transmit, or in the case of an XON/OFF ++ * character queued for send, try to get that character out ASAP. ++ * Locking: called with port lock held and IRQs disabled. 
++ * Returns: ++ * false if we want the TX IRQ to be enabled ++ * true if we have a buffer queued ++ */ ++static inline bool lrw_uart_dma_tx_start(struct lrw_uart_port *sup) ++{ ++ u16 dmacr; + -+ if (*status & UART_LSR_OE) -+ up->port.icount.overrun++; ++ if (!sup->using_tx_dma) ++ return false; + -+ *status &= up->port.read_status_mask; ++ if (!sup->port.x_char) { ++ /* no X-char, try to push chars out in DMA mode */ ++ bool ret = true; + -+#ifdef CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE -+ if (up->port.line == up->port.cons->index) { -+ *status |= up->lsr_break_flag; -+ up->lsr_break_flag = 0; ++ if (!sup->dmatx.queued) { ++ if (lrw_uart_dma_tx_refill(sup) > 0) { ++ sup->im &= ~UARTIMSC_TXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ } else { ++ ret = false; + } -+#endif -+ if (*status & UART_LSR_BI) -+ flag = TTY_BREAK; -+ else if (*status & UART_LSR_PE) -+ flag = TTY_PARITY; -+ else if (*status & UART_LSR_FE) -+ flag = TTY_FRAME; ++ } else if (!(sup->dmacr & UARTFCCR_TXDMAE)) { ++ sup->dmacr |= UARTFCCR_TXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); + } -+ if (!uart_handle_sysrq_char(&up->port, ch)) -+ uart_insert_char(&up->port, *status, UART_LSR_OE, -+ ch, flag); -+ignore_char2: -+ *status = serial_in(up, UART_LSR); -+ } while ((*status & UART_LSR_DR) && (max_count-- > 0)); -+ -+ tty_flip_buffer_push(port); -+ stop_dma(up, 1); -+ if (pxa_dma->rx_stop) -+ return; -+ pxa_uart_receive_dma_start(up); -+} ++ return ret; ++ } + -+static void serial_pxa_start_tx(struct uart_port *port) -+{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ /* ++ * We have an X-char to send. Disable DMA to prevent it loading ++ * the TX fifo, and then see if we can stuff it into the FIFO. 
++ */ ++ dmacr = sup->dmacr; ++ sup->dmacr &= ~UARTFCCR_TXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); + -+ if (up->dma_enable) { -+ up->uart_dma.tx_stop = 0; -+ tasklet_schedule(&up->uart_dma.tklet); -+ } else { -+ if (!(up->ier & UART_IER_THRI)) { -+ up->ier |= UART_IER_THRI; -+ serial_out(up, UART_IER, up->ier); -+ } ++ if (lrw_uart_read(sup, REG_FR) & UARTFR_TXFF) { ++ /* ++ * No space in the FIFO, so enable the transmit interrupt ++ * so we know when there is space. Note that once we've ++ * loaded the character, we should just re-enable DMA. ++ */ ++ return false; + } ++ ++ lrw_uart_write(sup->port.x_char, sup, REG_DR); ++ sup->port.icount.tx++; ++ sup->port.x_char = 0; ++ ++ /* Success - restore the DMA state */ ++ sup->dmacr = dmacr; ++ lrw_uart_write(dmacr, sup, REG_FCCR); ++ ++ return true; +} + -+static inline void check_modem_status(struct uart_pxa_port *up) ++/* ++ * Flush the transmit buffer. ++ * Locking: called with port lock held and IRQs disabled. ++ */ ++static void lrw_uart_dma_flush_buffer(struct uart_port *port) ++__releases(&sup->port.lock) ++__acquires(&sup->port.lock) +{ -+ int status; -+ -+ status = serial_in(up, UART_MSR); ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ if ((status & UART_MSR_ANY_DELTA) == 0) ++ if (!sup->using_tx_dma) + return; + -+ spin_lock(&up->port.lock); -+ if (status & UART_MSR_TERI) -+ up->port.icount.rng++; -+ if (status & UART_MSR_DDSR) -+ up->port.icount.dsr++; -+ if (status & UART_MSR_DDCD) -+ uart_handle_dcd_change(&up->port, status & UART_MSR_DCD); -+ if (status & UART_MSR_DCTS) -+ uart_handle_cts_change(&up->port, status & UART_MSR_CTS); -+ spin_unlock(&up->port.lock); ++ dmaengine_terminate_async(sup->dmatx.chan); + -+ wake_up_interruptible(&up->port.state->port.delta_msr_wait); ++ if (sup->dmatx.queued) { ++ dma_unmap_single(sup->dmatx.chan->device->dev, sup->dmatx.dma, ++ sup->dmatx.len, DMA_TO_DEVICE); ++ sup->dmatx.queued = false; ++ sup->dmacr &= 
~UARTFCCR_TXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); ++ } +} + -+static int serial_pxa_is_open(struct uart_pxa_port *up); ++static void lrw_uart_dma_rx_callback(void *data); + -+static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id) ++static int lrw_uart_dma_rx_trigger_dma(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *up = dev_id; -+ unsigned int iir, lsr; ++ struct dma_chan *rxchan = sup->dmarx.chan; ++ struct lrw_uart_dmarx_data *dmarx = &sup->dmarx; ++ struct dma_async_tx_descriptor *desc; ++ struct lrw_uart_dmabuf *dbuf; + -+ iir = serial_in(up, UART_IIR); -+ if (iir & UART_IIR_NO_INT) -+ return IRQ_NONE; ++ if (!rxchan) ++ return -EIO; + -+ if (!serial_pxa_is_open(up)) -+ return IRQ_HANDLED; ++ /* Start the RX DMA job */ ++ dbuf = sup->dmarx.use_buf_b ? ++ &sup->dmarx.dbuf_b : &sup->dmarx.dbuf_a; ++ desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len, ++ DMA_DEV_TO_MEM, ++ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); ++ /* ++ * If the DMA engine is busy and cannot prepare a ++ * channel, no big deal, the driver will fall back ++ * to interrupt mode as a result of this error code. 
++ */ ++ if (!desc) { ++ sup->dmarx.running = false; ++ dmaengine_terminate_all(rxchan); ++ return -EBUSY; ++ } + -+#ifdef CONFIG_PM -+#if SUPPORT_POWER_QOS -+ if (!mod_timer(&up->pxa_timer, jiffies + PXA_TIMER_TIMEOUT)) -+ pm_runtime_get_sync(up->port.dev); -+#endif -+#endif ++ /* Some data to go along to the callback */ ++ desc->callback = lrw_uart_dma_rx_callback; ++ desc->callback_param = sup; ++ dmarx->cookie = dmaengine_submit(desc); ++ dma_async_issue_pending(rxchan); + -+ lsr = serial_in(up, UART_LSR); -+ if (up->dma_enable) { -+ if (lsr & UART_LSR_FIFOE) -+ dma_receive_chars(up, &lsr); -+ } else { -+ if (lsr & UART_LSR_DR) { -+ receive_chars(up, &lsr); -+ if (up->edge_wakeup_gpio >= 0) -+ pm_wakeup_event(up->port.dev, BLOCK_SUSPEND_TIMEOUT); -+ } ++ sup->dmacr |= UARTFCCR_RXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); ++ sup->dmarx.running = true; + -+ check_modem_status(up); -+ if (lsr & UART_LSR_THRE) { -+ transmit_chars(up); -+ while (!serial_pxa_tx_empty((struct uart_port *)dev_id)) -+ ; -+ } -+ } ++ sup->im &= ~UARTIMSC_RXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); + -+ return IRQ_HANDLED; ++ return 0; +} + -+static unsigned int serial_pxa_tx_empty(struct uart_port *port) ++/* ++ * This is called when either the DMA job is complete, or ++ * the FIFO timeout interrupt occurred. This must be called ++ * with the port spinlock sup->port.lock held. ++ */ ++static void lrw_uart_dma_rx_chars(struct lrw_uart_port *sup, ++ u32 pending, bool use_buf_b, ++ bool readfifo) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ unsigned long flags; -+ unsigned int ret; ++ struct tty_port *port = &sup->port.state->port; ++ struct lrw_uart_dmabuf *dbuf = use_buf_b ? 
++ &sup->dmarx.dbuf_b : &sup->dmarx.dbuf_a; ++ int dma_count = 0; ++ u32 fifotaken = 0; /* only used for vdbg() */ + -+ spin_lock_irqsave(&up->port.lock, flags); -+ if (up->dma_enable) { -+ if (up->ier & UART_IER_DMAE) { -+ if (up->uart_dma.dma_status & TX_DMA_RUNNING) { -+ spin_unlock_irqrestore(&up->port.lock, flags); -+ return 0; -+ } -+ } ++ struct lrw_uart_dmarx_data *dmarx = &sup->dmarx; ++ int dmataken = 0; ++ ++ if (sup->dmarx.poll_rate) { ++ /* The data can be taken by polling */ ++ dmataken = dbuf->len - dmarx->last_residue; ++ /* Recalculate the pending size */ ++ if (pending >= dmataken) ++ pending -= dmataken; + } -+ ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0; -+ spin_unlock_irqrestore(&up->port.lock, flags); + -+ return ret; ++ /* Pick the remain data from the DMA */ ++ if (pending) { ++ /* ++ * First take all chars in the DMA pipe, then look in the FIFO. ++ * Note that tty_insert_flip_buf() tries to take as many chars ++ * as it can. ++ */ ++ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, pending); ++ ++ sup->port.icount.rx += dma_count; ++ if (dma_count < pending) ++ dev_warn(sup->port.dev, ++ "couldn't insert all characters (TTY is full?)\n"); ++ } ++ ++ /* Reset the last_residue for Rx DMA poll */ ++ if (sup->dmarx.poll_rate) ++ dmarx->last_residue = dbuf->len; ++ ++ /* ++ * Only continue with trying to read the FIFO if all DMA chars have ++ * been taken first. ++ */ ++ if (dma_count == pending && readfifo) { ++ /* Clear any error flags */ ++ lrw_uart_write(UARTICR_OEIC | UARTICR_BEIC | UARTICR_PEIC | ++ UARTICR_FEIC, sup, REG_ICR); ++ ++ /* ++ * If we read all the DMA'd characters, and we had an ++ * incomplete buffer, that could be due to an rx error, or ++ * maybe we just timed out. Read any pending chars and check ++ * the error status. 
++ * ++ * Error conditions will only occur in the FIFO, these will ++ * trigger an immediate interrupt and stop the DMA job, so we ++ * will always find the error in the FIFO, never in the DMA ++ * buffer. ++ */ ++ fifotaken = lrw_uart_fifo_to_tty(sup); ++ } ++ ++ dev_vdbg(sup->port.dev, ++ "Took %d chars from DMA buffer and %d chars from the FIFO\n", ++ dma_count, fifotaken); ++ tty_flip_buffer_push(port); +} + -+static unsigned int serial_pxa_get_mctrl(struct uart_port *port) ++static void lrw_uart_dma_rx_irq(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ unsigned char status; -+ unsigned int ret; ++ struct lrw_uart_dmarx_data *dmarx = &sup->dmarx; ++ struct dma_chan *rxchan = dmarx->chan; ++ struct lrw_uart_dmabuf *dbuf = dmarx->use_buf_b ? ++ &dmarx->dbuf_b : &dmarx->dbuf_a; ++ size_t pending; ++ struct dma_tx_state state; ++ enum dma_status dmastat; + -+ status = serial_in(up, UART_MSR); ++ /* ++ * Pause the transfer so we can trust the current counter, ++ * do this before we pause the LRW UART block, else we may ++ * overflow the FIFO. 
++ */ ++ if (dmaengine_pause(rxchan)) ++ dev_err(sup->port.dev, "unable to pause DMA transfer\n"); ++ dmastat = rxchan->device->device_tx_status(rxchan, ++ dmarx->cookie, &state); ++ if (dmastat != DMA_PAUSED) ++ dev_err(sup->port.dev, "unable to pause DMA transfer\n"); + -+ ret = 0; -+ if (status & UART_MSR_DCD) -+ ret |= TIOCM_CAR; -+ if (status & UART_MSR_RI) -+ ret |= TIOCM_RNG; -+ if (status & UART_MSR_DSR) -+ ret |= TIOCM_DSR; -+ if (status & UART_MSR_CTS) -+ ret |= TIOCM_CTS; -+ return ret; ++ /* Disable RX DMA - incoming data will wait in the FIFO */ ++ sup->dmacr &= ~UARTFCCR_RXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); ++ sup->dmarx.running = false; ++ ++ pending = dbuf->len - state.residue; ++ if (WARN_ON_ONCE(pending > LRW_UART_DMA_BUFFER_SIZE)) ++ pending = LRW_UART_DMA_BUFFER_SIZE; ++ ++ /* Then we terminate the transfer - we now know our residue */ ++ dmaengine_terminate_all(rxchan); ++ ++ /* ++ * This will take the chars we have so far and insert ++ * into the framework. ++ */ ++ lrw_uart_dma_rx_chars(sup, pending, dmarx->use_buf_b, true); ++ ++ /* Switch buffer & re-trigger DMA job */ ++ dmarx->use_buf_b = !dmarx->use_buf_b; ++ if (lrw_uart_dma_rx_trigger_dma(sup)) { ++ dev_dbg(sup->port.dev, ++ "could not retrigger RX DMA job fall back to interrupt mode\n"); ++ sup->im |= UARTIMSC_RXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ } +} + -+static void serial_pxa_set_mctrl(struct uart_port *port, unsigned int mctrl) ++static void lrw_uart_dma_rx_callback(void *data) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ unsigned char mcr = 0; -+ int hostwake = 0; ++ struct lrw_uart_port *sup = data; ++ struct lrw_uart_dmarx_data *dmarx = &sup->dmarx; ++ struct dma_chan *rxchan = dmarx->chan; ++ bool lastbuf = dmarx->use_buf_b; ++ struct lrw_uart_dmabuf *dbuf = dmarx->use_buf_b ? 
++ &dmarx->dbuf_b : &dmarx->dbuf_a; ++ size_t pending; ++ struct dma_tx_state state; ++ int ret; ++ ++ /* ++ * This completion interrupt occurs typically when the ++ * RX buffer is totally stuffed but no timeout has yet ++ * occurred. When that happens, we just want the RX ++ * routine to flush out the secondary DMA buffer while ++ * we immediately trigger the next DMA job. ++ */ ++ uart_port_lock_irq(&sup->port); ++ /* ++ * Rx data can be taken by the UART interrupts during ++ * the DMA irq handler. So we check the residue here. ++ */ ++ rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); ++ pending = dbuf->len - state.residue; ++ if (WARN_ON_ONCE(pending > LRW_UART_DMA_BUFFER_SIZE)) ++ pending = LRW_UART_DMA_BUFFER_SIZE; + -+ if (mctrl & TIOCM_RTS) -+ mcr |= UART_MCR_RTS; -+ if (mctrl & TIOCM_DTR) -+ mcr |= UART_MCR_DTR; -+ if (mctrl & TIOCM_OUT1) -+ mcr |= UART_MCR_OUT1; -+ if (mctrl & TIOCM_OUT2) -+ mcr |= UART_MCR_OUT2; -+ if (mctrl & TIOCM_LOOP) -+ mcr |= UART_MCR_LOOP; ++ /* Then we terminate the transfer - we now know our residue */ ++ dmaengine_terminate_all(rxchan); + -+ if (up->device_ctrl_rts) { -+ if ((hostwake || up->in_resume) && (mctrl & TIOCM_RTS)) -+ mcr &= ~UART_MCR_RTS; -+ } ++ sup->dmarx.running = false; ++ dmarx->use_buf_b = !lastbuf; ++ ret = lrw_uart_dma_rx_trigger_dma(sup); + -+ mcr |= up->mcr; ++ lrw_uart_dma_rx_chars(sup, pending, lastbuf, false); ++ uart_unlock_and_check_sysrq(&sup->port); ++ /* ++ * Do this check after we picked the DMA chars so we don't ++ * get some IRQ immediately from RX. ++ */ ++ if (ret) { ++ dev_dbg(sup->port.dev, ++ "could not retrigger RX DMA job fall back to interrupt mode\n"); ++ sup->im |= UARTIMSC_RXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ } ++} + -+ serial_out(up, UART_MCR, mcr); ++/* ++ * Stop accepting received characters, when we're shutting down or ++ * suspending this port. ++ * Locking: called with port lock held and IRQs disabled. 
++ */ ++static inline void lrw_uart_dma_rx_stop(struct lrw_uart_port *sup) ++{ ++ if (!sup->using_rx_dma) ++ return; + -+#ifdef CONFIG_BT -+ if (up->port.line == BT_UART_PORT) -+ pr_debug("%s: rts: 0x%x\n", __func__, mcr & UART_MCR_RTS); -+#endif ++ /* FIXME. Just disable the DMA enable */ ++ sup->dmacr &= ~UARTFCCR_RXDMAE; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); +} + -+static void serial_pxa_break_ctl(struct uart_port *port, int break_state) ++/* ++ * Timer handler for Rx DMA polling. ++ * Every polling, It checks the residue in the dma buffer and transfer ++ * data to the tty. Also, last_residue is updated for the next polling. ++ */ ++static void lrw_uart_dma_rx_poll(struct timer_list *t) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ struct lrw_uart_port *sup = from_timer(sup, t, dmarx.timer); ++ struct tty_port *port = &sup->port.state->port; ++ struct lrw_uart_dmarx_data *dmarx = &sup->dmarx; ++ struct dma_chan *rxchan = sup->dmarx.chan; + unsigned long flags; ++ unsigned int dmataken = 0; ++ unsigned int size = 0; ++ struct lrw_uart_dmabuf *dbuf; ++ int dma_count; ++ struct dma_tx_state state; ++ ++ dbuf = dmarx->use_buf_b ? &sup->dmarx.dbuf_b : &sup->dmarx.dbuf_a; ++ rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); ++ if (likely(state.residue < dmarx->last_residue)) { ++ dmataken = dbuf->len - dmarx->last_residue; ++ size = dmarx->last_residue - state.residue; ++ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, ++ size); ++ if (dma_count == size) ++ dmarx->last_residue = state.residue; ++ dmarx->last_jiffies = jiffies; ++ } ++ tty_flip_buffer_push(port); + -+ spin_lock_irqsave(&up->port.lock, flags); -+ if (break_state == -1) -+ up->lcr |= UART_LCR_SBC; -+ else -+ up->lcr &= ~UART_LCR_SBC; -+ serial_out(up, UART_LCR, up->lcr); -+ spin_unlock_irqrestore(&up->port.lock, flags); ++ /* ++ * If no data is received in poll_timeout, the driver will fall back ++ * to interrupt mode. 
We will retrigger DMA at the first interrupt. ++ */ ++ if (jiffies_to_msecs(jiffies - dmarx->last_jiffies) ++ > sup->dmarx.poll_timeout) { ++ uart_port_lock_irqsave(&sup->port, &flags); ++ lrw_uart_dma_rx_stop(sup); ++ sup->im |= UARTIMSC_RXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ uart_port_unlock_irqrestore(&sup->port, flags); ++ ++ sup->dmarx.running = false; ++ dmaengine_terminate_all(rxchan); ++ del_timer(&sup->dmarx.timer); ++ } else { ++ mod_timer(&sup->dmarx.timer, ++ jiffies + msecs_to_jiffies(sup->dmarx.poll_rate)); ++ } +} + -+static void pxa_uart_transmit_dma_start(struct uart_pxa_port *up, int count) ++static void lrw_uart_dma_startup(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_dma *pxa_dma = &up->uart_dma; -+ struct dma_slave_config slave_config; + int ret; + -+ if (!pxa_dma->txdma_chan) { -+ dev_err(up->port.dev, "tx dma channel is not initialized\n"); ++ if (!sup->dma_probed) ++ lrw_uart_dma_probe(sup); ++ ++ if (!sup->dmatx.chan) ++ return; ++ ++ sup->dmatx.buf = kmalloc(LRW_UART_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA); ++ if (!sup->dmatx.buf) { ++ sup->port.fifosize = sup->fifosize; + return; + } + -+ slave_config.direction = DMA_MEM_TO_DEV; -+ slave_config.dst_addr = up->port.mapbase; -+ slave_config.dst_maxburst = DMA_BURST_SIZE; -+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ sup->dmatx.len = LRW_UART_DMA_BUFFER_SIZE; ++ ++ /* The DMA buffer is now the FIFO the TTY subsystem can use */ ++ sup->port.fifosize = LRW_UART_DMA_BUFFER_SIZE; ++ sup->using_tx_dma = true; ++ ++ if (!sup->dmarx.chan) ++ goto skip_rx; + -+ ret = dmaengine_slave_config(pxa_dma->txdma_chan, &slave_config); ++ /* Allocate and map DMA RX buffers */ ++ ret = lrw_uart_dmabuf_init(sup->dmarx.chan, &sup->dmarx.dbuf_a, ++ DMA_FROM_DEVICE); + if (ret) { -+ dev_err(up->port.dev, -+ "%s: dmaengine slave config err.\n", __func__); -+ return; ++ dev_err(sup->port.dev, "failed to init DMA %s: %d\n", ++ "RX buffer A", ret); ++ goto skip_rx; + } + -+ 
pxa_dma->tx_size = count; -+ pxa_dma->tx_desc = -+ dmaengine_prep_slave_single(pxa_dma->txdma_chan, pxa_dma->txdma_addr_phys, -+ count, DMA_MEM_TO_DEV, 0); -+ if (!pxa_dma->tx_desc) { -+ dev_err(up->port.dev, -+ "%s: Unable to get desc for Tx\n", __func__); -+ return; ++ ret = lrw_uart_dmabuf_init(sup->dmarx.chan, &sup->dmarx.dbuf_b, ++ DMA_FROM_DEVICE); ++ if (ret) { ++ dev_err(sup->port.dev, "failed to init DMA %s: %d\n", ++ "RX buffer B", ret); ++ lrw_uart_dmabuf_free(sup->dmarx.chan, &sup->dmarx.dbuf_a, ++ DMA_FROM_DEVICE); ++ goto skip_rx; + } -+ pxa_dma->tx_desc->callback = pxa_uart_transmit_dma_cb; -+ pxa_dma->tx_desc->callback_param = up; + -+ pxa_dma->tx_cookie = dmaengine_submit(pxa_dma->tx_desc); -+#ifdef CONFIG_PM -+#if SUPPORT_POWER_QOS -+ pm_runtime_get_sync(up->port.dev); -+#endif -+#endif ++ sup->using_rx_dma = true; + -+ dma_async_issue_pending(pxa_dma->txdma_chan); ++skip_rx: ++ /* Turn on DMA error (RX/TX will be enabled on demand) */ ++ sup->dmacr |= UARTFCCR_DMAONERR; ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); ++ ++ if (sup->using_rx_dma) { ++ if (lrw_uart_dma_rx_trigger_dma(sup)) ++ dev_dbg(sup->port.dev, ++ "could not trigger initial RX DMA job, fall back to interrupt mode\n"); ++ if (sup->dmarx.poll_rate) { ++ timer_setup(&sup->dmarx.timer, lrw_uart_dma_rx_poll, 0); ++ mod_timer(&sup->dmarx.timer, ++ jiffies + msecs_to_jiffies(sup->dmarx.poll_rate)); ++ sup->dmarx.last_residue = LRW_UART_DMA_BUFFER_SIZE; ++ sup->dmarx.last_jiffies = jiffies; ++ } ++ } +} + -+static void pxa_uart_receive_dma_start(struct uart_pxa_port *up) ++static void lrw_uart_dma_shutdown(struct lrw_uart_port *sup) +{ -+ unsigned long flags; -+ struct uart_pxa_dma *uart_dma = &up->uart_dma; -+ struct dma_slave_config slave_config; -+ int ret; -+ -+ if (!uart_dma->rxdma_chan) { -+ dev_err(up->port.dev, "rx dma channel is not initialized\n"); ++ if (!(sup->using_tx_dma || sup->using_rx_dma)) + return; -+ } + -+ spin_lock_irqsave(&up->port.lock, flags); -+ if 
(uart_dma->dma_status & RX_DMA_RUNNING) { -+ spin_unlock_irqrestore(&up->port.lock, flags); -+ return; -+ } -+ uart_dma->dma_status |= RX_DMA_RUNNING; -+ spin_unlock_irqrestore(&up->port.lock, flags); ++ /* Disable RX and TX DMA */ ++ while (lrw_uart_read(sup, REG_FR) & sup->vendor->fr_busy) ++ cpu_relax(); + -+ slave_config.direction = DMA_DEV_TO_MEM; -+ slave_config.src_addr = up->port.mapbase; -+ slave_config.src_maxburst = DMA_BURST_SIZE; -+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; ++ uart_port_lock_irq(&sup->port); ++ sup->dmacr &= ~(UARTFCCR_DMAONERR | UARTFCCR_RXDMAE | UARTFCCR_TXDMAE); ++ lrw_uart_write(sup->dmacr, sup, REG_FCCR); ++ uart_port_unlock_irq(&sup->port); + -+ ret = dmaengine_slave_config(uart_dma->rxdma_chan, &slave_config); -+ if (ret) { -+ dev_err(up->port.dev, "%s: dmaengine slave config err.\n", __func__); -+ return; ++ if (sup->using_tx_dma) { ++ /* In theory, this should already be done by lrw_uart_dma_flush_buffer */ ++ dmaengine_terminate_all(sup->dmatx.chan); ++ if (sup->dmatx.queued) { ++ dma_unmap_single(sup->dmatx.chan->device->dev, ++ sup->dmatx.dma, sup->dmatx.len, ++ DMA_TO_DEVICE); ++ sup->dmatx.queued = false; ++ } ++ ++ kfree(sup->dmatx.buf); ++ sup->using_tx_dma = false; + } + -+ uart_dma->rx_desc = -+ dmaengine_prep_slave_single(uart_dma->rxdma_chan, -+ uart_dma->rxdma_addr_phys, DMA_RX_BLOCK_SIZE, -+ DMA_DEV_TO_MEM, 0); -+ if (!uart_dma->rx_desc) { -+ dev_err(up->port.dev, "%s: Unable to get desc for Rx\n", __func__); -+ return; ++ if (sup->using_rx_dma) { ++ dmaengine_terminate_all(sup->dmarx.chan); ++ /* Clean up the RX DMA */ ++ lrw_uart_dmabuf_free(sup->dmarx.chan, &sup->dmarx.dbuf_a, DMA_FROM_DEVICE); ++ lrw_uart_dmabuf_free(sup->dmarx.chan, &sup->dmarx.dbuf_b, DMA_FROM_DEVICE); ++ if (sup->dmarx.poll_rate) ++ del_timer_sync(&sup->dmarx.timer); ++ sup->using_rx_dma = false; + } -+ uart_dma->rx_desc->callback = pxa_uart_receive_dma_cb; -+ uart_dma->rx_desc->callback_param = up; ++} + -+ 
uart_dma->rx_cookie = dmaengine_submit(uart_dma->rx_desc); -+ dma_async_issue_pending(uart_dma->rxdma_chan); ++static inline bool lrw_uart_dma_rx_available(struct lrw_uart_port *sup) ++{ ++ return sup->using_rx_dma; +} + -+static void pxa_uart_receive_dma_cb(void *data) ++static inline bool lrw_uart_dma_rx_running(struct lrw_uart_port *sup) +{ -+ unsigned long flags; -+ struct uart_pxa_port *up = (struct uart_pxa_port *)data; -+ struct uart_pxa_dma *pxa_dma = &up->uart_dma; -+ struct tty_port *port = &up->port.state->port; -+ unsigned int count; -+ unsigned char *tmp = pxa_dma->rxdma_addr; -+ struct dma_tx_state dma_state; -+#if (DMA_BUF_POLLING_SWITCH == 1) -+ unsigned int buf_used, trail_cnt = 0; -+ unsigned char *trail_addr, *last_burst_addr; -+ u8 mark_1bytes = 0xff; -+ int timeout = 5000, cycle = 2; /* us */ -+ int times_1 = 0, times_2 = 0, duration_time_us; -+#endif ++ return sup->using_rx_dma && sup->dmarx.running; ++} + -+#ifdef CONFIG_PM -+#if SUPPORT_POWER_QOS -+ if (!mod_timer(&up->pxa_timer, jiffies + PXA_TIMER_TIMEOUT)) -+ pm_runtime_get_sync(up->port.dev); -+#endif -+#endif ++#else ++/* Blank functions if the DMA engine is not available */ ++static inline void lrw_uart_dma_remove(struct lrw_uart_port *sup) ++{ ++} + -+ dmaengine_tx_status(pxa_dma->rxdma_chan, pxa_dma->rx_cookie, &dma_state); -+ count = DMA_RX_BLOCK_SIZE - dma_state.residue; ++static inline void lrw_uart_dma_startup(struct lrw_uart_port *sup) ++{ ++} + -+#if (DMA_BUF_POLLING_SWITCH == 1) -+ buf_used = count; -+ if (count > 0 && count < DMA_FIFO_THRESHOLD) { -+ trail_cnt = count; -+ trail_addr = tmp; -+ times_1 = timeout / cycle; -+ times_2 = timeout / cycle; ++static inline void lrw_uart_dma_shutdown(struct lrw_uart_port *sup) ++{ ++} + -+ while ((*trail_addr == mark_1bytes) && (times_1-- >= 0)) -+ udelay(cycle); ++static inline bool lrw_uart_dma_tx_irq(struct lrw_uart_port *sup) ++{ ++ return false; ++} + -+ if (trail_cnt > 1) { -+ trail_addr = trail_addr + trail_cnt - 1; -+ while 
((*trail_addr == mark_1bytes) && (times_2-- >= 0)) -+ udelay(cycle); -+ } ++static inline void lrw_uart_dma_tx_stop(struct lrw_uart_port *sup) ++{ ++} + -+ if (times_1 <= 0 || times_2 <= 0) -+ pxa_dma->dma_poll_timeout++; -+ } ++static inline bool lrw_uart_dma_tx_start(struct lrw_uart_port *sup) ++{ ++ return false; ++} + -+ if (count >= DMA_FIFO_THRESHOLD && count < DMA_RX_BLOCK_SIZE) { -+ trail_cnt = (count % DMA_BURST_SIZE) + (DMA_FIFO_THRESHOLD - DMA_BURST_SIZE); -+ trail_addr = tmp + count - trail_cnt; ++static inline void lrw_uart_dma_rx_irq(struct lrw_uart_port *sup) ++{ ++} + -+ #if (DMA_BURST_SIZE == DMA_FIFO_THRESHOLD) -+ if (trail_cnt == 0) { -+ trail_addr = tmp + count - DMA_BURST_SIZE; -+ trail_cnt = DMA_BURST_SIZE; -+ } -+ #endif ++static inline void lrw_uart_dma_rx_stop(struct lrw_uart_port *sup) ++{ ++} + -+ times_1 = timeout / cycle; -+ times_2 = timeout / cycle; ++static inline int lrw_uart_dma_rx_trigger_dma(struct lrw_uart_port *sup) ++{ ++ return -EIO; ++} + -+ while ((*trail_addr == mark_1bytes) && (times_1-- >= 0)) -+ udelay(cycle); ++static inline bool lrw_uart_dma_rx_available(struct lrw_uart_port *sup) ++{ ++ return false; ++} + -+ if (trail_cnt > 1) { -+ trail_addr = trail_addr + trail_cnt - 1; -+ while ((*trail_addr == mark_1bytes) && (times_2-- >= 0)) -+ udelay(cycle); ++static inline bool lrw_uart_dma_rx_running(struct lrw_uart_port *sup) ++{ ++ return false; ++} ++ ++#define lrw_uart_dma_flush_buffer NULL ++#endif ++ ++static void lrw_uart_rs485_tx_stop(struct lrw_uart_port *sup) ++{ ++ /* ++ * To be on the safe side only time out after twice as many iterations ++ * as fifo size. 
++ */ ++ const int MAX_TX_DRAIN_ITERS = sup->port.fifosize * 2; ++ struct uart_port *port = &sup->port; ++ int i = 0; ++ u32 mcr, mcfg; ++ ++ /* Wait until hardware tx queue is empty */ ++ while (!lrw_uart_tx_empty(port)) { ++ if (i > MAX_TX_DRAIN_ITERS) { ++ dev_warn(port->dev, ++ "timeout while draining hardware tx queue\n"); ++ break; + } + -+ if (times_1 <= 0 || times_2 <= 0) -+ pxa_dma->dma_poll_timeout++; ++ udelay(sup->rs485_tx_drain_interval); ++ i++; + } + -+ if (count == DMA_RX_BLOCK_SIZE) { -+ last_burst_addr = tmp + DMA_RX_BLOCK_SIZE - DMA_BURST_SIZE; -+ trail_cnt = DMA_BURST_SIZE; -+ times_1 = timeout / cycle; -+ times_2 = timeout / cycle; ++ if (port->rs485.delay_rts_after_send) ++ mdelay(port->rs485.delay_rts_after_send); + -+ while ((*last_burst_addr == mark_1bytes) && (times_1-- >= 0)) -+ udelay(cycle); ++ mcr = lrw_uart_read(sup, REG_MCR); + -+ if (trail_cnt > 1) { -+ last_burst_addr = tmp + DMA_RX_BLOCK_SIZE - 1; -+ while ((*last_burst_addr == mark_1bytes) && (times_2-- >= 0)) -+ udelay(cycle); -+ } ++ if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ++ mcr &= ~UARTMCR_RTS; ++ else ++ mcr |= UARTMCR_RTS; + -+ if (times_1 <= 0 || times_2 <= 0) -+ pxa_dma->dma_poll_timeout++; -+ } -+#endif /* #if (DMA_BUF_POLLING_SWITCH == 1) */ ++ lrw_uart_write(mcr, sup, REG_MCR); + -+ if (up->port.sysrq) { -+ while (count > 0) { -+ if (!uart_handle_sysrq_char(&up->port, *tmp)) { -+ tty_insert_flip_char(port, *tmp, TTY_NORMAL); -+ up->port.icount.rx++; -+ } -+ tmp++; -+ count--; -+ } -+ } else { -+ tty_insert_flip_string(port, tmp, count); -+ up->port.icount.rx += count; -+ } -+ tty_flip_buffer_push(port); ++ /* Disable the transmitter and reenable the transceiver */ ++ mcfg = lrw_uart_read(sup, REG_MCFG); ++ mcfg &= ~UARTMCFG_TXE; ++ mcfg |= UARTMCFG_RXE; ++ lrw_uart_write(mcfg, sup, REG_MCFG); + -+ spin_lock_irqsave(&up->port.lock, flags); -+ pxa_dma->dma_status &= ~RX_DMA_RUNNING; -+ spin_unlock_irqrestore(&up->port.lock, flags); ++ sup->rs485_tx_started = 
false; ++} + -+#if (DMA_BUF_POLLING_SWITCH == 1) -+ if (buf_used > 0) { -+ tmp = pxa_dma->rxdma_addr; -+ memset(tmp, mark_1bytes, buf_used); -+ } ++static void lrw_uart_stop_tx(struct uart_port *port) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ if (times_1 > 0) { -+ duration_time_us = (timeout / cycle - times_1) * cycle; -+ if (pxa_dma->dma_poll_max_time < duration_time_us) -+ pxa_dma->dma_poll_max_time = duration_time_us; -+ } -+ if (times_2 > 0) { -+ duration_time_us = (timeout / cycle - times_2) * cycle; -+ if (pxa_dma->dma_poll_max_time < duration_time_us) -+ pxa_dma->dma_poll_max_time = duration_time_us; -+ } -+ if (times_1 > 0 && times_2 > 0) { -+ duration_time_us = (2 * timeout / cycle - times_1 - times_2) * cycle; -+ if (pxa_dma->dma_poll_max_time < duration_time_us) -+ pxa_dma->dma_poll_max_time = duration_time_us; -+ } -+#endif /* #if (DMA_BUF_POLLING_SWITCH == 1) */ ++ sup->im &= ~UARTIMSC_TXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ lrw_uart_dma_tx_stop(sup); + -+ if (pxa_dma->rx_stop || !serial_pxa_is_open(up)) -+ return; -+ pxa_uart_receive_dma_start(up); -+ if (up->edge_wakeup_gpio >= 0) -+ pm_wakeup_event(up->port.dev, BLOCK_SUSPEND_TIMEOUT); ++ if ((port->rs485.flags & SER_RS485_ENABLED) && sup->rs485_tx_started) ++ lrw_uart_rs485_tx_stop(sup); +} + -+static void pxa_uart_transmit_dma_cb(void *data) ++static bool lrw_uart_tx_chars(struct lrw_uart_port *sup, bool from_irq); ++ ++/* Start TX with programmed I/O only (no DMA) */ ++static void lrw_uart_start_tx_pio(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)data; -+ struct uart_pxa_dma *pxa_dma = &up->uart_dma; -+ struct circ_buf *xmit = &up->port.state->xmit; ++ if (lrw_uart_tx_chars(sup, false)) { ++ sup->im |= UARTIMSC_TXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ } ++} + -+ if (up->from_resume) -+ up->from_resume = false; ++static void lrw_uart_rs485_tx_start(struct lrw_uart_port *sup) ++{ ++ struct 
uart_port *port = &sup->port; ++ u32 mcr, mcfg; + -+ if (dma_async_is_tx_complete(pxa_dma->txdma_chan, pxa_dma->tx_cookie, -+ NULL, NULL) == DMA_COMPLETE) -+ schedule_work(&up->uart_tx_lpm_work); ++ /* Enable transmitter */ ++ mcfg = lrw_uart_read(sup, REG_MCFG); ++ mcfg |= UARTMCFG_TXE; + -+ spin_lock_irqsave(&up->port.lock, up->flags); -+ pxa_dma->dma_status &= ~TX_DMA_RUNNING; -+ spin_unlock_irqrestore(&up->port.lock, up->flags); ++ /* Disable receiver if half-duplex */ ++ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) ++ mcfg &= ~UARTMCFG_RXE; + -+ if (pxa_dma->tx_stop || !serial_pxa_is_open(up)) -+ return; ++ lrw_uart_write(mcfg, sup, REG_MCFG); + -+ if (up->port.x_char) { -+ serial_out(up, UART_TX, up->port.x_char); -+ up->port.icount.tx++; -+ up->port.x_char = 0; -+ } ++ mcr = lrw_uart_read(sup, REG_MCR); ++ if (port->rs485.flags & SER_RS485_RTS_ON_SEND) ++ mcr &= ~UARTMCR_RTS; ++ else ++ mcr |= UARTMCR_RTS; + -+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) -+ uart_write_wakeup(&up->port); ++ lrw_uart_write(mcr, sup, REG_MCR); + -+ if (!uart_circ_empty(xmit)) -+ tasklet_schedule(&pxa_dma->tklet); ++ if (port->rs485.delay_rts_before_send) ++ mdelay(port->rs485.delay_rts_before_send); ++ ++ sup->rs485_tx_started = true; +} + -+static void pxa_uart_dma_init(struct uart_pxa_port *up) ++static void lrw_uart_start_tx(struct uart_port *port) +{ -+ struct uart_pxa_dma *pxa_dma = &up->uart_dma; -+ dma_cap_mask_t mask; ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ dma_cap_zero(mask); -+ dma_cap_set(DMA_SLAVE, mask); ++ if ((sup->port.rs485.flags & SER_RS485_ENABLED) && ++ !sup->rs485_tx_started) ++ lrw_uart_rs485_tx_start(sup); + -+ if (!pxa_dma->rxdma_chan) { -+ pxa_dma->rxdma_chan = dma_request_slave_channel(up->port.dev, "rx"); -+ if (IS_ERR_OR_NULL(pxa_dma->rxdma_chan)) { -+ dev_WARN_ONCE(up->port.dev, 1, "failed to request rx dma channel\n"); -+ goto out; -+ } -+ } ++ if (!lrw_uart_dma_tx_start(sup)) ++ 
lrw_uart_start_tx_pio(sup); ++} + -+ if (!pxa_dma->txdma_chan) { -+ pxa_dma->txdma_chan = dma_request_slave_channel(up->port.dev, "tx"); -+ if (IS_ERR_OR_NULL(pxa_dma->txdma_chan)) { -+ dev_WARN_ONCE(up->port.dev, 1, "failed to request tx dma channel\n"); -+ goto err_txdma; -+ } -+ } ++static void lrw_uart_stop_rx(struct uart_port *port) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ if (!pxa_dma->txdma_addr) { -+ pxa_dma->txdma_addr = -+ dma_direct_alloc(up->port.dev, DMA_BLOCK, -+ &pxa_dma->txdma_addr_phys, GFP_KERNEL, -+ DMA_ATTR_FORCE_CONTIGUOUS); -+ if (!pxa_dma->txdma_addr) { -+ dev_WARN_ONCE(up->port.dev, 1, "failed to allocate tx dma memory\n"); -+ goto txdma_err_alloc; -+ } -+ } ++ sup->im &= ~(UARTIMSC_RXIM | UARTIMSC_RTIM | UARTIMSC_FEIM | ++ UARTIMSC_PEIM | UARTIMSC_BEIM | UARTIMSC_OEIM); ++ lrw_uart_write(sup->im, sup, REG_IMSC); + -+ if (!pxa_dma->rxdma_addr) { -+ pxa_dma->rxdma_addr = -+ dma_direct_alloc(up->port.dev, DMA_RX_BLOCK_SIZE, -+ &pxa_dma->rxdma_addr_phys, GFP_KERNEL, -+ DMA_ATTR_FORCE_CONTIGUOUS); -+ if (!pxa_dma->rxdma_addr) { -+ dev_WARN_ONCE(up->port.dev, 1, "failed to allocate rx dma memory\n"); -+ goto rxdma_err_alloc; -+ } -+ } ++ lrw_uart_dma_rx_stop(sup); ++} + -+ pxa_dma->dma_status = 0; -+ pxa_dma->dma_init = true; -+ return; ++static void lrw_uart_throttle_rx(struct uart_port *port) ++{ ++ unsigned long flags; + -+rxdma_err_alloc: -+ dma_direct_free(up->port.dev, DMA_BLOCK, pxa_dma->txdma_addr, -+ pxa_dma->txdma_addr_phys, DMA_ATTR_FORCE_CONTIGUOUS); -+ pxa_dma->txdma_addr = NULL; -+txdma_err_alloc: -+ dma_release_channel(pxa_dma->txdma_chan); -+ pxa_dma->txdma_chan = NULL; -+err_txdma: -+ dma_release_channel(pxa_dma->rxdma_chan); -+ pxa_dma->rxdma_chan = NULL; -+out: -+ pxa_dma->dma_init = false; ++ uart_port_lock_irqsave(port, &flags); ++ lrw_uart_stop_rx(port); ++ uart_port_unlock_irqrestore(port, flags); +} + -+static void pxa_uart_dma_uninit(struct uart_pxa_port *up) ++static void 
lrw_uart_enable_ms(struct uart_port *port) +{ -+ struct uart_pxa_dma *pxa_dma; ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ pxa_dma = &up->uart_dma; -+ -+ stop_dma(up, PXA_UART_TX); -+ stop_dma(up, PXA_UART_RX); ++ sup->im |= UARTIMSC_RIMIM | UARTIMSC_CTSMIM | UARTIMSC_DCDMIM | UARTIMSC_DSRMIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++} + -+ pxa_dma->dma_init = false; ++static void lrw_uart_rx_chars(struct lrw_uart_port *sup) ++__releases(&sup->port.lock) ++__acquires(&sup->port.lock) ++{ ++ lrw_uart_fifo_to_tty(sup); + -+ if (pxa_dma->txdma_chan) { -+ dma_release_channel(pxa_dma->txdma_chan); -+ pxa_dma->txdma_chan = NULL; ++ uart_port_unlock(&sup->port); ++ tty_flip_buffer_push(&sup->port.state->port); ++ /* ++ * If we were temporarily out of DMA mode for a while, ++ * attempt to switch back to DMA mode again. ++ */ ++ if (lrw_uart_dma_rx_available(sup)) { ++ if (lrw_uart_dma_rx_trigger_dma(sup)) { ++ dev_dbg(sup->port.dev, ++ "could not trigger RX DMA job fall back to interrupt mode again\n"); ++ sup->im |= UARTIMSC_RXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ } else { ++#ifdef CONFIG_DMA_ENGINE ++ /* Start Rx DMA poll */ ++ if (sup->dmarx.poll_rate) { ++ sup->dmarx.last_jiffies = jiffies; ++ sup->dmarx.last_residue = LRW_UART_DMA_BUFFER_SIZE; ++ mod_timer(&sup->dmarx.timer, ++ jiffies + msecs_to_jiffies(sup->dmarx.poll_rate)); ++ } ++#endif ++ } + } ++ uart_port_lock(&sup->port); ++} ++ ++static bool lrw_uart_tx_char(struct lrw_uart_port *sup, unsigned char c, ++ bool from_irq) ++{ ++ if (unlikely(!from_irq) && ++ lrw_uart_read(sup, REG_FR) & UARTFR_TXFF) ++ return false; /* unable to transmit character */ ++ ++ lrw_uart_write(c, sup, REG_DR); ++ sup->port.icount.tx++; ++ ++ return true; ++} + -+ if (pxa_dma->txdma_addr) { -+ dma_direct_free(up->port.dev, DMA_BLOCK, pxa_dma->txdma_addr, -+ pxa_dma->txdma_addr_phys, -+ DMA_ATTR_FORCE_CONTIGUOUS); -+ pxa_dma->txdma_addr = NULL; ++/* Returns true if tx 
interrupts have to be (kept) enabled */ ++static bool lrw_uart_tx_chars(struct lrw_uart_port *sup, bool from_irq) ++{ ++ struct circ_buf *xmit = &sup->port.state->xmit; ++ int count = sup->fifosize >> 1; ++ ++ if (sup->port.x_char) { ++ if (!lrw_uart_tx_char(sup, sup->port.x_char, from_irq)) ++ return true; ++ sup->port.x_char = 0; ++ --count; + } + -+ if (pxa_dma->rxdma_chan) { -+ dma_release_channel(pxa_dma->rxdma_chan); -+ pxa_dma->rxdma_chan = NULL; ++ if (uart_circ_empty(xmit) || uart_tx_stopped(&sup->port)) { ++ lrw_uart_stop_tx(&sup->port); ++ return false; + } + -+ if (pxa_dma->rxdma_addr) { -+ dma_direct_free(up->port.dev, DMA_RX_BLOCK_SIZE, pxa_dma->rxdma_addr, -+ pxa_dma->rxdma_addr_phys, -+ DMA_ATTR_FORCE_CONTIGUOUS); -+ pxa_dma->rxdma_addr = NULL; ++ /* If we are using DMA mode, try to send some characters. */ ++ if (lrw_uart_dma_tx_irq(sup)) ++ return true; ++ ++ do { ++ if (likely(from_irq) && count-- == 0) ++ break; ++ ++ if (!lrw_uart_tx_char(sup, xmit->buf[xmit->tail], from_irq)) ++ break; ++ ++ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); ++ } while (!uart_circ_empty(xmit)); ++ ++ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) ++ uart_write_wakeup(&sup->port); ++ ++ if (uart_circ_empty(xmit)) { ++ lrw_uart_stop_tx(&sup->port); ++ return false; + } ++ return true; +} + -+static void uart_task_action(unsigned long data) ++static void lrw_uart_modem_status(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)data; -+ struct circ_buf *xmit = &up->port.state->xmit; -+ unsigned char *tmp = up->uart_dma.txdma_addr; -+ unsigned long flags; -+ int count = 0, c; ++ unsigned int status, delta; + -+ if (up->uart_dma.tx_stop || up->port.suspended || -+ !serial_pxa_is_open(up) || up->from_resume) -+ return; ++ status = lrw_uart_read(sup, REG_FR) & UARTFR_MODEM_ANY; ++ ++ delta = status ^ sup->old_status; ++ sup->old_status = status; + -+ spin_lock_irqsave(&up->port.lock, flags); -+ if (up->uart_dma.dma_status & 
TX_DMA_RUNNING) { -+ spin_unlock_irqrestore(&up->port.lock, flags); ++ if (!delta) + return; -+ } + -+ up->uart_dma.dma_status |= TX_DMA_RUNNING; -+ while (1) { -+ c = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); -+ if (c <= 0) -+ break; ++ if (delta & UARTFR_DCD) ++ uart_handle_dcd_change(&sup->port, status & UARTFR_DCD); + -+ memcpy(tmp, xmit->buf + xmit->tail, c); -+ xmit->tail = (xmit->tail + c) & (UART_XMIT_SIZE - 1); -+ tmp += c; -+ count += c; -+ up->port.icount.tx += c; -+ } -+ spin_unlock_irqrestore(&up->port.lock, flags); ++ if (delta & sup->vendor->fr_dsr) ++ sup->port.icount.dsr++; ++ ++ if (delta & sup->vendor->fr_cts) ++ uart_handle_cts_change(&sup->port, ++ status & sup->vendor->fr_cts); + -+ pr_debug("count =%d", count); -+ pxa_uart_transmit_dma_start(up, count); ++ wake_up_interruptible(&sup->port.state->port.delta_msr_wait); +} + -+static int serial_pxa_startup(struct uart_port *port) ++static void check_apply_cts_event_workaround(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ unsigned long flags; -+ int tmp = 0; ++ if (!sup->vendor->cts_event_workaround) ++ return; + -+ if (port->line == 3) -+ up->mcr |= UART_MCR_AFE; -+ else -+ up->mcr = 0; ++ /* workaround to make sure that all bits are unlocked.. 
*/ ++ lrw_uart_write(0x00, sup, REG_ICR); + -+ up->port.uartclk = clk_get_rate(up->fclk); ++ /* ++ * WA: introduce 26ns(1 uart clk) delay before W1C; ++ * single apb access will incur 2 pclk(133.12Mhz) delay, ++ * so add 2 dummy reads ++ */ ++ lrw_uart_read(sup, REG_ICR); ++ lrw_uart_read(sup, REG_ICR); ++} + -+ enable_irq(up->port.irq); ++static irqreturn_t lrw_uart_int(int irq, void *dev_id) ++{ ++ struct lrw_uart_port *sup = dev_id; ++ unsigned long flags; ++ unsigned int status, pass_counter = ISR_PASS_LIMIT; ++ int handled = 0; + -+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO); -+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | -+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); -+ serial_out(up, UART_FCR, 0); ++ uart_port_lock_irqsave(&sup->port, &flags); ++ status = lrw_uart_read(sup, REG_RIS) & sup->im; ++ if (status) { ++ do { ++ check_apply_cts_event_workaround(sup); + -+ (void)serial_in(up, UART_LSR); -+ (void)serial_in(up, UART_RX); -+ (void)serial_in(up, UART_IIR); -+ (void)serial_in(up, UART_MSR); ++ lrw_uart_write(status & ~(UARTICR_TXIC | UARTICR_RTIC | UARTICR_RXIC), ++ sup, REG_ICR); + -+ serial_out(up, UART_LCR, UART_LCR_WLEN8); ++ if (status & (UARTICR_RTIC | UARTICR_RXIC)) { ++ if (lrw_uart_dma_rx_running(sup)) ++ lrw_uart_dma_rx_irq(sup); ++ else ++ lrw_uart_rx_chars(sup); ++ } ++ if (status & (UARTICR_DSRMIC | UARTICR_DCDMIC | ++ UARTICR_CTSMIC | UARTICR_RIMIC)) ++ lrw_uart_modem_status(sup); ++ if (status & UARTICR_TXIC) ++ lrw_uart_tx_chars(sup, true); + -+ spin_lock_irqsave(&up->port.lock, flags); -+ up->port.mctrl |= TIOCM_OUT2; -+ tmp = serial_in(up, UART_MCR); -+ tmp |= TIOCM_OUT2; -+ serial_out(up, UART_MCR, tmp); -+ spin_unlock_irqrestore(&up->port.lock, flags); ++ if (pass_counter-- == 0) ++ break; + -+ if (up->dma_enable) { -+ pxa_uart_dma_init(up); -+ up->uart_dma.rx_stop = 0; -+ pxa_uart_receive_dma_start(up); -+ tasklet_init(&up->uart_dma.tklet, uart_task_action, (unsigned long)up); ++ status = lrw_uart_read(sup, REG_RIS) & sup->im; 
++ } while (status != 0); ++ handled = 1; + } + -+ spin_lock_irqsave(&up->port.lock, flags); -+ if (up->dma_enable) -+ up->ier = UART_IER_DMAE | UART_IER_UUE; -+ else -+ up->ier = UART_IER_RLSI | UART_IER_RDI | UART_IER_RTOIE | UART_IER_UUE; -+ serial_out(up, UART_IER, up->ier); -+ spin_unlock_irqrestore(&up->port.lock, flags); -+ -+ (void)serial_in(up, UART_LSR); -+ (void)serial_in(up, UART_RX); -+ (void)serial_in(up, UART_IIR); -+ (void)serial_in(up, UART_MSR); ++ uart_port_unlock_irqrestore(&sup->port, flags); + -+ return 0; ++ return IRQ_RETVAL(handled); +} + -+static void serial_pxa_shutdown(struct uart_port *port) ++static unsigned int lrw_uart_tx_empty(struct uart_port *port) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ unsigned long flags; -+ unsigned int tmp = 0; ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ disable_irq(up->port.irq); -+ if (up->dma_enable) { -+ tasklet_kill(&up->uart_dma.tklet); -+ up->uart_dma.tx_stop = 1; -+ up->uart_dma.rx_stop = 1; -+ pxa_uart_dma_uninit(up); -+ } ++ /* Allow feature register bits to be inverted to work around errata */ ++ unsigned int status = lrw_uart_read(sup, REG_FR) ^ sup->vendor->inv_fr; + -+ flush_work(&up->uart_tx_lpm_work); ++ return status & (sup->vendor->fr_busy | UARTFR_TXFF) ? 
++ 0 : TIOCSER_TEMT; ++} + -+ spin_lock_irqsave(&up->port.lock, flags); -+ up->ier = 0; -+ serial_out(up, UART_IER, 0); ++static void lrw_uart_maybe_set_bit(bool cond, unsigned int *ptr, unsigned int mask) ++{ ++ if (cond) ++ *ptr |= mask; ++} ++ ++static unsigned int lrw_uart_get_mctrl(struct uart_port *port) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); ++ unsigned int result = 0; ++ unsigned int status = lrw_uart_read(sup, REG_FR); + -+ up->port.mctrl &= ~TIOCM_OUT2; -+ tmp = serial_in(up, UART_MCR); -+ tmp &= ~TIOCM_OUT2; -+ serial_out(up, UART_MCR, tmp); -+ spin_unlock_irqrestore(&up->port.lock, flags); ++ lrw_uart_maybe_set_bit(status & UARTFR_DCD, &result, TIOCM_CAR); ++ lrw_uart_maybe_set_bit(status & sup->vendor->fr_dsr, &result, TIOCM_DSR); ++ lrw_uart_maybe_set_bit(status & sup->vendor->fr_cts, &result, TIOCM_CTS); ++ lrw_uart_maybe_set_bit(status & sup->vendor->fr_ri, &result, TIOCM_RNG); + -+ serial_out(up, UART_LCR, serial_in(up, UART_LCR) & ~UART_LCR_SBC); -+ serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | -+ UART_FCR_CLEAR_RCVR | -+ UART_FCR_CLEAR_XMIT); -+ serial_out(up, UART_FCR, 0); ++ return result; +} + -+static int pxa_set_baudrate_clk(struct uart_port *port, unsigned int baud) ++static void lrw_uart_assign_bit(bool cond, unsigned int *ptr, unsigned int mask) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ unsigned long rate; -+ int ret; ++ if (cond) ++ *ptr |= mask; ++ else ++ *ptr &= ~mask; ++} + -+ if (up->current_baud == baud) -+ return 0; ++static void lrw_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); ++ unsigned int mcr; ++ unsigned int mcfg; ++ unsigned int fccr; + -+ switch (baud) { -+ case 500000: -+ case 1000000: -+ case 1500000: -+ case 3000000: -+ rate = 48000000; -+ break; -+ case 576000: -+ case 1152000: -+ case 2500000: -+ case 4000000: -+ rate = 73000000; -+ break; -+ case 
2000000: -+ case 3500000: -+ rate = 58000000; -+ break; -+ default: -+ rate = 14700000; -+ break; -+ } ++ mcr = lrw_uart_read(sup, REG_MCR); ++ mcfg = lrw_uart_read(sup, REG_MCFG); ++ fccr = lrw_uart_read(sup, REG_FCCR); + -+ ret = clk_set_rate(up->fclk, rate); -+ if (ret < 0) { -+ dev_err(port->dev, -+ "Failed to set clk rate %lu\n", rate); -+ return ret; -+ } ++ lrw_uart_assign_bit(mctrl & TIOCM_RTS, &mcr, UARTMCR_RTS); ++ lrw_uart_assign_bit(mctrl & TIOCM_DTR, &mcr, UARTMCR_DTR); ++ lrw_uart_assign_bit(mctrl & TIOCM_OUT1, &mcr, UARTMCR_OUT1); ++ lrw_uart_assign_bit(mctrl & TIOCM_OUT2, &mcr, UARTMCR_OUT2); ++ lrw_uart_assign_bit(mctrl & TIOCM_LOOP, &mcfg, UARTMCFG_LBE); + -+ up->port.uartclk = clk_get_rate(up->fclk); -+ up->current_baud = baud; ++ if (port->status & UPSTAT_AUTORTS) { ++ /* We need to disable auto-RTS if we want to turn RTS off */ ++ lrw_uart_assign_bit(mctrl & TIOCM_RTS, &fccr, UARTFCCR_RTSEN); ++ } + -+ return 0; ++ lrw_uart_write(mcr, sup, REG_MCR); ++ lrw_uart_write(mcfg, sup, REG_MCFG); ++ lrw_uart_write(fccr, sup, REG_FCCR); +} + -+static void serial_pxa_set_termios(struct uart_port *port, -+ struct ktermios *termios, -+ const struct ktermios *old) ++static void lrw_uart_break_ctl(struct uart_port *port, int break_state) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ unsigned char cval, fcr = 0; ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + unsigned long flags; -+ unsigned int baud, quot; -+ unsigned int dll; -+ int ret; ++ unsigned int mcfg; + -+ if (up->dma_enable && up->uart_dma.dma_init) -+ stop_dma(up, PXA_UART_RX); ++ uart_port_lock_irqsave(&sup->port, &flags); ++ mcfg = lrw_uart_read(sup, REG_MCFG); ++ if (break_state == -1) ++ mcfg |= UARTMCFG_BRK; ++ else ++ mcfg &= ~UARTMCFG_BRK; ++ lrw_uart_write(mcfg, sup, REG_MCFG); ++ uart_port_unlock_irqrestore(&sup->port, flags); ++} + -+ cval = UART_LCR_WLEN(tty_get_char_size(termios->c_cflag)); ++#ifdef CONFIG_CONSOLE_POLL + -+ if 
(termios->c_cflag & CSTOPB) -+ cval |= UART_LCR_STOP; -+ if (termios->c_cflag & PARENB) -+ cval |= UART_LCR_PARITY; -+ if (!(termios->c_cflag & PARODD)) -+ cval |= UART_LCR_EPAR; -+ -+ baud = uart_get_baud_rate(port, termios, old, 0, 4000000); -+ if (!baud) -+ baud = 9600; -+ ret = pxa_set_baudrate_clk(port, baud); -+ if (ret < 0) { -+ dev_err(port->dev, "Failed to set baud rate clk: %d\n", ret); -+ return; -+ } -+ if (tty_termios_baud_rate(termios)) -+ tty_termios_encode_baud_rate(termios, baud, baud); ++static void lrw_uart_quiesce_irqs(struct uart_port *port) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ quot = uart_get_divisor(port, baud); ++ lrw_uart_write(lrw_uart_read(sup, REG_MIS), sup, REG_ICR); ++ /* ++ * There is no way to clear TXIM as this is "ready to transmit IRQ", so ++ * we simply mask it. start_tx() will unmask it. ++ * ++ * Note we can race with start_tx(), and if the race happens, the ++ * polling user might get another interrupt just after we clear it. ++ * But it should be OK and can happen even w/o the race, e.g. ++ * controller immediately got some new data and raised the IRQ. ++ * ++ * And whoever uses polling routines assumes that it manages the device ++ * (including tx queue), so we're also fine with start_tx()'s caller ++ * side. 
++ */ ++ lrw_uart_write(lrw_uart_read(sup, REG_IMSC) & ~UARTIMSC_TXIM, sup, ++ REG_IMSC); ++} + -+ if (!quot) -+ quot = 1; -+ if (up->dma_enable) { -+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32 | UART_FCR_PXA_TRAIL; -+ fcr &= ~UART_FCR_PXA_BUS32; -+ } else { -+ if ((up->port.uartclk / quot) < (2400 * 16)) -+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR1; -+ else if ((up->port.uartclk / quot) < (230400 * 16)) -+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR8; -+ else -+ fcr = UART_FCR_ENABLE_FIFO | UART_FCR_PXAR32; -+ } ++static int lrw_uart_get_poll_char(struct uart_port *port) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); ++ unsigned int status; + -+ spin_lock_irqsave(&up->port.lock, flags); -+ up->ier |= UART_IER_UUE; -+ uart_update_timeout(port, termios->c_cflag, baud); -+ up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR; -+ if (termios->c_iflag & INPCK) -+ up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE; -+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) -+ up->port.read_status_mask |= UART_LSR_BI; ++ /* ++ * The caller might need IRQs lowered, e.g. if used with KDB NMI ++ * debugger. 
++ */ ++ lrw_uart_quiesce_irqs(port); + -+ up->port.ignore_status_mask = 0; -+ if (termios->c_iflag & IGNPAR) -+ up->port.ignore_status_mask |= UART_LSR_PE | UART_LSR_FE; -+ if (termios->c_iflag & IGNBRK) { -+ up->port.ignore_status_mask |= UART_LSR_BI; -+ if (termios->c_iflag & IGNPAR) -+ up->port.ignore_status_mask |= UART_LSR_OE; -+ } ++ status = lrw_uart_read(sup, REG_FR); ++ if (status & UARTFR_RXFE) ++ return NO_POLL_CHAR; + -+ if ((termios->c_cflag & CREAD) == 0) -+ up->port.ignore_status_mask |= UART_LSR_DR; ++ return lrw_uart_read(sup, REG_DR); ++} + -+ if (!up->dma_enable) { -+ up->ier &= ~UART_IER_MSI; -+ if (UART_ENABLE_MS(&up->port, termios->c_cflag)) -+ up->ier |= UART_IER_MSI; -+ } ++static void lrw_uart_put_poll_char(struct uart_port *port, unsigned char ch) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ serial_out(up, UART_IER, up->ier); ++ while (lrw_uart_read(sup, REG_FR) & UARTFR_TXFF) ++ cpu_relax(); + -+ if (termios->c_cflag & CRTSCTS) -+ up->mcr |= UART_MCR_AFE; -+ else -+ up->mcr &= ~UART_MCR_AFE; ++ lrw_uart_write(ch, sup, REG_DR); ++} + -+ serial_out(up, UART_LCR, cval | UART_LCR_DLAB); -+ serial_out(up, UART_DLM, (quot >> 8) & 0xff); -+ (void)serial_in(up, UART_DLM); -+ serial_out(up, UART_DLL, quot & 0xff); ++#endif /* CONFIG_CONSOLE_POLL */ + -+ (void)serial_in(up, UART_DLL); -+ dll = serial_in(up, UART_DLL); -+ WARN(dll != (quot & 0xff), "uart %d baud %d target 0x%x real 0x%x\n", -+ up->port.line, baud, quot & 0xff, dll); ++static int lrw_uart_hwinit(struct uart_port *port) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); ++ int retval; ++ unsigned int clk; ++ ++ /* Optionaly enable pins to be muxed in and configured */ ++ pinctrl_pm_select_default_state(port->dev); + -+ serial_out(up, UART_LCR, cval); -+ up->lcr = cval; -+ serial_pxa_set_mctrl(&up->port, up->port.mctrl); -+ serial_out(up, UART_FCR, fcr); -+ spin_unlock_irqrestore(&up->port.lock, flags); 
++ /* ++ * Try to enable the clock producer. ++ */ ++ retval = clk_prepare_enable(sup->clk); ++ if (retval) ++ return retval; + -+ if (uart_console(&up->port)) { -+ up->cons_udelay = 1000000000 / baud * 10 / 8 / 1000; -+ if (up->cons_udelay <= 0) -+ up->cons_udelay = 1; -+ if (up->cons_udelay > 20) -+ up->cons_udelay = 20; ++ if (has_acpi_companion(sup->port.dev)) { ++ device_property_read_u32(sup->port.dev, "clock-frequency", &clk); ++ sup->port.uartclk = clk; ++ } else { ++ sup->port.uartclk = clk_get_rate(sup->clk); + } + -+ if (up->dma_enable && up->uart_dma.dma_init) -+ pxa_uart_receive_dma_start(up); ++ /* Clear pending error and receive interrupts */ ++ lrw_uart_write(UARTICR_OEIC | UARTICR_BEIC | UARTICR_PEIC | ++ UARTICR_FEIC | UARTICR_RTIC | UARTICR_RXIC, ++ sup, REG_ICR); ++ ++ /* ++ * Save interrupts enable mask, and enable RX interrupts in case if ++ * the interrupt is used for NMI entry. ++ */ ++ sup->im = lrw_uart_read(sup, REG_IMSC); ++ lrw_uart_write(UARTIMSC_RTIM | UARTIMSC_RXIM, sup, REG_IMSC); ++ ++ if (dev_get_platdata(sup->port.dev)) { ++ struct lrw_uart_data *plat; ++ ++ plat = dev_get_platdata(sup->port.dev); ++ if (plat->init) ++ plat->init(); ++ } ++ return 0; +} + -+static void serial_pxa_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) ++static int lrw_uart_allocate_irq(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ lrw_uart_write(sup->im, sup, REG_IMSC); + -+ if (!state) { -+ clk_prepare_enable(up->gclk); -+ clk_prepare_enable(up->fclk); -+ } else { -+ clk_disable_unprepare(up->fclk); -+ clk_disable_unprepare(up->gclk); ++ return request_irq(sup->port.irq, lrw_uart_int, IRQF_SHARED, "lrw-uart", sup); ++} ++ ++/* ++ * Enable interrupts, only timeouts when using DMA ++ * if initial RX DMA job failed, start in interrupt mode ++ * as well. 
++ */ ++static void lrw_uart_enable_interrupts(struct lrw_uart_port *sup) ++{ ++ unsigned long flags; ++ unsigned int i; ++ ++ uart_port_lock_irqsave(&sup->port, &flags); ++ ++ /* Clear out any spuriously appearing RX interrupts */ ++ lrw_uart_write(UARTICR_RTIC | UARTICR_RXIC, sup, REG_ICR); ++ ++ /* ++ * RXIS is asserted only when the RX FIFO transitions from below ++ * to above the trigger threshold. If the RX FIFO is already ++ * full to the threshold this can't happen and RXIS will now be ++ * stuck off. Drain the RX FIFO explicitly to fix this: ++ */ ++ for (i = 0; i < sup->fifosize * 2; ++i) { ++ if (lrw_uart_read(sup, REG_FR) & UARTFR_RXFE) ++ break; ++ ++ lrw_uart_read(sup, REG_DR); + } ++ ++ sup->im = UARTIMSC_RTIM; ++ if (!lrw_uart_dma_rx_running(sup)) ++ sup->im |= UARTIMSC_RXIM; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ uart_port_unlock_irqrestore(&sup->port, flags); +} + -+static void serial_pxa_release_port(struct uart_port *port) ++static void lrw_uart_unthrottle_rx(struct uart_port *port) +{ ++ struct lrw_uart_port *sup = container_of(port, struct lrw_uart_port, port); ++ unsigned long flags; ++ ++ uart_port_lock_irqsave(&sup->port, &flags); ++ ++ sup->im = UARTIMSC_RTIM; ++ if (!lrw_uart_dma_rx_running(sup)) ++ sup->im |= UARTIMSC_RXIM; ++ ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ ++ uart_port_unlock_irqrestore(&sup->port, flags); +} + -+static int serial_pxa_request_port(struct uart_port *port) ++static int lrw_uart_startup(struct uart_port *port) +{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); ++ unsigned int mcr; ++ unsigned int mcfg; ++ int retval; ++ ++ retval = lrw_uart_hwinit(port); ++ if (retval) ++ goto clk_dis; ++ ++ retval = lrw_uart_allocate_irq(sup); ++ if (retval) ++ goto clk_dis; ++ ++ lrw_uart_write(sup->vendor->fcr, sup, REG_FCR); ++ ++ uart_port_lock_irq(&sup->port); ++ ++ mcr = lrw_uart_read(sup, REG_MCR); ++ mcr &= UARTMCR_RTS | UARTMCR_DTR; ++ ++ lrw_uart_write(mcr, sup, REG_MCR); 
++ ++ mcfg = lrw_uart_read(sup, REG_MCFG); ++ ++ mcfg |= UARTMCFG_UARTEN | UARTMCFG_RXE; ++ ++ if (!(port->rs485.flags & SER_RS485_ENABLED)) ++ mcfg |= UARTMCFG_TXE; ++ ++ lrw_uart_write(mcfg, sup, REG_MCFG); ++ ++ uart_port_unlock_irq(&sup->port); ++ ++ /* ++ * initialise the old status of the modem signals ++ */ ++ sup->old_status = lrw_uart_read(sup, REG_FR) & UARTFR_MODEM_ANY; ++ ++ /* Startup DMA */ ++ lrw_uart_dma_startup(sup); ++ ++ lrw_uart_enable_interrupts(sup); ++ + return 0; ++ ++ clk_dis: ++ clk_disable_unprepare(sup->clk); ++ return retval; +} + -+static void serial_pxa_config_port(struct uart_port *port, int flags) ++static void lrw_uart_shutdown_channel(struct lrw_uart_port *sup, ++ unsigned int mcfg, unsigned int fcr) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ unsigned long val; ++ ++ val = lrw_uart_read(sup, mcfg); ++ val &= ~(UARTMCFG_BRK); ++ lrw_uart_write(val, sup, mcfg); + -+ up->port.type = PORT_PXA; ++ val = lrw_uart_read(sup, fcr); ++ val &= ~(UARTFCR_FEN); ++ lrw_uart_write(val, sup, fcr); +} + -+static int serial_pxa_verify_port(struct uart_port *port, struct serial_struct *ser) ++/* ++ * disable the port. It should not disable RTS and DTR. ++ * Also RTS and DTR state should be preserved to restore ++ * it during startup(). 
++ */ ++static void lrw_uart_disable_uart(struct lrw_uart_port *sup) +{ -+ return -EINVAL; ++ unsigned int mcr; ++ unsigned int mcfg; ++ ++ sup->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); ++ uart_port_lock_irq(&sup->port); ++ mcr = lrw_uart_read(sup, REG_MCR); ++ mcr &= UARTMCR_RTS | UARTMCR_DTR; ++ lrw_uart_write(mcr, sup, REG_MCR); ++ ++ mcfg = lrw_uart_read(sup, REG_MCFG); ++ mcfg |= UARTMCFG_UARTEN | UARTMCFG_TXE; ++ lrw_uart_write(mcfg, sup, REG_MCFG); ++ uart_port_unlock_irq(&sup->port); ++ ++ /* ++ * disable break condition and fifos ++ */ ++ lrw_uart_shutdown_channel(sup, REG_MCFG, REG_FCR); +} + -+static const char *serial_pxa_type(struct uart_port *port) ++static void lrw_uart_disable_interrupts(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ uart_port_lock_irq(&sup->port); + -+ return up->name; -+} ++ /* mask all interrupts and clear all pending ones */ ++ sup->im = 0; ++ lrw_uart_write(sup->im, sup, REG_IMSC); ++ lrw_uart_write(0xffff, sup, REG_ICR); + -+static struct uart_pxa_port *serial_pxa_ports[NUM_UART_PORTS]; -+static struct uart_driver serial_pxa_reg; ++ uart_port_unlock_irq(&sup->port); ++} + -+#ifdef CONFIG_PM -+void serial_pxa_get_qos(int port) ++static void lrw_uart_shutdown(struct uart_port *port) +{ -+ struct uart_pxa_port *up; ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ if (port < 0 || port >= NUM_UART_PORTS) { -+ pr_err("%s: wrong uart port %d\n", __func__, port); -+ return; ++ lrw_uart_disable_interrupts(sup); ++ ++ lrw_uart_dma_shutdown(sup); ++ ++ if ((port->rs485.flags & SER_RS485_ENABLED) && sup->rs485_tx_started) ++ lrw_uart_rs485_tx_stop(sup); ++ ++ free_irq(sup->port.irq, sup); ++ ++ lrw_uart_disable_uart(sup); ++ ++ /* ++ * Shut down the clock producer ++ */ ++ clk_disable_unprepare(sup->clk); ++ /* Optionally let pins go into sleep states */ ++ pinctrl_pm_select_sleep_state(port->dev); ++ ++ if (dev_get_platdata(sup->port.dev)) { ++ 
struct lrw_uart_data *plat; ++ ++ plat = dev_get_platdata(sup->port.dev); ++ if (plat->exit) ++ plat->exit(); + } + -+ up = serial_pxa_ports[port]; -+ if (!mod_timer(&up->pxa_timer, jiffies + PXA_TIMER_TIMEOUT)) -+ pm_runtime_get_sync(up->port.dev); ++ if (sup->port.ops->flush_buffer) ++ sup->port.ops->flush_buffer(port); +} -+EXPORT_SYMBOL_GPL(serial_pxa_get_qos); -+#endif + -+void serial_pxa_assert_rts(int port) ++static void ++lrw_uart_setup_status_masks(struct uart_port *port, struct ktermios *termios) +{ -+ struct uart_pxa_port *up; -+ unsigned long flags; ++ port->read_status_mask = UARTDR_OE | 255; ++ if (termios->c_iflag & INPCK) ++ port->read_status_mask |= UARTDR_FE | UARTDR_PE; ++ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) ++ port->read_status_mask |= UARTDR_BE; + -+ if (port < 0 || port >= NUM_UART_PORTS) { -+ pr_err("%s: wrong uart port %d\n", __func__, port); -+ return; ++ /* ++ * Characters to ignore ++ */ ++ port->ignore_status_mask = 0; ++ if (termios->c_iflag & IGNPAR) ++ port->ignore_status_mask |= UARTDR_FE | UARTDR_PE; ++ if (termios->c_iflag & IGNBRK) { ++ port->ignore_status_mask |= UARTDR_BE; ++ /* ++ * If we're ignoring parity and break indicators, ++ * ignore overruns too (for real raw support). ++ */ ++ if (termios->c_iflag & IGNPAR) ++ port->ignore_status_mask |= UARTDR_OE; + } + -+ up = serial_pxa_ports[port]; -+ -+ spin_lock_irqsave(&up->port.lock, flags); -+ if (!serial_pxa_is_open(up)) { -+ spin_unlock_irqrestore(&up->port.lock, flags); -+ pr_err("%s: uart %d is shutdown\n", __func__, port); -+ return; -+ } -+ serial_pxa_set_mctrl(&up->port, up->port.mctrl | TIOCM_RTS); -+ uart_handle_cts_change(&up->port, UART_MSR_CTS); -+ spin_unlock_irqrestore(&up->port.lock, flags); ++ /* ++ * Ignore all characters if CREAD is not set. 
++ */ ++ if ((termios->c_cflag & CREAD) == 0) ++ port->ignore_status_mask |= UART_DUMMY_DR_RX; +} -+EXPORT_SYMBOL_GPL(serial_pxa_assert_rts); + -+void serial_pxa_deassert_rts(int port) -+{ -+ struct uart_pxa_port *up; ++static void ++lrw_uart_set_termios(struct uart_port *port, struct ktermios *termios, ++ const struct ktermios *old) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); ++ unsigned int frcr; ++ unsigned int mcr, fccr; ++ unsigned int mcfg; + unsigned long flags; ++ unsigned int baud, quot, clkdiv; ++ unsigned int bits; ++ unsigned int clk; + -+ if (port < 0 || port >= NUM_UART_PORTS) { -+ pr_err("%s: wrong uart port %d\n", __func__, port); -+ return; ++ if (sup->vendor->oversampling) ++ clkdiv = 8; ++ else ++ clkdiv = 16; ++ ++ if (has_acpi_companion(sup->port.dev)) { ++ device_property_read_u32(sup->port.dev, "clock-frequency", &clk); ++ sup->port.uartclk = clk; + } + -+ up = serial_pxa_ports[port]; ++ /* ++ * Ask the core to calculate the divisor for us. ++ */ ++ baud = uart_get_baud_rate(port, termios, old, 0, ++ port->uartclk / clkdiv); + -+ spin_lock_irqsave(&up->port.lock, flags); -+ if (!serial_pxa_is_open(up)) { -+ spin_unlock_irqrestore(&up->port.lock, flags); -+ pr_err("%s: uart %d is shutdown\n", __func__, port); -+ return; ++#ifdef CONFIG_DMA_ENGINE ++ /* ++ * Adjust RX DMA polling rate with baud rate if not specified. 
++ */ ++ if (sup->dmarx.auto_poll_rate) ++ sup->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud); ++#endif ++ ++ if (baud > port->uartclk / 16) ++ quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); ++ else ++ quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); ++ ++ switch (termios->c_cflag & CSIZE) { ++ case CS5: ++ frcr = UARTFRCR_WLEN_5; ++ break; ++ case CS6: ++ frcr = UARTFRCR_WLEN_6; ++ break; ++ case CS7: ++ frcr = UARTFRCR_WLEN_7; ++ break; ++ default: // CS8 ++ frcr = UARTFRCR_WLEN_8; ++ break; + } -+ serial_pxa_set_mctrl(&up->port, up->port.mctrl & ~TIOCM_RTS); -+ spin_unlock_irqrestore(&up->port.lock, flags); -+} -+EXPORT_SYMBOL_GPL(serial_pxa_deassert_rts); + -+#ifdef CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE ++ if (termios->c_cflag & CSTOPB) ++ frcr |= UARTFRCR_STP2; ++ if (termios->c_cflag & PARENB) { ++ frcr |= UARTFRCR_PEN; ++ if (!(termios->c_cflag & PARODD)) ++ frcr |= UARTFRCR_EOP; ++ if (termios->c_cflag & CMSPAR) ++ frcr |= UARTFRCR_SPS; ++ } + -+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) + -+static void wait_for_xmitr(struct uart_pxa_port *up) -+{ -+ unsigned int status, tmout = 10000; -+ unsigned int cycle; ++ bits = tty_get_frame_size(termios->c_cflag); + -+ if (uart_console(&up->port)) -+ cycle = up->cons_udelay; -+ else -+ cycle = 1; ++ uart_port_lock_irqsave(port, &flags); + -+ tmout = 10000 / cycle; ++ /* ++ * Update the per-port timeout. ++ */ ++ uart_update_timeout(port, termios->c_cflag, baud); + -+ do { -+ status = serial_in(up, UART_LSR); ++ /* ++ * Calculate the approximated time it takes to transmit one character ++ * with the given baud rate. We use this as the poll interval when we ++ * wait for the tx queue to empty. 
++ */ ++ sup->rs485_tx_drain_interval = DIV_ROUND_UP(bits * 1000 * 1000, baud); + -+ if (status & UART_LSR_BI) -+ up->lsr_break_flag = UART_LSR_BI; ++ lrw_uart_setup_status_masks(port, termios); + -+ if ((status & BOTH_EMPTY) == BOTH_EMPTY) -+ break; -+ udelay(cycle); -+ } while (--tmout); ++ if (UART_ENABLE_MS(port, termios->c_cflag)) ++ lrw_uart_enable_ms(port); + -+ if (up->port.flags & UPF_CONS_FLOW) { -+ tmout = 1000000; -+ while (--tmout && ((serial_in(up, UART_MSR) & UART_MSR_CTS) == 0)) -+ udelay(cycle); ++ if (port->rs485.flags & SER_RS485_ENABLED) ++ termios->c_cflag &= ~CRTSCTS; ++ ++ mcr = lrw_uart_read(sup, REG_MCR); ++ mcfg = lrw_uart_read(sup, REG_MCFG); ++ fccr = lrw_uart_read(sup, REG_FCCR); ++ ++ if (termios->c_cflag & CRTSCTS) { ++ if (mcr & UARTMCR_RTS) ++ fccr |= UARTFCCR_RTSEN; ++ ++ fccr |= UARTFCCR_CTSEN; ++ port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; ++ } else { ++ fccr &= ~(UARTFCCR_CTSEN | UARTFCCR_RTSEN); ++ port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); + } + -+ if (!tmout) { -+ if (up->port.flags & UPF_CONS_FLOW) -+ status = serial_in(up, UART_MSR); -+ else -+ status = serial_in(up, UART_LSR); -+ panic("failed to read uart status, status:0x%08x\n", status); ++ /* Set baud rate */ ++ lrw_uart_write(quot & 0x3f, sup, REG_FD); ++ lrw_uart_write(quot >> 6, sup, REG_IND); ++ ++ /* ++ * ----------v----------v----------v----------v----- ++ * NOTE: REG_FRCR MUST BE WRITTEN AFTER REG_FD & REG_IND. ++ * ----------^----------^----------^----------^----- ++ */ ++ lrw_uart_write(frcr, sup, REG_FRCR); ++ ++ lrw_uart_write(fccr, sup, REG_FCCR); ++ ++ /* ++ * Receive was disabled by lrw_uart_disable_uart during shutdown. ++ * Need to reenable receive if you need to use a tty_driver ++ * returns from tty_find_polling_driver() after a port shutdown. 
++ */ ++ mcfg |= UARTMCFG_RXE | UARTMCFG_UARTEN; ++ lrw_uart_write(mcfg, sup, REG_MCFG); ++ ++ uart_port_unlock_irqrestore(port, flags); ++} ++ ++static const char *lrw_uart_type(struct uart_port *port) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); ++ return sup->port.type == PORT_LRW ? sup->type : NULL; ++} ++ ++/* ++ * Configure/autoconfigure the port. ++ */ ++static void lrw_uart_config_port(struct uart_port *port, int flags) ++{ ++ if (flags & UART_CONFIG_TYPE) ++ port->type = PORT_LRW; ++} ++ ++/* ++ * verify the new serial_struct (for TIOCSSERIAL). ++ */ ++static int lrw_uart_verify_port(struct uart_port *port, struct serial_struct *ser) ++{ ++ int ret = 0; ++ ++ if (ser->type != PORT_UNKNOWN && ser->type != PORT_LRW) ++ ret = -EINVAL; ++ if (ser->irq < 0 || ser->irq >= nr_irqs) ++ ret = -EINVAL; ++ if (ser->baud_base < 9600) ++ ret = -EINVAL; ++ if (port->mapbase != (unsigned long)ser->iomem_base) ++ ret = -EINVAL; ++ return ret; ++} ++ ++static int lrw_uart_rs485_config(struct uart_port *port, struct ktermios *termios, ++ struct serial_rs485 *rs485) ++{ ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); ++ ++ if (port->rs485.flags & SER_RS485_ENABLED) ++ lrw_uart_rs485_tx_stop(sup); ++ ++ /* Make sure auto RTS is disabled */ ++ if (rs485->flags & SER_RS485_ENABLED) { ++ u32 fccr = lrw_uart_read(sup, REG_FCCR); ++ ++ fccr &= ~UARTFCCR_RTSEN; ++ lrw_uart_write(fccr, sup, REG_FCCR); ++ port->status &= ~UPSTAT_AUTORTS; + } ++ ++ return 0; +} + -+static void serial_pxa_console_putchar(struct uart_port *port, unsigned char ch) ++static const struct uart_ops lrw_uart_pops = { ++ .tx_empty = lrw_uart_tx_empty, ++ .set_mctrl = lrw_uart_set_mctrl, ++ .get_mctrl = lrw_uart_get_mctrl, ++ .stop_tx = lrw_uart_stop_tx, ++ .start_tx = lrw_uart_start_tx, ++ .stop_rx = lrw_uart_stop_rx, ++ .throttle = lrw_uart_throttle_rx, ++ .unthrottle = lrw_uart_unthrottle_rx, ++ .enable_ms = lrw_uart_enable_ms, ++ 
.break_ctl = lrw_uart_break_ctl, ++ .startup = lrw_uart_startup, ++ .shutdown = lrw_uart_shutdown, ++ .flush_buffer = lrw_uart_dma_flush_buffer, ++ .set_termios = lrw_uart_set_termios, ++ .type = lrw_uart_type, ++ .config_port = lrw_uart_config_port, ++ .verify_port = lrw_uart_verify_port, ++#ifdef CONFIG_CONSOLE_POLL ++ .poll_init = lrw_uart_hwinit, ++ .poll_get_char = lrw_uart_get_poll_char, ++ .poll_put_char = lrw_uart_put_poll_char, ++#endif ++}; ++ ++static struct lrw_uart_port *lrw_uart_console_ports[UART_NR]; ++ ++#ifdef CONFIG_SERIAL_LRW_UART_CONSOLE ++ ++static void lrw_uart_console_putchar(struct uart_port *port, unsigned char ch) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ struct lrw_uart_port *sup = ++ container_of(port, struct lrw_uart_port, port); + -+ wait_for_xmitr(up); -+ serial_out(up, UART_TX, ch); ++ while (lrw_uart_read(sup, REG_FR) & UARTFR_TXFF) ++ cpu_relax(); ++ lrw_uart_write(ch, sup, REG_DR); +} + -+static void serial_pxa_console_write(struct console *co, const char *s, unsigned int count) ++static void ++lrw_uart_console_write(struct console *co, const char *s, unsigned int count) +{ -+ struct uart_pxa_port *up = serial_pxa_ports[co->index]; -+ unsigned int ier; ++ struct lrw_uart_port *sup = lrw_uart_console_ports[co->index]; ++ unsigned int old_fccr = 0, new_fccr; ++ unsigned int old_mcfg = 0, new_mcfg; + unsigned long flags; + int locked = 1; + -+ clk_enable(up->gclk); -+ clk_enable(up->fclk); ++ clk_enable(sup->clk); + + local_irq_save(flags); -+ if (up->port.sysrq) ++ if (sup->port.sysrq) + locked = 0; + else if (oops_in_progress) -+ locked = spin_trylock(&up->port.lock); ++ locked = uart_port_trylock(&sup->port); + else -+ spin_lock(&up->port.lock); ++ uart_port_lock(&sup->port); + -+ ier = serial_in(up, UART_IER); -+ serial_out(up, UART_IER, UART_IER_UUE); ++ /* ++ * First save the FCCR then disable the interrupts ++ */ ++ if (!sup->vendor->always_enabled) { ++ old_fccr = lrw_uart_read(sup, REG_FCCR); ++ 
new_fccr = old_fccr & ~UARTFCCR_CTSEN; ++ lrw_uart_write(new_fccr, sup, REG_FCCR); + -+ uart_console_write(&up->port, s, count, serial_pxa_console_putchar); ++ old_mcfg = lrw_uart_read(sup, REG_MCFG); ++ new_mcfg |= UARTMCFG_UARTEN | UARTMCFG_TXE; ++ lrw_uart_write(new_mcfg, sup, REG_MCFG); ++ } ++ ++ uart_console_write(&sup->port, s, count, lrw_uart_console_putchar); + -+ wait_for_xmitr(up); -+ serial_out(up, UART_IER, ier); ++ /* ++ * Finally, wait for transmitter to become empty and restore the ++ * TCR. Allow feature register bits to be inverted to work around ++ * errata. ++ */ ++ while ((lrw_uart_read(sup, REG_FR) ^ sup->vendor->inv_fr) ++ & sup->vendor->fr_busy) ++ cpu_relax(); ++ if (!sup->vendor->always_enabled) { ++ lrw_uart_write(old_fccr, sup, REG_FCCR); ++ lrw_uart_write(old_mcfg, sup, REG_MCFG); ++ } + + if (locked) -+ spin_unlock(&up->port.lock); ++ uart_port_unlock(&sup->port); + local_irq_restore(flags); -+ clk_disable(up->fclk); -+ clk_disable(up->gclk); ++ ++ clk_disable(sup->clk); +} + -+#ifdef CONFIG_CONSOLE_POLL -+static int serial_pxa_get_poll_char(struct uart_port *port) ++static void lrw_uart_console_get_options(struct lrw_uart_port *sup, int *baud, ++ int *parity, int *bits) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; -+ unsigned char lsr = serial_in(up, UART_LSR); ++ unsigned int frcr, ind, fd; + -+ while (!(lsr & UART_LSR_DR)) -+ lsr = serial_in(up, UART_LSR); ++ if (!(lrw_uart_read(sup, REG_MCFG) & UARTMCFG_UARTEN)) ++ return; + -+ return serial_in(up, UART_RX); -+} ++ frcr = lrw_uart_read(sup, REG_FRCR); + -+static void serial_pxa_put_poll_char(struct uart_port *port, -+ unsigned char c) -+{ -+ unsigned int ier; -+ struct uart_pxa_port *up = (struct uart_pxa_port *)port; ++ *parity = 'n'; ++ if (frcr & UARTFRCR_PEN) { ++ if (frcr & UARTFRCR_EOP) ++ *parity = 'e'; ++ else ++ *parity = 'o'; ++ } + -+ ier = serial_in(up, UART_IER); -+ serial_out(up, UART_IER, UART_IER_UUE); ++ if ((frcr & 0x3) == UARTFRCR_WLEN_7) ++ 
*bits = 7; ++ else ++ *bits = 8; + -+ wait_for_xmitr(up); -+ serial_out(up, UART_TX, c); -+ if (c == 10) { -+ wait_for_xmitr(up); -+ serial_out(up, UART_TX, 13); -+ } ++ ind = lrw_uart_read(sup, REG_IND); ++ fd = lrw_uart_read(sup, REG_FD); + -+ wait_for_xmitr(up); -+ serial_out(up, UART_IER, ier); ++ *baud = sup->port.uartclk * 4 / (64 * ind + fd); +} + -+#endif /* CONFIG_CONSOLE_POLL */ -+ -+static int __init -+serial_pxa_console_setup(struct console *co, char *options) ++static int lrw_uart_console_setup(struct console *co, char *options) +{ -+ struct uart_pxa_port *up; -+ int baud = 9600; ++ struct lrw_uart_port *sup; ++ int baud = 38400; + int bits = 8; + int parity = 'n'; + int flow = 'n'; ++ int ret; ++ unsigned int clk; + -+ if (co->index == -1 || co->index >= serial_pxa_reg.nr) ++ /* ++ * Check whether an invalid uart number has been specified, and ++ * if so, search for the first available port that does have ++ * console support. ++ */ ++ if (co->index >= UART_NR) + co->index = 0; -+ up = serial_pxa_ports[co->index]; -+ if (!up) ++ sup = lrw_uart_console_ports[co->index]; ++ if (!sup) + return -ENODEV; + -+ if (options) -+ uart_parse_options(options, &baud, &parity, &bits, &flow); ++ /* Allow pins to be muxed in and configured */ ++ pinctrl_pm_select_default_state(sup->port.dev); + -+ return uart_set_options(&up->port, co, baud, parity, bits, flow); -+} ++ ret = clk_prepare(sup->clk); ++ if (ret) ++ return ret; + -+static struct console serial_pxa_console = { -+ .name = "ttySP", -+ .write = serial_pxa_console_write, -+ .device = uart_console_device, -+ .setup = serial_pxa_console_setup, -+ .flags = CON_PRINTBUFFER, -+ .index = -1, -+ .data = &serial_pxa_reg, -+}; ++ if (dev_get_platdata(sup->port.dev)) { ++ struct lrw_uart_data *plat; + -+static void pxa_early_write(struct console *con, const char *s, -+ unsigned int n) -+{ -+ struct earlycon_device *dev = con->data; ++ plat = dev_get_platdata(sup->port.dev); ++ if (plat->init) ++ plat->init(); ++ } + -+ 
uart_console_write(&dev->port, s, n, serial_pxa_console_putchar); -+} ++ if (has_acpi_companion(sup->port.dev)) { ++ device_property_read_u32(sup->port.dev, "clock-frequency", &clk); ++ sup->port.uartclk = clk; ++ } else { ++ sup->port.uartclk = clk_get_rate(sup->clk); ++ } + -+static int __init pxa_early_console_setup(struct earlycon_device *device, const char *opt) -+{ -+ if (!device->port.membase) -+ return -ENODEV; ++ if (sup->vendor->fixed_options) { ++ baud = sup->fixed_baud; ++ } else { ++ if (options) ++ uart_parse_options(options, ++ &baud, &parity, &bits, &flow); ++ else ++ lrw_uart_console_get_options(sup, &baud, &parity, &bits); ++ } + -+ device->con->write = pxa_early_write; -+ return 0; ++ return uart_set_options(&sup->port, co, baud, parity, bits, flow); +} + -+EARLYCON_DECLARE(pxa_serial, pxa_early_console_setup); -+OF_EARLYCON_DECLARE(pxa_serial, "spacemit,pxa-uart", pxa_early_console_setup); ++/** ++ * lrw_uart_console_match - non-standard console matching ++ * @co: registering console ++ * @name: name from console command line ++ * @idx: index from console command line ++ * @options: ptr to option string from console command line ++ * ++ * Only attempts to match console command lines of the form: ++ * console=lrw_uart,mmio|mmio32,[,] ++ * console=lrw_uart,0x[,] ++ * This form is used to register an initial earlycon boot console and ++ * replace it with the lrw_uart_console at lrw_uart driver init. ++ * ++ * Performs console setup for a match (as required by interface) ++ * If no are specified, then assume the h/w is already setup. 
++ * ++ * Returns 0 if console matches; otherwise non-zero to use default matching ++ */ ++static int lrw_uart_console_match(struct console *co, char *name, int idx, ++ char *options) ++{ ++ unsigned char iotype; ++ resource_size_t addr; ++ int i; + -+#define PXA_CONSOLE (&serial_pxa_console) -+#else -+#define PXA_CONSOLE NULL -+#endif /* CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE */ ++ if (strcmp(name, "lrw_uart") != 0) ++ return -ENODEV; + -+static const struct uart_ops serial_pxa_pops = { -+ .tx_empty = serial_pxa_tx_empty, -+ .set_mctrl = serial_pxa_set_mctrl, -+ .get_mctrl = serial_pxa_get_mctrl, -+ .stop_tx = serial_pxa_stop_tx, -+ .start_tx = serial_pxa_start_tx, -+ .stop_rx = serial_pxa_stop_rx, -+ .enable_ms = serial_pxa_enable_ms, -+ .break_ctl = serial_pxa_break_ctl, -+ .startup = serial_pxa_startup, -+ .shutdown = serial_pxa_shutdown, -+ .set_termios = serial_pxa_set_termios, -+ .pm = serial_pxa_pm, -+ .type = serial_pxa_type, -+ .release_port = serial_pxa_release_port, -+ .request_port = serial_pxa_request_port, -+ .config_port = serial_pxa_config_port, -+ .verify_port = serial_pxa_verify_port, -+#if defined(CONFIG_CONSOLE_POLL) && defined(CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE) -+ .poll_get_char = serial_pxa_get_poll_char, -+ .poll_put_char = serial_pxa_put_poll_char, -+#endif -+}; ++ if (uart_parse_earlycon(options, &iotype, &addr, &options)) ++ return -ENODEV; + -+static struct uart_driver serial_pxa_reg = { -+ .owner = THIS_MODULE, -+ .driver_name = "PXA serial", -+ .dev_name = "ttySP", -+ .major = TTY_MAJOR, -+ .minor = 128, -+ .nr = NUM_UART_PORTS, -+ .cons = PXA_CONSOLE, -+}; ++ if (iotype != UPIO_MEM && iotype != UPIO_MEM32) ++ return -ENODEV; + -+static int serial_pxa_is_open(struct uart_pxa_port *up) -+{ -+ struct uart_state *state; -+ struct uart_pxa_dma *pxa_dma; ++ /* try to match the port specified on the command line */ ++ for (i = 0; i < ARRAY_SIZE(lrw_uart_console_ports); i++) { ++ struct uart_port *port; + -+ if (!up) -+ return 0; ++ if 
(!lrw_uart_console_ports[i]) ++ continue; + -+ state = serial_pxa_reg.state + up->port.line; -+ pxa_dma = &up->uart_dma; ++ port = &lrw_uart_console_ports[i]->port; + -+ if (up->dma_enable) { -+ return ((up->ier & UART_IER_DMAE) && pxa_dma->dma_init && -+ (state->pm_state == UART_PM_STATE_ON)); -+ } else { -+ return (state->pm_state == UART_PM_STATE_ON); ++ if (port->mapbase != addr) ++ continue; ++ ++ co->index = i; ++ port->cons = co; ++ return lrw_uart_console_setup(co, options); + } ++ ++ return -ENODEV; +} + -+#ifdef CONFIG_PM ++static struct uart_driver lrw_uart_driver; + -+#ifdef CONFIG_HIBERNATION -+unsigned long pxa_clk_freq; -+struct clk *pxa_clk_parent; -+#endif ++static struct console lrw_uart_console = { ++ .name = LRW_UART_TTY_PREFIX, ++ .write = lrw_uart_console_write, ++ .device = uart_console_device, ++ .setup = lrw_uart_console_setup, ++ .match = lrw_uart_console_match, ++ .flags = CON_PRINTBUFFER | CON_ANYTIME, ++ .index = -1, ++ .data = &lrw_uart_driver, ++}; ++ ++#define LRW_UART_CONSOLE (&lrw_uart_console) + -+static int __maybe_unused serial_pxa_suspend(struct device *dev) ++static void lrw_uart_putc(struct uart_port *port, unsigned char c) +{ -+ struct uart_pxa_port *sport = dev_get_drvdata(dev); -+ struct uart_pxa_dma *pxa_dma = &sport->uart_dma; -+ struct dma_tx_state dma_state; -+ unsigned char tmp[256]; -+ int fifo_cnt, cnt = 0; ++ while (readl(port->membase + UARTFR) & UARTFR_TXFF) ++ cpu_relax(); ++ if (port->iotype == UPIO_MEM32) ++ writel(c, port->membase + UARTDR); ++ else ++ writeb(c, port->membase + UARTDR); ++ while (readl(port->membase + UARTFR) & UARTFR_BUSY) ++ cpu_relax(); ++} + -+ if (!console_suspend_enabled || !sport) -+ return 0; ++static void lrw_uart_early_write(struct console *con, const char *s, unsigned int n) ++{ ++ struct earlycon_device *dev = con->data; + -+ if (serial_pxa_is_open(sport) && sport->dma_enable) { -+ int sent = 0; -+ unsigned long flags; ++ uart_console_write(&dev->port, s, n, lrw_uart_putc); ++} + 
-+ local_irq_save(flags); -+ pxa_dma->tx_stop = 1; -+ pxa_dma->rx_stop = 1; -+ pxa_dma->tx_saved_len = 0; -+ if (dma_async_is_tx_complete(pxa_dma->txdma_chan, -+ pxa_dma->tx_cookie, NULL, NULL) != DMA_COMPLETE) { -+ dmaengine_pause(pxa_dma->txdma_chan); -+ udelay(100); -+ dmaengine_tx_status(pxa_dma->txdma_chan, -+ pxa_dma->tx_cookie, &dma_state); -+ sent = pxa_dma->tx_size - dma_state.residue; -+ pxa_dma->tx_saved_len = dma_state.residue; -+ memcpy(pxa_dma->tx_buf_save, pxa_dma->txdma_addr + sent, -+ dma_state.residue); -+ stop_dma(sport, PXA_UART_TX); -+ } -+ -+ if (dma_async_is_tx_complete(pxa_dma->rxdma_chan, -+ pxa_dma->rx_cookie, NULL, NULL) != DMA_COMPLETE) { -+ spin_lock(&sport->port.lock); -+ serial_pxa_set_mctrl(&sport->port, sport->port.mctrl & ~TIOCM_RTS); -+ spin_unlock(&sport->port.lock); -+ udelay(100); -+ dmaengine_pause(pxa_dma->rxdma_chan); -+ udelay(100); -+ pxa_uart_receive_dma_cb(sport); -+ stop_dma(sport, PXA_UART_RX); ++#ifdef CONFIG_CONSOLE_POLL ++static int lrw_uart_getc(struct uart_port *port) ++{ ++ if (readl(port->membase + UARTFR) & UARTFR_RXFE) ++ return NO_POLL_CHAR; + -+ fifo_cnt = serial_in(sport, UART_FOR); -+ while (fifo_cnt > 0) { -+ *(tmp + cnt) = serial_in(sport, UART_RX) & 0xff; -+ cnt++; -+ fifo_cnt = serial_in(sport, UART_FOR); -+ } ++ if (port->iotype == UPIO_MEM32) ++ return readl(port->membase + UARTDR); ++ else ++ return readb(port->membase + UARTDR); ++} + -+ if (cnt > 0) { -+ tty_insert_flip_string(&sport->port.state->port, tmp, cnt); -+ sport->port.icount.rx += cnt; -+ tty_flip_buffer_push(&sport->port.state->port); -+ } -+ } -+ local_irq_restore(flags); -+ } ++static int lrw_uart_early_read(struct console *con, char *s, unsigned int n) ++{ ++ struct earlycon_device *dev = con->data; ++ int ch, num_read = 0; + -+ if (sport) { -+#ifdef CONFIG_HIBERNATION -+ pxa_clk_freq = clk_get_rate(sport->fclk); -+ pxa_clk_parent = clk_get_parent(sport->fclk); -+#endif -+ uart_suspend_port(&serial_pxa_reg, &sport->port); -+#ifdef 
CONFIG_HIBERNATION -+ clk_set_parent(sport->fclk, NULL); -+#endif -+ } ++ while (num_read < n) { ++ ch = lrw_uart_getc(&dev->port); ++ if (ch == NO_POLL_CHAR) ++ break; + -+ if (del_timer_sync(&sport->pxa_timer)) -+ _pxa_timer_handler(sport); ++ s[num_read++] = ch; ++ } + -+ return 0; ++ return num_read; +} ++#else ++#define lrw_uart_early_read NULL ++#endif + -+static int __maybe_unused serial_pxa_resume(struct device *dev) ++/* ++ * On non-ACPI systems, earlycon is enabled by specifying ++ * "earlycon=lrw_uart,
" on the kernel command line. ++ * ++ * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table, ++ * by specifying only "earlycon" on the command line. Because it requires ++ * SPCR, the console starts after ACPI is parsed, which is later than a ++ * traditional early console. ++ * ++ * To get the traditional early console that starts before ACPI is parsed, ++ * specify the full "earlycon=lrw_uart,
" option. ++ */ ++static int __init lrw_uart_early_console_setup(struct earlycon_device *device, ++ const char *opt) +{ -+ struct uart_pxa_port *sport = dev_get_drvdata(dev); -+ struct uart_pxa_dma *pxa_dma = &sport->uart_dma; -+ -+ if (!console_suspend_enabled || !sport) -+ return 0; -+ -+ sport->in_resume = true; -+ -+#ifdef CONFIG_HIBERNATION -+ clk_set_parent(sport->fclk, pxa_clk_parent); -+ clk_set_rate(sport->fclk, pxa_clk_freq); -+#endif -+ uart_resume_port(&serial_pxa_reg, &sport->port); -+ -+ if (serial_pxa_is_open(sport) && sport->dma_enable) { -+ if (pxa_dma->tx_saved_len > 0) { -+ sport->from_resume = true; -+ memcpy(pxa_dma->txdma_addr, pxa_dma->tx_buf_save, -+ pxa_dma->tx_saved_len); -+ pxa_uart_transmit_dma_start(sport, -+ pxa_dma->tx_saved_len); -+ } else { -+ tasklet_schedule(&pxa_dma->tklet); -+ } ++ if (!device->port.membase) ++ return -ENODEV; + -+ pxa_uart_receive_dma_start(sport); -+ } -+ sport->in_resume = false; ++ device->con->write = lrw_uart_early_write; ++ device->con->read = lrw_uart_early_read; + + return 0; +} + -+static SIMPLE_DEV_PM_OPS(serial_pxa_pm_ops, serial_pxa_suspend, serial_pxa_resume); ++OF_EARLYCON_DECLARE(lrw_uart, "lrw-uart", lrw_uart_early_console_setup); + -+static void _pxa_timer_handler(struct uart_pxa_port *up) -+{ -+#if SUPPORT_POWER_QOS -+ pm_runtime_put_sync(up->port.dev); ++#else ++#define LRW_UART_CONSOLE NULL +#endif -+} + -+static void pxa_timer_handler(struct timer_list *t) ++static struct uart_driver lrw_uart_driver = { ++ .owner = THIS_MODULE, ++ .driver_name = LRW_UART_NAME, ++ .dev_name = LRW_UART_TTY_PREFIX, ++ .nr = UART_NR, ++ .cons = LRW_UART_CONSOLE, ++}; ++ ++static int lrw_uart_probe_dt_alias(int index, struct device *dev) +{ -+ struct uart_pxa_port *up = from_timer(up, t, pxa_timer); ++ struct device_node *np; ++ static bool seen_dev_with_alias; ++ static bool seen_dev_without_alias; ++ int ret = index; + -+ _pxa_timer_handler(up); ++ if (!IS_ENABLED(CONFIG_OF)) ++ return ret; ++ ++ np = 
dev->of_node; ++ if (!np) ++ return ret; ++ ++ ret = of_alias_get_id(np, "serial"); ++ if (ret < 0) { ++ seen_dev_without_alias = true; ++ ret = index; ++ } else { ++ seen_dev_with_alias = true; ++ if (ret >= ARRAY_SIZE(lrw_uart_console_ports) || lrw_uart_console_ports[ret]) { ++ dev_warn(dev, "requested serial port %d not available.\n", ret); ++ ret = index; ++ } ++ } ++ ++ if (seen_dev_with_alias && seen_dev_without_alias) ++ dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n"); ++ ++ return ret; +} + -+static void __maybe_unused uart_edge_wakeup_handler(int gpio, void *data) ++/* unregisters the driver also if no more ports are left */ ++static void lrw_uart_unregister_port(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *up = (struct uart_pxa_port *)data; ++ int i; ++ bool busy = false; + -+ if (!mod_timer(&up->pxa_timer, jiffies + PXA_TIMER_TIMEOUT)) { -+#if SUPPORT_POWER_QOS -+ pm_runtime_get_sync(up->port.dev); -+#endif ++ for (i = 0; i < ARRAY_SIZE(lrw_uart_console_ports); i++) { ++ if (lrw_uart_console_ports[i] == sup) ++ lrw_uart_console_ports[i] = NULL; ++ else if (lrw_uart_console_ports[i]) ++ busy = true; + } -+ pm_wakeup_event(up->port.dev, BLOCK_SUSPEND_TIMEOUT); ++ lrw_uart_dma_remove(sup); ++ if (!busy) ++ uart_unregister_driver(&lrw_uart_driver); +} + -+static void uart_tx_lpm_handler(struct work_struct *work) ++static int lrw_uart_find_free_port(void) +{ -+ struct uart_pxa_port *up = container_of(work, struct uart_pxa_port, uart_tx_lpm_work); ++ int i; + -+ while (!(serial_in(up, UART_LSR) & UART_LSR_TEMT)) -+ usleep_range(1000, 2000); ++ for (i = 0; i < ARRAY_SIZE(lrw_uart_console_ports); i++) ++ if (!lrw_uart_console_ports[i]) ++ return i; + -+#if SUPPORT_POWER_QOS -+ pm_runtime_put_sync(up->port.dev); -+#endif ++ return -EBUSY; +} -+#endif -+ -+static const struct of_device_id serial_k1x_dt_ids[] = { -+ { .compatible = "spacemit,k1x-uart", }, -+ {} -+}; + -+static 
int serial_pxa_probe_dt(struct platform_device *pdev, struct uart_pxa_port *sport) ++static int lrw_uart_setup_port(struct device *dev, struct lrw_uart_port *sup, ++ struct resource *mmiobase, int index) +{ -+ struct device_node *np = pdev->dev.of_node; ++ void __iomem *base; + int ret; + -+ if (!np) -+ return 1; ++ base = devm_ioremap_resource(dev, mmiobase); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); + -+ if (of_get_property(np, "dmas", NULL)) -+ sport->dma_enable = 1; ++ index = lrw_uart_probe_dt_alias(index, dev); + -+ ret = of_alias_get_id(np, "serial"); -+ if (ret < 0) { -+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); ++ sup->port.dev = dev; ++ sup->port.mapbase = mmiobase->start; ++ sup->port.membase = base; ++ sup->port.fifosize = sup->fifosize; ++ sup->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_LRW_UART_CONSOLE); ++ sup->port.flags = UPF_BOOT_AUTOCONF; ++ sup->port.line = index; ++ ++ ret = uart_get_rs485_mode(&sup->port); ++ if (ret) + return ret; -+ } -+ sport->port.line = ret; + -+#ifdef CONFIG_PM -+ if (of_property_read_u32(np, "edge-wakeup-pin", &sport->edge_wakeup_gpio)) -+ dev_dbg(&pdev->dev, "no edge-wakeup-pin defined\n"); -+#endif -+ sport->device_ctrl_rts = of_property_read_bool(np, "device-control-rts"); ++ lrw_uart_console_ports[index] = sup; + + return 0; +} + -+static int serial_pxa_probe(struct platform_device *dev) ++static int lrw_uart_register_port(struct lrw_uart_port *sup) +{ -+ struct uart_pxa_port *sport; -+ struct resource *mmres; -+ int ret; -+ int irq; -+ struct resource *dmares; -+ struct uart_pxa_dma *pxa_dma; ++ int ret, i; + -+ mmres = platform_get_resource(dev, IORESOURCE_MEM, 0); -+ if (!mmres) -+ return -ENODEV; ++ /* Ensure interrupts from this UART are masked and cleared */ ++ lrw_uart_write(0, sup, REG_IMSC); ++ lrw_uart_write(0xffff, sup, REG_ICR); + -+ irq = platform_get_irq(dev, 0); -+ if (irq < 0) -+ return irq; ++ if (!lrw_uart_driver.state) { ++ ret = uart_register_driver(&lrw_uart_driver); ++ 
if (ret < 0) { ++ dev_err(sup->port.dev, ++ "Failed to register LRW UART driver\n"); ++ for (i = 0; i < ARRAY_SIZE(lrw_uart_console_ports); i++) ++ if (lrw_uart_console_ports[i] == sup) ++ lrw_uart_console_ports[i] = NULL; ++ return ret; ++ } ++ } + -+ sport = kzalloc(sizeof(*sport), GFP_KERNEL); -+ if (!sport) -+ return -ENOMEM; ++ ret = uart_add_one_port(&lrw_uart_driver, &sup->port); ++ if (ret) ++ lrw_uart_unregister_port(sup); + -+#ifdef CONFIG_PM -+ sport->uart_dma.tx_buf_save = kmalloc(DMA_BLOCK, GFP_KERNEL); -+ if (!sport->uart_dma.tx_buf_save) { -+ kfree(sport); -+ return -ENOMEM; -+ } -+#endif -+ sport->gclk = devm_clk_get(&dev->dev, "gate"); -+ if (IS_ERR(sport->gclk)) { -+ ret = PTR_ERR(sport->gclk); -+ goto err_free; -+ } ++ return ret; ++} + -+ sport->fclk = devm_clk_get(&dev->dev, "func"); -+ if (IS_ERR(sport->fclk)) { -+ ret = PTR_ERR(sport->fclk); -+ goto err_free; -+ } ++static const struct serial_rs485 lrw_uart_rs485_supported = { ++ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND | ++ SER_RS485_RX_DURING_TX, ++ .delay_rts_before_send = 1, ++ .delay_rts_after_send = 1, ++}; + -+ if (sport->gclk) { -+ ret = clk_prepare(sport->gclk); -+ if (ret) { -+ clk_put(sport->gclk); -+ goto err_free; -+ } -+ } ++static int lrw_uart_probe(struct platform_device *pdev) ++{ ++ struct lrw_uart_port *sup; ++ struct resource *r; ++ int portnr, ret; ++ unsigned int clk; ++ unsigned int baudrate; + -+ if (sport->fclk) { -+ ret = clk_prepare(sport->fclk); -+ if (ret) { -+ clk_put(sport->fclk); -+ goto err_free; -+ } -+ } ++ /* ++ * Check the mandatory baud rate parameter in the DT node early ++ * so that we can easily exit with the error. 
++ */ ++ if (pdev->dev.of_node) { ++ struct device_node *np = pdev->dev.of_node; + -+ sport->port.type = PORT_PXA; -+ sport->port.iotype = UPIO_MEM; -+ sport->port.mapbase = mmres->start; -+ sport->port.irq = irq; -+ sport->port.fifosize = 64; -+ sport->port.ops = &serial_pxa_pops; -+ sport->port.dev = &dev->dev; -+ sport->port.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF; -+ sport->port.uartclk = clk_get_rate(sport->fclk); -+ sport->resets = devm_reset_control_get_optional(&dev->dev, NULL); -+ if (IS_ERR(sport->resets)) { -+ ret = PTR_ERR(sport->resets); -+ goto err_clk; ++ ret = of_property_read_u32(np, "current-speed", &baudrate); ++ if (ret) ++ return ret; ++ } else if (has_acpi_companion(&pdev->dev)) { ++ ret = device_property_read_u32(&pdev->dev, "current-speed", &baudrate); ++ if (ret) ++ return ret; ++ } else { ++ baudrate = 115200; + } -+ reset_control_deassert(sport->resets); + -+ sport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SPACEMIT_K1X_CONSOLE); ++ portnr = lrw_uart_find_free_port(); ++ if (portnr < 0) ++ return portnr; + -+ sport->edge_wakeup_gpio = -1; ++ sup = devm_kzalloc(&pdev->dev, sizeof(struct lrw_uart_port), ++ GFP_KERNEL); ++ if (!sup) ++ return -ENOMEM; + -+ pxa_dma = &sport->uart_dma; -+ pxa_dma->txdma_chan = NULL; -+ pxa_dma->rxdma_chan = NULL; -+ pxa_dma->txdma_addr = NULL; -+ pxa_dma->rxdma_addr = NULL; -+ pxa_dma->dma_init = false; -+ sport->dma_enable = 0; -+ sport->cons_udelay = 1; -+ sport->in_resume = false; ++ if (has_acpi_companion(&pdev->dev)) { ++ device_property_read_u32(&pdev->dev, "clock-frequency", &clk); ++ sup->port.uartclk = clk; ++ } else { ++ sup->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(sup->clk)) ++ return PTR_ERR(sup->clk); ++ } + -+ ret = serial_pxa_probe_dt(dev, sport); -+ if (ret > 0) -+ sport->port.line = dev->id; -+ else if (ret < 0) -+ goto err_rst; ++ ret = platform_get_irq(pdev, 0); ++ if (ret < 0) ++ return ret; ++ sup->port.irq = ret; + -+ if (sport->port.line >= ARRAY_SIZE(serial_pxa_ports)) { -+ 
dev_err(&dev->dev, "serial%d out of range\n", sport->port.line); -+ ret = -EINVAL; -+ goto err_rst; -+ } -+ snprintf(sport->name, PXA_NAME_LEN - 1, "UART%d", sport->port.line + 1); ++ sup->vendor = &vendor_lrw; + -+ dma_set_mask(&dev->dev, DMA_BIT_MASK(64)); -+ dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(64)); -+ if (ret > 0 && sport->dma_enable) { -+ dmares = platform_get_resource(dev, IORESOURCE_DMA, 0); -+ if (dmares) { -+ dmares = platform_get_resource(dev, IORESOURCE_DMA, 1); -+ if (dmares) -+ sport->dma_enable = 1; -+ } -+ } ++ sup->reg_offset = sup->vendor->reg_offset; ++ sup->fifosize = LRW_UART_TX_FIFO_DEPTH; ++ sup->port.iotype = sup->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; ++ sup->port.ops = &lrw_uart_pops; ++ sup->port.rs485_config = lrw_uart_rs485_config; ++ sup->port.rs485_supported = lrw_uart_rs485_supported; ++ sup->fixed_baud = baudrate; + -+ ret = request_irq(sport->port.irq, serial_pxa_irq, 0, sport->name, sport); -+ if (ret) -+ goto err_rst; ++ snprintf(sup->type, sizeof(sup->type), "LRW UART"); + -+ disable_irq(sport->port.irq); ++ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + -+#ifdef CONFIG_PM -+#if SUPPORT_POWER_QOS -+ pm_runtime_enable(&dev->dev); -+ pm_runtime_set_active(&dev->dev); -+ pm_runtime_irq_safe(&dev->dev); -+#endif -+#endif ++ ret = lrw_uart_setup_port(&pdev->dev, sup, r, portnr); ++ if (ret) ++ return ret; + -+ sport->port.membase = ioremap(mmres->start, resource_size(mmres)); -+ if (!sport->port.membase) { -+ ret = -ENOMEM; -+ goto err_qos; -+ } ++ platform_set_drvdata(pdev, sup); + -+#ifdef CONFIG_PM -+ INIT_WORK(&sport->uart_tx_lpm_work, uart_tx_lpm_handler); ++ return lrw_uart_register_port(sup); ++} + -+ timer_setup(&sport->pxa_timer, pxa_timer_handler, 0); -+#endif ++static int lrw_uart_remove(struct platform_device *dev) ++{ ++ struct lrw_uart_port *sup = platform_get_drvdata(dev); + -+ serial_pxa_ports[sport->port.line] = sport; -+ uart_add_one_port(&serial_pxa_reg, &sport->port); -+ dev_dbg(&dev->dev, 
"uart clk_rate: %lu\n", clk_get_rate(sport->fclk)); -+ platform_set_drvdata(dev, sport); ++ uart_remove_one_port(&lrw_uart_driver, &sup->port); ++ lrw_uart_unregister_port(sup); + + return 0; ++} + -+#ifdef CONFIG_PM -+ uart_remove_one_port(&serial_pxa_reg, &sport->port); -+ iounmap(sport->port.membase); -+#endif -+err_qos: -+#ifdef CONFIG_PM -+ pm_runtime_disable(&dev->dev); -+#endif -+ free_irq(sport->port.irq, sport); -+err_rst: -+ reset_control_assert(sport->resets); -+err_clk: -+ clk_unprepare(sport->fclk); -+ clk_unprepare(sport->gclk); -+ clk_put(sport->fclk); -+ clk_put(sport->gclk); -+err_free: -+ kfree(sport); -+ return ret; ++#ifdef CONFIG_PM_SLEEP ++static int lrw_uart_suspend(struct device *dev) ++{ ++ struct lrw_uart_port *sup = dev_get_drvdata(dev); ++ ++ if (!sup) ++ return -EINVAL; ++ ++ return uart_suspend_port(&lrw_uart_driver, &sup->port); +} + -+static int serial_pxa_remove(struct platform_device *dev) ++static int lrw_uart_resume(struct device *dev) +{ -+ struct uart_pxa_port *sport = platform_get_drvdata(dev); ++ struct lrw_uart_port *sup = dev_get_drvdata(dev); + -+#ifdef CONFIG_PM -+ pm_runtime_disable(&dev->dev); ++ if (!sup) ++ return -EINVAL; ++ ++ return uart_resume_port(&lrw_uart_driver, &sup->port); ++} +#endif + -+ uart_remove_one_port(&serial_pxa_reg, &sport->port); ++static SIMPLE_DEV_PM_OPS(lrw_uart_pm_ops, lrw_uart_suspend, lrw_uart_resume); + -+ reset_control_assert(sport->resets); -+ free_irq(sport->port.irq, sport); -+ clk_unprepare(sport->fclk); -+ clk_unprepare(sport->gclk); -+ clk_put(sport->fclk); -+ clk_put(sport->gclk); ++static const struct of_device_id lrw_uart_of_match[] = { ++ { .compatible = "lrw,lrw-uart" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, lrw_uart_of_match); + -+#ifdef CONFIG_PM -+ kfree(sport->uart_dma.tx_buf_save); -+#endif -+ kfree(sport); -+ serial_pxa_ports[dev->id] = NULL; ++static const struct acpi_device_id __maybe_unused lrw_uart_acpi_match[] = { ++ { "LRWX0000", 0 }, ++ {}, ++}; 
++MODULE_DEVICE_TABLE(acpi, lrw_uart_acpi_match); ++ ++static struct platform_driver lrw_uart_platform_driver = { ++ .probe = lrw_uart_probe, ++ .remove = lrw_uart_remove, ++ .driver = { ++ .name = LRW_UART_NAME, ++ .pm = pm_sleep_ptr(&lrw_uart_pm_ops), ++ .of_match_table = of_match_ptr(lrw_uart_of_match), ++ .acpi_match_table = ACPI_PTR(lrw_uart_acpi_match), ++ .suppress_bind_attrs = IS_BUILTIN(CONFIG_SERIAL_LRW_UART), ++ }, ++}; ++ ++static int __init lrw_uart_init(void) ++{ ++ pr_info("Serial: LRW UART driver\n"); ++ ++ int ret; ++ ++ ret = uart_register_driver(&lrw_uart_driver); ++ if (ret < 0) { ++ pr_err("Could not register %s driver\n", ++ lrw_uart_driver.driver_name); ++ return ret; ++ } ++ ++ ret = platform_driver_register(&lrw_uart_platform_driver); ++ if (ret < 0) { ++ pr_err("LRW UART platform driver register failed, e = %d\n", ret); ++ uart_unregister_driver(&lrw_uart_driver); ++ return ret; ++ } + + return 0; +} + -+static struct platform_driver serial_pxa_driver = { -+ .probe = serial_pxa_probe, -+ .remove = serial_pxa_remove, -+ .driver = { -+ .name = "spacemit-k1x-uart", -+#ifdef CONFIG_PM -+ .pm = &serial_pxa_pm_ops, -+#endif -+ .suppress_bind_attrs = true, -+ .of_match_table = serial_k1x_dt_ids, -+ }, -+}; ++static void __exit lrw_uart_exit(void) ++{ ++ platform_driver_unregister(&lrw_uart_platform_driver); ++ uart_unregister_driver(&lrw_uart_driver); ++} + -+static int __init serial_pxa_init(void) ++/* ++ * While this can be a module, if builtin it's most likely the console ++ * So let's leave module_exit but move module_init to an earlier place ++ */ ++arch_initcall(lrw_uart_init); ++module_exit(lrw_uart_exit); ++ ++MODULE_AUTHOR("Wenhong Liu/Qingtao Liu"); ++MODULE_DESCRIPTION("LRW UART serial driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c +index ed3953bd0407..469ad26cde48 100644 +--- a/drivers/tty/serial/serial_port.c ++++ b/drivers/tty/serial/serial_port.c +@@ -8,7 +8,10 
@@ + + #include + #include ++#include ++#include + #include ++#include + #include + #include + +@@ -146,6 +149,148 @@ void uart_remove_one_port(struct uart_driver *drv, struct uart_port *port) + } + EXPORT_SYMBOL(uart_remove_one_port); + ++/** ++ * __uart_read_properties - read firmware properties of the given UART port ++ * @port: corresponding port ++ * @use_defaults: apply defaults (when %true) or validate the values (when %false) ++ * ++ * The following device properties are supported: ++ * - clock-frequency (optional) ++ * - fifo-size (optional) ++ * - no-loopback-test (optional) ++ * - reg-shift (defaults may apply) ++ * - reg-offset (value may be validated) ++ * - reg-io-width (defaults may apply or value may be validated) ++ * - interrupts (OF only) ++ * - serial [alias ID] (OF only) ++ * ++ * If the port->dev is of struct platform_device type the interrupt line ++ * will be retrieved via platform_get_irq() call against that device. ++ * Otherwise it will be assigned by fwnode_irq_get() call. In both cases ++ * the index 0 of the resource is used. ++ * ++ * The caller is responsible to initialize the following fields of the @port ++ * ->dev (must be valid) ++ * ->flags ++ * ->mapbase ++ * ->mapsize ++ * ->regshift (if @use_defaults is false) ++ * before calling this function. Alternatively the above mentioned fields ++ * may be zeroed, in such case the only ones, that have associated properties ++ * found, will be set to the respective values. ++ * ++ * If no error happened, the ->irq, ->mapbase, ->mapsize will be altered. ++ * The ->iotype is always altered. ++ * ++ * When @use_defaults is true and the respective property is not found ++ * the following values will be applied: ++ * ->regshift = 0 ++ * In this case IRQ must be provided, otherwise an error will be returned. 
++ * ++ * When @use_defaults is false and the respective property is found ++ * the following values will be validated: ++ * - reg-io-width (->iotype) ++ * - reg-offset (->mapsize against ->mapbase) ++ * ++ * Returns: 0 on success or negative errno on failure ++ */ ++static int __uart_read_properties(struct uart_port *port, bool use_defaults) +{ ++ struct device *dev = port->dev; ++ u32 value; + int ret; + -+ ret = uart_register_driver(&serial_pxa_reg); -+ if (ret != 0) ++ /* Read optional UART functional clock frequency */ ++ device_property_read_u32(dev, "clock-frequency", &port->uartclk); ++ ++ /* Read the registers alignment (default: 8-bit) */ ++ ret = device_property_read_u32(dev, "reg-shift", &value); ++ if (ret) ++ port->regshift = use_defaults ? 0 : port->regshift; ++ else ++ port->regshift = value; ++ ++ /* Read the registers I/O access type (default: MMIO 8-bit) */ ++ ret = device_property_read_u32(dev, "reg-io-width", &value); ++ if (ret) { ++ port->iotype = UPIO_MEM; ++ } else { ++ switch (value) { ++ case 1: ++ port->iotype = UPIO_MEM; ++ break; ++ case 2: ++ port->iotype = UPIO_MEM16; ++ break; ++ case 4: ++ port->iotype = device_is_big_endian(dev) ? 
UPIO_MEM32BE : UPIO_MEM32; ++ break; ++ default: ++ if (!use_defaults) { ++ dev_err(dev, "Unsupported reg-io-width (%u)\n", value); ++ return -EINVAL; ++ } ++ port->iotype = UPIO_UNKNOWN; ++ break; ++ } ++ } ++ ++ /* Read the address mapping base offset (default: no offset) */ ++ ret = device_property_read_u32(dev, "reg-offset", &value); ++ if (ret) ++ value = 0; ++ ++ /* Check for shifted address mapping overflow */ ++ if (!use_defaults && port->mapsize < value) { ++ dev_err(dev, "reg-offset %u exceeds region size %pa\n", value, &port->mapsize); ++ return -EINVAL; ++ } ++ ++ port->mapbase += value; ++ port->mapsize -= value; ++ ++ /* Read optional FIFO size */ ++ device_property_read_u32(dev, "fifo-size", &port->fifosize); ++ ++ if (device_property_read_bool(dev, "no-loopback-test")) ++ port->flags |= UPF_SKIP_TEST; ++ ++ /* Get index of serial line, if found in DT aliases */ ++ ret = of_alias_get_id(dev_of_node(dev), "serial"); ++ if (ret >= 0) ++ port->line = ret; ++ ++ if (dev_is_platform(dev)) ++ ret = platform_get_irq(to_platform_device(dev), 0); ++ else ++ ret = fwnode_irq_get(dev_fwnode(dev), 0); ++ if (ret == -EPROBE_DEFER) ++ return ret; ++ if (ret > 0) ++ port->irq = ret; ++ else if (use_defaults) ++ /* By default IRQ support is mandatory */ + return ret; ++ else ++ port->irq = 0; + -+ ret = platform_driver_register(&serial_pxa_driver); -+ if (ret != 0) -+ uart_unregister_driver(&serial_pxa_reg); ++ port->flags |= UPF_SHARE_IRQ; + -+ return ret; ++ return 0; +} + -+static void __exit serial_pxa_exit(void) ++int uart_read_port_properties(struct uart_port *port) +{ -+ platform_driver_unregister(&serial_pxa_driver); -+ uart_unregister_driver(&serial_pxa_reg); ++ return __uart_read_properties(port, true); +} -+module_init(serial_pxa_init); -+module_exit(serial_pxa_exit); ++EXPORT_SYMBOL_GPL(uart_read_port_properties); + ++int uart_read_and_validate_port_properties(struct uart_port *port) ++{ ++ return __uart_read_properties(port, false); ++} 
++EXPORT_SYMBOL_GPL(uart_read_and_validate_port_properties); ++ + static struct device_driver serial_port_driver = { + .name = "port", + .suppress_bind_attrs = true, diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c -index 3491de5272e8..ed952dc4ce3b 100644 +index b7380a0bbb53..4b7b89af63ac 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1812,8 +1812,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba) @@ -615328,6 +629259,84 @@ index 000000000000..9b3ba6becdbe +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("DesignWare DWC3 XuanTie Glue Driver"); +MODULE_AUTHOR("Jisheng Zhang "); +diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig +index eb8f1756fd83..7615863a49df 100644 +--- a/drivers/vfio/Kconfig ++++ b/drivers/vfio/Kconfig +@@ -39,7 +39,7 @@ config VFIO_GROUP + + config VFIO_CONTAINER + bool "Support for the VFIO container /dev/vfio/vfio" +- select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64 || SW64 || LOONGARCH) ++ select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64 || SW64 || LOONGARCH || RISCV) + depends on VFIO_GROUP + default y + help +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c +index 000d6a4bd06d..4d4932bd489a 100644 +--- a/drivers/vfio/vfio_iommu_type1.c ++++ b/drivers/vfio/vfio_iommu_type1.c +@@ -2500,7 +2500,7 @@ static int vfio_iommu_domain_alloc(struct device *dev, void *data) + { + struct iommu_domain **domain = data; + +- *domain = iommu_domain_alloc(dev->bus); ++ *domain = iommu_paging_domain_alloc(dev); + return 1; /* Don't iterate */ + } + +@@ -2627,11 +2627,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, + * us a representative device for the IOMMU API call. We don't actually + * want to iterate beyond the first device (if any). 
+ */ +- ret = -EIO; + iommu_group_for_each_dev(iommu_group, &domain->domain, + vfio_iommu_domain_alloc); +- if (!domain->domain) ++ if (IS_ERR(domain->domain)) { ++ ret = PTR_ERR(domain->domain); + goto out_free_domain; ++ } + + #ifdef CONFIG_HISI_VIRTCCA_CODA + if (is_virtcca_cvm_enable() && iommu->secure) +diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c +index 35e5180b5470..a4e77dabd83a 100644 +--- a/drivers/vhost/vdpa.c ++++ b/drivers/vhost/vdpa.c +@@ -1536,26 +1536,24 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v) + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + struct device *dma_dev = vdpa_get_dma_dev(vdpa); +- const struct bus_type *bus; + int ret; + + /* Device want to do DMA by itself */ + if (ops->set_map || ops->dma_map) + return 0; + +- bus = dma_dev->bus; +- if (!bus) +- return -EFAULT; +- + if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) { + dev_warn_once(&v->dev, + "Failed to allocate domain, device is not IOMMU cache coherent capable\n"); + return -ENOTSUPP; + } + +- v->domain = iommu_domain_alloc(bus); +- if (!v->domain) +- return -EIO; ++ v->domain = iommu_paging_domain_alloc(dma_dev); ++ if (IS_ERR(v->domain)) { ++ ret = PTR_ERR(v->domain); ++ v->domain = NULL; ++ return ret; ++ } + + ret = iommu_attach_device(v->domain, dma_dev); + if (ret) diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 751458959411..f418a8cd71e8 100644 --- a/drivers/watchdog/Kconfig @@ -615822,7 +629831,7 @@ index 000000000000..80cb0b1e3d64 +MODULE_DESCRIPTION("PMIC Watchdog Driver for TH1520"); +MODULE_LICENSE("GPL"); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h -index 7c49c8a35005..396909300897 100644 +index 7c49c8a35005..b4124f0a08dc 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -13,11 +13,9 @@ @@ -615883,7 +629892,17 @@ index 7c49c8a35005..396909300897 100644 /* Device */ struct acpi_device { u32 pld_crc; -@@ -830,6 +849,8 @@ 
static inline void acpi_put_acpi_dev(struct acpi_device *adev) +@@ -637,8 +656,7 @@ struct iommu_ops; + bool acpi_dma_supported(const struct acpi_device *adev); + enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev); + int acpi_iommu_fwspec_init(struct device *dev, u32 id, +- struct fwnode_handle *fwnode, +- const struct iommu_ops *ops); ++ struct fwnode_handle *fwnode); + int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map); + int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, + const u32 *input_id); +@@ -830,6 +848,8 @@ static inline void acpi_put_acpi_dev(struct acpi_device *adev) int acpi_wait_for_acpi_ipmi(void); @@ -615892,6 +629911,121 @@ index 7c49c8a35005..396909300897 100644 #else /* CONFIG_ACPI */ static inline int register_acpi_bus_type(void *bus) { return 0; } +diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h +index e490b4c37a48..32ef0bc220f1 100644 +--- a/include/acpi/actbl1.h ++++ b/include/acpi/actbl1.h +@@ -1529,7 +1529,8 @@ enum acpi_hest_notify_types { + ACPI_HEST_NOTIFY_SEI = 9, /* ACPI 6.1 */ + ACPI_HEST_NOTIFY_GSIV = 10, /* ACPI 6.1 */ + ACPI_HEST_NOTIFY_SOFTWARE_DELEGATED = 11, /* ACPI 6.2 */ +- ACPI_HEST_NOTIFY_RESERVED = 12 /* 12 and greater are reserved */ ++ ACPI_HEST_NOTIFY_SSE = 12, /* RISCV SSE */ ++ ACPI_HEST_NOTIFY_RESERVED = 13 /* 13 and greater are reserved */ + }; + + /* Values for config_write_enable bitfield above */ +diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h +index 84f87d8b9503..fff658af0188 100644 +--- a/include/acpi/actbl2.h ++++ b/include/acpi/actbl2.h +@@ -49,6 +49,7 @@ + #define ACPI_SIG_RASF "RASF" /* RAS Feature table */ + #define ACPI_SIG_RGRT "RGRT" /* Regulatory Graphics Resource Table */ + #define ACPI_SIG_RHCT "RHCT" /* RISC-V Hart Capabilities Table */ ++#define ACPI_SIG_RIMT "RIMT" /* RISC-V IO Mapping Table */ + #define ACPI_SIG_SBST "SBST" /* Smart Battery Specification Table */ + #define ACPI_SIG_SDEI "SDEI" /* Software Delegated 
Exception Interface Table */ + #define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */ +@@ -2968,6 +2969,88 @@ struct acpi_rhct_hart_info { + u32 uid; /* ACPI processor UID */ + }; + ++/******************************************************************************* ++ * ++ * RIMT - RISC-V IO Remapping Table ++ * ++ * https://github.com/riscv-non-isa/riscv-acpi-rimt ++ * ++ ******************************************************************************/ ++ ++struct acpi_table_rimt { ++ struct acpi_table_header header; /* Common ACPI table header */ ++ u32 num_nodes; /* Number of RIMT Nodes */ ++ u32 node_offset; /* Offset to RIMT Node Array */ ++ u32 reserved; ++}; ++ ++struct acpi_rimt_node { ++ u8 type; ++ u8 revision; ++ u16 length; ++ u16 reserved; ++ u16 id; ++ char node_data[]; ++}; ++ ++enum acpi_rimt_node_type { ++ ACPI_RIMT_NODE_TYPE_IOMMU = 0x0, ++ ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX = 0x1, ++ ACPI_RIMT_NODE_TYPE_PLAT_DEVICE = 0x2, ++}; ++ ++struct acpi_rimt_iommu { ++ u8 hardware_id[8]; /* Hardware ID */ ++ u64 base_address; /* Base Address */ ++ u32 flags; /* Flags */ ++ u32 proximity_domain; /* Proximity Domain */ ++ u16 pcie_segment_number; /* PCIe Segment number */ ++ u16 pcie_bdf; /* PCIe B/D/F */ ++ u16 num_interrupt_wires; /* Number of interrupt wires */ ++ u16 interrupt_wire_offset; /* Interrupt wire array offset */ ++ u64 interrupt_wire[]; /* Interrupt wire array */ ++}; ++ ++/* IOMMU Node Flags */ ++#define ACPI_RIMT_IOMMU_FLAGS_PCIE (1) ++#define ACPI_RIMT_IOMMU_FLAGS_PXM_VALID (1 << 1) ++ ++/* Interrupt Wire Structure */ ++struct acpi_rimt_iommu_wire_gsi { ++ u32 irq_num; /* Interrupt Number */ ++ u32 flags; /* Flags */ ++}; ++ ++/* Interrupt Wire Flags */ ++#define ACPI_RIMT_GSI_LEVEL_TRIGGERRED (1) ++#define ACPI_RIMT_GSI_ACTIVE_HIGH (1 << 1) ++ ++struct acpi_rimt_id_mapping { ++ u32 source_id_base; /* Source ID Base */ ++ u32 num_ids; /* Number of IDs */ ++ u32 dest_id_base; /* Destination Device ID Base */ ++ u32 dest_offset; /* 
Destination IOMMU Offset */ ++ u32 flags; /* Flags */ ++}; ++ ++struct acpi_rimt_pcie_rc { ++ u32 flags; /* Flags */ ++ u16 reserved; /* Reserved */ ++ u16 pcie_segment_number; /* PCIe Segment number */ ++ u16 id_mapping_offset; /* ID mapping array offset */ ++ u16 num_id_mappings; /* Number of ID mappings */ ++}; ++ ++/* PCIe Root Complex Node Flags */ ++#define ACPI_RIMT_PCIE_ATS_SUPPORTED (1) ++#define ACPI_RIMT_PCIE_PRI_SUPPORTED (1 << 1) ++ ++struct acpi_rimt_platform_device { ++ u16 id_mapping_offset; /* ID Mapping array offset */ ++ u16 num_id_mappings; /* Number of ID mappings */ ++ char device_name[]; /* Device Object Name */ ++}; ++ + /******************************************************************************* + * + * SBST - Smart Battery Specification Table diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h index c080d579a546..e32149d605dc 100644 --- a/include/acpi/actbl3.h @@ -615954,6 +630088,256 @@ index c75d4a753849..879e5f8aa5e9 100644 } #ifndef __HAVE_ARCH_PUD_FREE +diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h +index 0655aa5b57b2..bf47cca2c375 100644 +--- a/include/asm-generic/qspinlock.h ++++ b/include/asm-generic/qspinlock.h +@@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) + } + #endif + ++#ifndef __no_arch_spinlock_redefine + /* + * Remapping spinlock architecture specific functions to the corresponding + * queued spinlock functions. 
+@@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) + #define arch_spin_lock(l) queued_spin_lock(l) + #define arch_spin_trylock(l) queued_spin_trylock(l) + #define arch_spin_unlock(l) queued_spin_unlock(l) ++#endif + + #endif /* __ASM_GENERIC_QSPINLOCK_H */ +diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h +index fdfebcb050f4..970590baf61b 100644 +--- a/include/asm-generic/spinlock.h ++++ b/include/asm-generic/spinlock.h +@@ -1,92 +1,9 @@ + /* SPDX-License-Identifier: GPL-2.0 */ + +-/* +- * 'Generic' ticket-lock implementation. +- * +- * It relies on atomic_fetch_add() having well defined forward progress +- * guarantees under contention. If your architecture cannot provide this, stick +- * to a test-and-set lock. +- * +- * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a +- * sub-word of the value. This is generally true for anything LL/SC although +- * you'd be hard pressed to find anything useful in architecture specifications +- * about this. If your architecture cannot do this you might be better off with +- * a test-and-set. +- * +- * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence +- * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with +- * a full fence after the spin to upgrade the otherwise-RCpc +- * atomic_cond_read_acquire(). +- * +- * The implementation uses smp_cond_load_acquire() to spin, so if the +- * architecture has WFE like instructions to sleep instead of poll for word +- * modifications be sure to implement that (see ARM64 for example). 
+- * +- */ +- + #ifndef __ASM_GENERIC_SPINLOCK_H + #define __ASM_GENERIC_SPINLOCK_H + +-#include +-#include +- +-static __always_inline void arch_spin_lock(arch_spinlock_t *lock) +-{ +- u32 val = atomic_fetch_add(1<<16, lock); +- u16 ticket = val >> 16; +- +- if (ticket == (u16)val) +- return; +- +- /* +- * atomic_cond_read_acquire() is RCpc, but rather than defining a +- * custom cond_read_rcsc() here we just emit a full fence. We only +- * need the prior reads before subsequent writes ordering from +- * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we +- * have no outstanding writes due to the atomic_fetch_add() the extra +- * orderings are free. +- */ +- atomic_cond_read_acquire(lock, ticket == (u16)VAL); +- smp_mb(); +-} +- +-static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) +-{ +- u32 old = atomic_read(lock); +- +- if ((old >> 16) != (old & 0xffff)) +- return false; +- +- return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ +-} +- +-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) +-{ +- u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); +- u32 val = atomic_read(lock); +- +- smp_store_release(ptr, (u16)val + 1); +-} +- +-static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) +-{ +- u32 val = atomic_read(lock); +- +- return ((val >> 16) != (val & 0xffff)); +-} +- +-static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) +-{ +- u32 val = atomic_read(lock); +- +- return (s16)((val >> 16) - (val & 0xffff)) > 1; +-} +- +-static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) +-{ +- return !arch_spin_is_locked(&lock); +-} +- ++#include + #include + + #endif /* __ASM_GENERIC_SPINLOCK_H */ +diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h +index 8962bb730945..f534aa5de394 100644 +--- a/include/asm-generic/spinlock_types.h ++++ b/include/asm-generic/spinlock_types.h +@@ -3,15 +3,7 @@ + #ifndef 
__ASM_GENERIC_SPINLOCK_TYPES_H + #define __ASM_GENERIC_SPINLOCK_TYPES_H + +-#include +-typedef atomic_t arch_spinlock_t; +- +-/* +- * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the +- * include. +- */ +-#include +- +-#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) ++#include ++#include + + #endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */ +diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h +new file mode 100644 +index 000000000000..325779970d8a +--- /dev/null ++++ b/include/asm-generic/ticket_spinlock.h +@@ -0,0 +1,105 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++ ++/* ++ * 'Generic' ticket-lock implementation. ++ * ++ * It relies on atomic_fetch_add() having well defined forward progress ++ * guarantees under contention. If your architecture cannot provide this, stick ++ * to a test-and-set lock. ++ * ++ * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a ++ * sub-word of the value. This is generally true for anything LL/SC although ++ * you'd be hard pressed to find anything useful in architecture specifications ++ * about this. If your architecture cannot do this you might be better off with ++ * a test-and-set. ++ * ++ * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence ++ * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with ++ * a full fence after the spin to upgrade the otherwise-RCpc ++ * atomic_cond_read_acquire(). ++ * ++ * The implementation uses smp_cond_load_acquire() to spin, so if the ++ * architecture has WFE like instructions to sleep instead of poll for word ++ * modifications be sure to implement that (see ARM64 for example). 
++ * ++ */ ++ ++#ifndef __ASM_GENERIC_TICKET_SPINLOCK_H ++#define __ASM_GENERIC_TICKET_SPINLOCK_H ++ ++#include ++#include ++ ++static __always_inline void ticket_spin_lock(arch_spinlock_t *lock) ++{ ++ u32 val = atomic_fetch_add(1<<16, &lock->val); ++ u16 ticket = val >> 16; ++ ++ if (ticket == (u16)val) ++ return; ++ ++ /* ++ * atomic_cond_read_acquire() is RCpc, but rather than defining a ++ * custom cond_read_rcsc() here we just emit a full fence. We only ++ * need the prior reads before subsequent writes ordering from ++ * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we ++ * have no outstanding writes due to the atomic_fetch_add() the extra ++ * orderings are free. ++ */ ++ atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); ++ smp_mb(); ++} ++ ++static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock) ++{ ++ u32 old = atomic_read(&lock->val); ++ ++ if ((old >> 16) != (old & 0xffff)) ++ return false; ++ ++ return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ ++} ++ ++static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock) ++{ ++ u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); ++ u32 val = atomic_read(&lock->val); ++ ++ smp_store_release(ptr, (u16)val + 1); ++} ++ ++static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock) ++{ ++ u32 val = lock.val.counter; ++ ++ return ((val >> 16) == (val & 0xffff)); ++} ++ ++static __always_inline int ticket_spin_is_locked(arch_spinlock_t *lock) ++{ ++ arch_spinlock_t val = READ_ONCE(*lock); ++ ++ return !ticket_spin_value_unlocked(val); ++} ++ ++static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock) ++{ ++ u32 val = atomic_read(&lock->val); ++ ++ return (s16)((val >> 16) - (val & 0xffff)) > 1; ++} ++ ++#ifndef __no_arch_spinlock_redefine ++/* ++ * Remapping spinlock architecture specific functions to the corresponding ++ * ticket spinlock functions. 
++ */ ++#define arch_spin_is_locked(l) ticket_spin_is_locked(l) ++#define arch_spin_is_contended(l) ticket_spin_is_contended(l) ++#define arch_spin_value_unlocked(l) ticket_spin_value_unlocked(l) ++#define arch_spin_lock(l) ticket_spin_lock(l) ++#define arch_spin_trylock(l) ticket_spin_trylock(l) ++#define arch_spin_unlock(l) ticket_spin_unlock(l) ++#endif ++ ++#endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */ diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h index 6a46baa0737c..30de0e07c2eb 100644 --- a/include/drm/bridge/dw_hdmi.h @@ -616162,19 +630546,19 @@ index 000000000000..5584243f9135 +#define SAFE_MODE 2 +#define BYPASS_MODE 3 +#endif -diff --git a/include/dt-bindings/clock/spacemit-k1x-clock.h b/include/dt-bindings/clock/spacemit-k1x-clock.h +diff --git a/include/dt-bindings/clock/spacemit-k1-clock.h b/include/dt-bindings/clock/spacemit-k1-clock.h new file mode 100644 -index 000000000000..5dd92a6cde71 +index 000000000000..679d95e85070 --- /dev/null -+++ b/include/dt-bindings/clock/spacemit-k1x-clock.h ++++ b/include/dt-bindings/clock/spacemit-k1-clock.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2022 Spacemit, Inc + */ + -+#ifndef _DT_BINDINGS_CLK_SPACEMIT_K1X_H_ -+#define _DT_BINDINGS_CLK_SPACEMIT_K1X_H_ ++#ifndef _DT_BINDINGS_CLK_SPACEMIT_K1_H_ ++#define _DT_BINDINGS_CLK_SPACEMIT_K1_H_ + +#define CLK_PLL2 0 +#define CLK_PLL3 1 @@ -616390,7 +630774,7 @@ index 000000000000..5dd92a6cde71 +#define CLK_RCPU2_PWM9 211 +#define CLK_MAX_NO 212 + -+#endif /* _DT_BINDINGS_CLK_SPACEMIT_K1X_H_ */ ++#endif /* _DT_BINDINGS_CLK_SPACEMIT_K1_H_ */ diff --git a/include/dt-bindings/clock/th1520-audiosys.h b/include/dt-bindings/clock/th1520-audiosys.h new file mode 100644 index 000000000000..2001545b68b8 @@ -617315,16 +631699,16 @@ index 000000000000..44b1e3f0ec03 +#define MMC_CAP_UHS_DDR50 (1 << 20) + +#endif /* K1_DT_BINDINGS_MMC_SDHCI_H */ -diff --git a/include/dt-bindings/pinctrl/k1-x-pinctrl.h 
b/include/dt-bindings/pinctrl/k1-x-pinctrl.h +diff --git a/include/dt-bindings/pinctrl/k1-pinctrl.h b/include/dt-bindings/pinctrl/k1-pinctrl.h new file mode 100644 -index 000000000000..3d3cb59e8aa3 +index 000000000000..0bcd4564fa9c --- /dev/null -+++ b/include/dt-bindings/pinctrl/k1-x-pinctrl.h ++++ b/include/dt-bindings/pinctrl/k1-pinctrl.h @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/*Copyright (C) 2023 Spacemit Co., Ltd. */ -+#ifndef __DT_BINDINGS_K1X_PINCTRL_H -+#define __DT_BINDINGS_K1X_PINCTRL_H ++#ifndef __DT_BINDINGS_K1_PINCTRL_H ++#define __DT_BINDINGS_K1_PINCTRL_H + +/* pin offset */ +#define PINID(x) ((x) + 1) @@ -617516,9 +631900,9 @@ index 000000000000..3d3cb59e8aa3 +#define PULL_UP (6 << 13) /* bit[15:13] 110 */ +#define PULL_DOWN (5 << 13) /* bit[15:13] 101 */ + -+#define K1X_PADCONF(pinid, conf, mux) ((pinid) * 4) (conf) (mux) ++#define K1_PINCFGID(pinid) ((pinid) * 4) + -+#endif /* __DT_BINDINGS_K1PRO_PINCTRL_H */ ++#endif /* __DT_BINDINGS_K1_PINCTRL_H */ diff --git a/include/dt-bindings/pinctrl/ur-dp1000-pinctrl.h b/include/dt-bindings/pinctrl/ur-dp1000-pinctrl.h new file mode 100644 index 000000000000..c4f2e889f8f7 @@ -617691,17 +632075,17 @@ index 000000000000..9ff8ca4c3d67 +#define RST_MAX_NUM (RST_RXU31+1) + +#endif -diff --git a/include/dt-bindings/reset/spacemit-k1x-reset.h b/include/dt-bindings/reset/spacemit-k1x-reset.h +diff --git a/include/dt-bindings/reset/spacemit-k1-reset.h b/include/dt-bindings/reset/spacemit-k1-reset.h new file mode 100644 -index 000000000000..dc5779b818fe +index 000000000000..ced13afecf8f --- /dev/null -+++ b/include/dt-bindings/reset/spacemit-k1x-reset.h ++++ b/include/dt-bindings/reset/spacemit-k1-reset.h @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023, spacemit Corporation. 
*/ + -+#ifndef __DT_BINDINGS_RESET_SAPCEMIT_K1X_H__ -+#define __DT_BINDINGS_RESET_SAPCEMIT_K1X_H__ ++#ifndef __DT_BINDINGS_RESET_SAPCEMIT_K1_H__ ++#define __DT_BINDINGS_RESET_SAPCEMIT_K1_H__ + +#define RESET_UART1 1 +#define RESET_UART2 2 @@ -617822,7 +632206,7 @@ index 000000000000..dc5779b818fe +#define RESET_RCPU2_PWM9 117 +#define RESET_NUMBER 118 + -+#endif /* __DT_BINDINGS_RESET_SAPCEMIT_K1X_H__ */ ++#endif /* __DT_BINDINGS_RESET_SAPCEMIT_K1_H__ */ diff --git a/include/dt-bindings/reset/xuantie,th1520-reset.h b/include/dt-bindings/reset/xuantie,th1520-reset.h new file mode 100644 index 000000000000..44a4581cc229 @@ -618017,8 +632401,42 @@ index a5e397fe05a8..62823f1f12ae 100644 { return -ENODEV; } static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in) { return -ENODEV; } +diff --git a/include/linux/acpi_rimt.h b/include/linux/acpi_rimt.h +new file mode 100644 +index 000000000000..fad3adc4d899 +--- /dev/null ++++ b/include/linux/acpi_rimt.h +@@ -0,0 +1,28 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright (C) 2024-2025, Ventana Micro Systems Inc. 
++ * Author: Sunil V L ++ */ ++ ++#ifndef _ACPI_RIMT_H ++#define _ACPI_RIMT_H ++ ++#ifdef CONFIG_ACPI_RIMT ++int rimt_iommu_register(struct device *dev); ++#else ++static inline int rimt_iommu_register(struct device *dev) ++{ ++ return -ENODEV; ++} ++#endif ++ ++#if defined(CONFIG_IOMMU_API) && defined(CONFIG_ACPI_RIMT) ++int rimt_iommu_configure_id(struct device *dev, const u32 *id_in); ++#else ++static inline int rimt_iommu_configure_id(struct device *dev, const u32 *id_in) ++{ ++ return -ENODEV; ++} ++#endif ++ ++#endif /* _ACPI_RIMT_H */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h -index 8f3b474f3a70..5fc138b773c3 100644 +index 89fd528e8ef4..70aef4981169 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -158,6 +158,9 @@ enum cpuhp_state { @@ -618370,10 +632788,10 @@ index 000000000000..b88ae8727c29 + +#endif diff --git a/include/linux/iommu.h b/include/linux/iommu.h -index 3dcf3118fbef..5bb55e5c2661 100644 +index ceecbc5ba759..92fad1986b58 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h -@@ -744,7 +744,7 @@ struct iommu_ops { +@@ -747,7 +747,7 @@ struct iommu_ops { /* Request/Free a list of reserved regions for a device */ void (*get_resv_regions)(struct device *dev, struct list_head *list); @@ -618382,15 +632800,112 @@ index 3dcf3118fbef..5bb55e5c2661 100644 bool (*is_attach_deferred)(struct device *dev); /* Per device IOMMU features */ -@@ -1293,7 +1293,7 @@ struct iommu_mm_data { - int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, - const struct iommu_ops *ops); - void iommu_fwspec_free(struct device *dev); +@@ -1018,10 +1018,9 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) + } + + extern int bus_iommu_probe(const struct bus_type *bus); +-extern bool iommu_present(const struct bus_type *bus); + extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap); + extern bool iommu_group_has_isolated_msi(struct iommu_group 
*group); +-extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus); ++struct iommu_domain *iommu_paging_domain_alloc(struct device *dev); + struct iommu_group *iommu_group_get_from_kobj(struct kobject *group_kobj); + extern struct iommu_group *iommu_group_get_by_id(int id); + extern void iommu_domain_free(struct iommu_domain *domain); +@@ -1240,7 +1239,6 @@ extern struct iommu_group *generic_single_device_group(struct device *dev); + + /** + * struct iommu_fwspec - per-device IOMMU instance data +- * @ops: ops for this device's IOMMU + * @iommu_fwnode: firmware handle for this device's IOMMU + * @flags: IOMMU_FWSPEC_* flags + * @num_ids: number of associated device IDs +@@ -1251,7 +1249,6 @@ extern struct iommu_group *generic_single_device_group(struct device *dev); + * consumers. + */ + struct iommu_fwspec { +- const struct iommu_ops *ops; + struct fwnode_handle *iommu_fwnode; + + KABI_RESERVE(1) +@@ -1293,11 +1290,8 @@ struct iommu_mm_data { + KABI_DEPRECATE(struct list_head, sva_handles) + }; + +-int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, +- const struct iommu_ops *ops); +-void iommu_fwspec_free(struct device *dev); -int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids); +-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); ++int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode); +int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids); - const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode); static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) + { +@@ -1361,29 +1355,24 @@ struct iommu_iotlb_gather {}; + struct iommu_dirty_bitmap {}; + struct iommu_dirty_ops {}; + +-static inline bool iommu_present(const struct bus_type *bus) +-{ +- return false; +-} +- + static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap) + { + return false; + } + +-static inline struct 
iommu_domain *iommu_domain_alloc(const struct bus_type *bus) ++static inline struct iommu_group *iommu_group_get_from_kobj(struct kobject *group_kobj) + { + return NULL; + } + +-static inline struct iommu_group *iommu_group_get_from_kobj(struct kobject *group_kobj) ++static inline struct iommu_group *iommu_group_get_by_id(int id) + { + return NULL; + } + +-static inline struct iommu_group *iommu_group_get_by_id(int id) ++static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) + { +- return NULL; ++ return ERR_PTR(-ENODEV); + } + + static inline void iommu_domain_free(struct iommu_domain *domain) +@@ -1650,28 +1639,17 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) + } + + static inline int iommu_fwspec_init(struct device *dev, +- struct fwnode_handle *iommu_fwnode, +- const struct iommu_ops *ops) ++ struct fwnode_handle *iommu_fwnode) + { + return -ENODEV; + } + +-static inline void iommu_fwspec_free(struct device *dev) +-{ +-} +- + static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, + int num_ids) + { + return -ENODEV; + } + +-static inline +-const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) +-{ +- return NULL; +-} +- + static inline int + iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) + { diff --git a/include/linux/irqchip/riscv-aplic.h b/include/linux/irqchip/riscv-aplic.h new file mode 100644 index 000000000000..ec8f7df50583 @@ -618673,18 +633188,69 @@ index 8594cd9b642e..2992c1851b63 100644 static inline void irq_dispose_mapping(unsigned int virq) { } static inline struct irq_domain *irq_find_matching_fwnode( diff --git a/include/linux/irqdomain_defs.h b/include/linux/irqdomain_defs.h -index 7a82107b0f3a..f2d9b8eb0d8d 100644 +index 7a82107b0f3a..9a1d91d900b6 100644 --- a/include/linux/irqdomain_defs.h +++ b/include/linux/irqdomain_defs.h -@@ -28,6 +28,8 @@ enum irq_domain_bus_token { +@@ -28,6 +28,9 @@ enum irq_domain_bus_token { 
DOMAIN_BUS_DMAR, DOMAIN_BUS_AMDVI, DOMAIN_BUS_PCI_DEVICE_IMS, + DOMAIN_BUS_DEVICE_MSI, + DOMAIN_BUS_WIRED_TO_MSI, ++ DOMAIN_BUS_MSI_REMAP, KABI_EXTEND_ENUM(DOMAIN_BUS_UB_MSI) }; +diff --git a/include/linux/kexec.h b/include/linux/kexec.h +index dfb9f50aea5f..7e109c358cc5 100644 +--- a/include/linux/kexec.h ++++ b/include/linux/kexec.h +@@ -412,7 +412,7 @@ bool kexec_load_permitted(int kexec_image_type); + + /* List of defined/legal kexec file flags */ + #define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ +- KEXEC_FILE_NO_INITRAMFS) ++ KEXEC_FILE_NO_INITRAMFS | KEXEC_FILE_DEBUG) + + /* flag to track if kexec reboot is in progress */ + extern bool kexec_in_progress; +@@ -509,6 +509,11 @@ static inline int crash_hotplug_memory_support(void) { return 0; } + static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; } + #endif + ++extern bool kexec_file_dbg_print; ++ ++#define kexec_dprintk(fmt, arg...) \ ++ do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0) ++ + #else /* !CONFIG_KEXEC_CORE */ + struct pt_regs; + struct task_struct; +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h +index 31d336759c2e..34f9f7cd0cd5 100644 +--- a/include/linux/kvm_host.h ++++ b/include/linux/kvm_host.h +@@ -2376,6 +2376,8 @@ struct kvm_vcpu *kvm_get_running_vcpu(void); + struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void); + + #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS ++struct kvm_kernel_irqfd; ++ + bool kvm_arch_has_irq_bypass(void); + int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); +@@ -2383,8 +2385,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *, + struct irq_bypass_producer *); + void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *); + void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *); +-int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, +- uint32_t guest_irq, bool set); ++void kvm_arch_update_irqfd_routing(struct 
kvm_kernel_irqfd *irqfd, ++ struct kvm_kernel_irq_routing_entry *old, ++ struct kvm_kernel_irq_routing_entry *new); + bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *, + struct kvm_kernel_irq_routing_entry *); + #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */ diff --git a/include/linux/mfd/spacemit_p1.h b/include/linux/mfd/spacemit_p1.h new file mode 100644 index 000000000000..52614b8dca58 @@ -618955,10 +633521,10 @@ index 27f42f713c89..7617930d3157 100644 #define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \ (dev_cap).num_ports * MIN_MSIX_P_PORT) diff --git a/include/linux/mm.h b/include/linux/mm.h -index 55bb6ba97a63..df173e9aacf7 100644 +index c0040a2014c4..74d5d3bb696d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h -@@ -3182,6 +3182,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) +@@ -3213,6 +3213,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) return ptl; } @@ -618982,7 +633548,7 @@ index 55bb6ba97a63..df173e9aacf7 100644 extern void free_initmem(void); diff --git a/include/linux/msi.h b/include/linux/msi.h -index 468319fac33d..86e79e630cdf 100644 +index 468319fac33d..bd1ca1978737 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -437,6 +437,7 @@ bool arch_restore_msi_irqs(struct pci_dev *dev); @@ -619022,7 +633588,20 @@ index 468319fac33d..86e79e630cdf 100644 /* Mask for the generic functionality */ MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0), -@@ -598,6 +607,11 @@ enum { +@@ -589,15 +598,22 @@ enum { + MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19), + /* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */ + MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20), ++ /* PCI MSIs cannot be steered separately to CPU cores */ ++ MSI_FLAG_NO_AFFINITY = (1 << 21), + /* Support for PCI/IMS */ +- MSI_FLAG_PCI_IMS = (1 << 21), +- KABI_EXTEND_ENUM(MSI_FLAG_UB_INTR = (1 << 22)) ++ MSI_FLAG_PCI_IMS = (1 << 22), ++ KABI_EXTEND_ENUM(MSI_FLAG_UB_INTR = (1 << 23)) + }; + + 
/** * struct msi_parent_ops - MSI parent domain callbacks and configuration info * * @supported_flags: Required: The supported MSI flags of the parent domain @@ -619034,7 +633613,7 @@ index 468319fac33d..86e79e630cdf 100644 * @prefix: Optional: Prefix for the domain and chip name * @init_dev_msi_info: Required: Callback for MSI parent domains to setup parent * domain specific domain flags, domain ops and interrupt chip -@@ -605,6 +619,9 @@ enum { +@@ -605,6 +621,9 @@ enum { */ struct msi_parent_ops { u32 supported_flags; @@ -619044,7 +633623,7 @@ index 468319fac33d..86e79e630cdf 100644 const char *prefix; bool (*init_dev_msi_info)(struct device *dev, struct irq_domain *domain, struct irq_domain *msi_parent_domain, -@@ -653,13 +670,6 @@ struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); +@@ -653,13 +672,6 @@ struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent); @@ -619058,7 +633637,7 @@ index 468319fac33d..86e79e630cdf 100644 /* When an MSI domain is used as an intermediate domain */ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev, -@@ -686,6 +696,10 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir +@@ -686,6 +698,10 @@ int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int vir void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nvec); void *platform_msi_get_host_data(struct irq_domain *domain); @@ -619081,6 +633660,27 @@ index ac473d00e9a1..88a4dc3d1f46 100644 #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) +diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h +index 43282e22ebe1..66c27ccf7587 100644 +--- a/include/linux/perf/riscv_pmu.h ++++ b/include/linux/perf/riscv_pmu.h +@@ -28,6 +28,8 @@ + + #define RISCV_PMU_CONFIG1_GUEST_EVENTS 0x1 + 
++struct sse_event; ++ + struct cpu_hw_events { + /* currently enabled events */ + int n_events; +@@ -46,6 +48,7 @@ struct riscv_pmu { + char *name; + + irqreturn_t (*handle_irq)(int irq_num, void *dev); ++ struct sse_event *sse_evt; + + unsigned long cmask; + u64 (*ctr_read)(struct perf_event *event); diff --git a/include/linux/platform_data/spacemit_k1_sdhci.h b/include/linux/platform_data/spacemit_k1_sdhci.h new file mode 100644 index 000000000000..9f25a53010df @@ -619186,6 +633786,87 @@ index 000000000000..9f25a53010df +}; + +#endif /* _SPACEMIT_K1_SDHCI_H_ */ +diff --git a/include/linux/riscv_sse.h b/include/linux/riscv_sse.h +new file mode 100644 +index 000000000000..f4a0a7add790 +--- /dev/null ++++ b/include/linux/riscv_sse.h +@@ -0,0 +1,75 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2024 Rivos Inc. ++ */ ++ ++#ifndef __LINUX_RISCV_SSE_H ++#define __LINUX_RISCV_SSE_H ++ ++#include ++#include ++#include ++#include ++ ++struct sse_event; ++struct pt_regs; ++ ++struct ghes; ++ ++typedef int (sse_event_handler)(u32 event_num, void *arg, struct pt_regs *regs); ++ ++#ifdef CONFIG_RISCV_SSE ++ ++struct sse_event *sse_event_register(u32 event_num, u32 priority, ++ sse_event_handler *handler, void *arg); ++ ++void sse_event_unregister(struct sse_event *evt); ++ ++int sse_event_set_target_cpu(struct sse_event *sse_evt, unsigned int cpu); ++ ++int sse_event_enable(struct sse_event *sse_evt); ++ ++void sse_event_disable(struct sse_event *sse_evt); ++ ++int sse_event_enable_local(struct sse_event *sse_evt); ++int sse_event_disable_local(struct sse_event *sse_evt); ++ ++int sse_register_ghes(struct ghes *ghes, sse_event_handler *lo_cb, ++ sse_event_handler *hi_cb); ++int sse_unregister_ghes(struct ghes *ghes); ++#else ++static inline struct sse_event *sse_event_register(u32 event_num, u32 priority, ++ sse_event_handler *handler, ++ void *arg) ++{ ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ ++static inline void sse_event_unregister(struct sse_event *evt) 
{} ++ ++static inline int sse_event_set_target_cpu(struct sse_event *sse_evt, ++ unsigned int cpu) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static inline int sse_event_enable(struct sse_event *sse_evt) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static inline void sse_event_disable(struct sse_event *sse_evt) {} ++ ++static inline int sse_register_ghes(struct ghes *ghes, sse_event_handler *lo_cb, ++ sse_event_handler *hi_cb) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static inline int sse_unregister_ghes(struct ghes *ghes) ++{ ++ return -EOPNOTSUPP; ++} ++ ++#endif ++#endif /* __LINUX_RISCV_SSE_H */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 5136eade96a4..052df85dfd59 100644 --- a/include/linux/serial_core.h @@ -619675,11 +634356,35 @@ index 000000000000..cfb1f017480c +}; + +#endif /* __VS_DRM_H__ */ +diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h +index 3be3e81c67ae..2ebe51c640de 100644 +--- a/include/uapi/linux/kexec.h ++++ b/include/uapi/linux/kexec.h +@@ -25,6 +25,7 @@ + #define KEXEC_FILE_UNLOAD 0x00000001 + #define KEXEC_FILE_ON_CRASH 0x00000002 + #define KEXEC_FILE_NO_INITRAMFS 0x00000004 ++#define KEXEC_FILE_DEBUG 0x00000008 + + /* These values match the ELF architecture values. + * Unless there is a good reason that should continue to be the case. 
+diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h +index add349889d0a..9af36490fb5d 100644 +--- a/include/uapi/linux/serial_core.h ++++ b/include/uapi/linux/serial_core.h +@@ -245,4 +245,7 @@ + /* Sunplus UART */ + #define PORT_SUNPLUS 123 + ++/* LRW UART */ ++#define PORT_LRW 124 ++ + #endif /* _UAPILINUX_SERIAL_CORE_H */ diff --git a/init/Kconfig b/init/Kconfig -index 2720083aaa17..29ad20cdb2dd 100644 +index 6c1f5079467f..43a90599800b 100644 --- a/init/Kconfig +++ b/init/Kconfig -@@ -2257,6 +2257,9 @@ source "kernel/Kconfig.locks" +@@ -2258,6 +2258,9 @@ source "kernel/Kconfig.locks" config ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE bool @@ -619689,6 +634394,25 @@ index 2720083aaa17..29ad20cdb2dd 100644 config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE bool +diff --git a/kernel/crash_core.c b/kernel/crash_core.c +index 748adee5b06b..116f0a7057c2 100644 +--- a/kernel/crash_core.c ++++ b/kernel/crash_core.c +@@ -409,9 +409,11 @@ int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map, + phdr->p_filesz = phdr->p_memsz = mend - mstart + 1; + phdr->p_align = 0; + ehdr->e_phnum++; +- pr_debug("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n", +- phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz, +- ehdr->e_phnum, phdr->p_offset); ++#ifdef CONFIG_KEXEC_FILE ++ kexec_dprintk("Crash PT_LOAD ELF header. 
phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n", ++ phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz, ++ ehdr->e_phnum, phdr->p_offset); ++#endif + phdr++; + } + diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index f5cfc5069746..9cd4bb204a96 100644 --- a/kernel/irq/irqdomain.c @@ -619837,7 +634561,7 @@ index 75d0ae490e29..8f222d1cccec 100644 } diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c -index 038a05ae83dd..7b25d32e44ae 100644 +index 038a05ae83dd..f8e8f7624b90 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -733,11 +733,26 @@ static void msi_domain_free(struct irq_domain *domain, unsigned int virq, @@ -619867,6 +634591,15 @@ index 038a05ae83dd..7b25d32e44ae 100644 }; static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info, +@@ -808,7 +823,7 @@ static void msi_domain_update_chip_ops(struct msi_domain_info *info) + struct irq_chip *chip = info->chip; + + BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask); +- if (!chip->irq_set_affinity) ++ if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY)) + chip->irq_set_affinity = msi_domain_set_affinity; + } + @@ -837,8 +852,11 @@ static struct irq_domain *__msi_create_irq_domain(struct fwnode_handle *fwnode, domain = irq_domain_create_hierarchy(parent, flags | IRQ_DOMAIN_FLAG_MSI, 0, fwnode, &msi_domain_ops, info); @@ -620113,8 +634846,78 @@ index 038a05ae83dd..7b25d32e44ae 100644 /** * msi_get_domain_info - Get the MSI interrupt domain info for @domain * @domain: The interrupt domain to retrieve data from +diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c +index 88024cb22a9d..ec71540c791c 100644 +--- a/kernel/kexec_core.c ++++ b/kernel/kexec_core.c +@@ -69,6 +69,8 @@ struct resource crashk_low_res = { + .desc = IORES_DESC_CRASH_KERNEL + }; + ++bool kexec_file_dbg_print; ++ + int kexec_should_crash(struct task_struct *p) + { + /* +diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c +index 830344627e9f..82b92468a0e9 100644 
+--- a/kernel/kexec_file.c ++++ b/kernel/kexec_file.c +@@ -123,6 +123,8 @@ void kimage_file_post_load_cleanup(struct kimage *image) + */ + kfree(image->image_loader_data); + image->image_loader_data = NULL; ++ ++ kexec_file_dbg_print = false; + } + + #ifdef CONFIG_KEXEC_SIG +@@ -202,6 +204,8 @@ kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd, + if (ret < 0) + return ret; + image->kernel_buf_len = ret; ++ kexec_dprintk("kernel: %p kernel_size: %#lx\n", ++ image->kernel_buf, image->kernel_buf_len); + + /* Call arch image probe handlers */ + ret = arch_kexec_kernel_image_probe(image, image->kernel_buf, +@@ -278,6 +282,7 @@ kimage_file_alloc_init(struct kimage **rimage, int kernel_fd, + if (!image) + return -ENOMEM; + ++ kexec_file_dbg_print = !!(flags & KEXEC_FILE_DEBUG); + image->file_mode = 1; + + if (kexec_on_panic) { +@@ -384,13 +389,14 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, + if (ret) + goto out; + ++ kexec_dprintk("nr_segments = %lu\n", image->nr_segments); + for (i = 0; i < image->nr_segments; i++) { + struct kexec_segment *ksegment; + + ksegment = &image->segment[i]; +- pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n", +- i, ksegment->buf, ksegment->bufsz, ksegment->mem, +- ksegment->memsz); ++ kexec_dprintk("segment[%d]: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n", ++ i, ksegment->buf, ksegment->bufsz, ksegment->mem, ++ ksegment->memsz); + + ret = kimage_load_segment(image, &image->segment[i]); + if (ret) +@@ -403,6 +409,8 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, + if (ret) + goto out; + ++ kexec_dprintk("kexec_file_load: type:%u, start:0x%lx head:0x%lx flags:0x%lx\n", ++ image->type, image->start, image->head, flags); + /* + * Free up any temporary buffers allocated which are not needed + * after image has been loaded diff --git a/kernel/panic.c b/kernel/panic.c -index f352869feb25..6f88d7031594 100644 +index 7be1971d8f87..f9438e7187e8 100644 
--- a/kernel/panic.c +++ b/kernel/panic.c @@ -37,6 +37,7 @@ @@ -620140,7 +634943,7 @@ index f352869feb25..6f88d7031594 100644 /* * This thread may hit another WARN() in the panic path. diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 2740e9b918f5..bde5f0de1759 100644 +index d9afdd7e588e..f71c3ab176e5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6691,7 +6691,9 @@ static void __sched notrace __schedule(unsigned int sched_mode) @@ -620271,32 +635074,6 @@ index 32f99e9a670e..dacadd904250 100644 #ifndef find_first_zero_bit /* * Find the first cleared bit in a memory region. -diff --git a/mm/memblock.c b/mm/memblock.c -index dca1c491dcdb..f2c535d90f87 100644 ---- a/mm/memblock.c -+++ b/mm/memblock.c -@@ -1811,6 +1811,7 @@ phys_addr_t __init_memblock memblock_end_of_DRAM(void) - - static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) - { -+ phys_addr_t phys_ram_base = memblock_start_of_DRAM(); - phys_addr_t max_addr = PHYS_ADDR_MAX; - struct memblock_region *r; - -@@ -1820,11 +1821,10 @@ static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) - * of those regions, max_addr will keep original value PHYS_ADDR_MAX - */ - for_each_mem_region(r) { -- if (limit <= r->size) { -- max_addr = r->base + limit; -+ if ((r->base + r->size) >= (phys_ram_base + limit)) { -+ max_addr = phys_ram_base + limit; - break; - } -- limit -= r->size; - } - - return max_addr; diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index 4fcd959dcc4d..bb12d12028b3 100644 --- a/mm/pgtable-generic.c @@ -620897,6 +635674,21 @@ index 3eee0143e0c5..3d115449f2b2 100644 %if %{with_devel} %{make} %{makeflags} run-command KBUILD_RUN_COMMAND='${srctree}/scripts/package/install-extmod-build %{buildroot}/usr/src/kernels/%{KERNELRELEASE}' %endif +diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c +index ad133fe120db..dadc1d138118 100644 +--- a/security/integrity/ima/ima_kexec.c ++++ 
b/security/integrity/ima/ima_kexec.c +@@ -129,8 +129,8 @@ void ima_add_kexec_buffer(struct kimage *image) + image->ima_buffer_size = kexec_segment_size; + image->ima_buffer = kexec_buffer; + +- pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n", +- kbuf.mem); ++ kexec_dprintk("kexec measurement buffer for the loaded kernel at 0x%lx.\n", ++ kbuf.mem); + } + #endif /* IMA_KEXEC */ + diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index a11cd7d6295f..03ecfa43bc3a 100644 --- a/sound/core/pcm_lib.c @@ -620910,10 +635702,10 @@ index a11cd7d6295f..03ecfa43bc3a 100644 return -EPIPE; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c -index cb8f46028159..da563420ba29 100644 +index 363756ac0c07..3e0e52c2aa56 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c -@@ -298,8 +298,7 @@ enum { +@@ -299,8 +299,7 @@ enum { /* quirks for ATI/AMD HDMI */ #define AZX_DCAPS_PRESET_ATI_HDMI \ @@ -620923,7 +635715,7 @@ index cb8f46028159..da563420ba29 100644 /* quirks for ATI HDMI with snoop off */ #define AZX_DCAPS_PRESET_ATI_HDMI_NS \ -@@ -313,7 +312,7 @@ enum { +@@ -314,7 +313,7 @@ enum { /* quirks for Nvidia */ #define AZX_DCAPS_PRESET_NVIDIA \ @@ -629270,16 +644062,1563 @@ index 2a5a29217374..b53c22b909b8 100644 tmp_cpus[k++] = __perf_cpu_map__cpu(other, j++); assert(k <= tmp_len); +diff --git a/tools/perf/arch/riscv/Makefile b/tools/perf/arch/riscv/Makefile +index a8d25d005207..90c3c476a242 100644 +--- a/tools/perf/arch/riscv/Makefile ++++ b/tools/perf/arch/riscv/Makefile +@@ -3,3 +3,4 @@ PERF_HAVE_DWARF_REGS := 1 + endif + PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1 + PERF_HAVE_JITDUMP := 1 ++HAVE_KVM_STAT_SUPPORT := 1 +diff --git a/tools/perf/arch/riscv/util/Build b/tools/perf/arch/riscv/util/Build +index 603dbb5ae4dc..d72b04f8d32b 100644 +--- a/tools/perf/arch/riscv/util/Build ++++ b/tools/perf/arch/riscv/util/Build +@@ -1,5 +1,6 @@ + perf-y += perf_regs.o + perf-y += header.o + ++perf-$(CONFIG_LIBTRACEEVENT) += 
kvm-stat.o + perf-$(CONFIG_DWARF) += dwarf-regs.o + perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o +diff --git a/tools/perf/arch/riscv/util/kvm-stat.c b/tools/perf/arch/riscv/util/kvm-stat.c +new file mode 100644 +index 000000000000..491aef449d1a +--- /dev/null ++++ b/tools/perf/arch/riscv/util/kvm-stat.c +@@ -0,0 +1,78 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Arch specific functions for perf kvm stat. ++ * ++ * Copyright 2024 Beijing ESWIN Computing Technology Co., Ltd. ++ * ++ */ ++#include ++#include ++#include "../../../util/evsel.h" ++#include "../../../util/kvm-stat.h" ++#include "riscv_exception_types.h" ++#include "debug.h" ++ ++define_exit_reasons_table(riscv_exit_reasons, kvm_riscv_exception_class); ++ ++const char *vcpu_id_str = "id"; ++const char *kvm_exit_reason = "scause"; ++const char *kvm_entry_trace = "kvm:kvm_entry"; ++const char *kvm_exit_trace = "kvm:kvm_exit"; ++ ++const char *kvm_events_tp[] = { ++ "kvm:kvm_entry", ++ "kvm:kvm_exit", ++ NULL, ++}; ++ ++static void event_get_key(struct evsel *evsel, ++ struct perf_sample *sample, ++ struct event_key *key) ++{ ++ key->info = 0; ++ key->key = evsel__intval(evsel, sample, kvm_exit_reason); ++ key->exit_reasons = riscv_exit_reasons; ++} ++ ++static bool event_begin(struct evsel *evsel, ++ struct perf_sample *sample __maybe_unused, ++ struct event_key *key __maybe_unused) ++{ ++ return evsel__name_is(evsel, kvm_entry_trace); ++} ++ ++static bool event_end(struct evsel *evsel, ++ struct perf_sample *sample, ++ struct event_key *key) ++{ ++ if (evsel__name_is(evsel, kvm_exit_trace)) { ++ event_get_key(evsel, sample, key); ++ return true; ++ } ++ return false; ++} ++ ++static struct kvm_events_ops exit_events = { ++ .is_begin_event = event_begin, ++ .is_end_event = event_end, ++ .decode_key = exit_event_decode_key, ++ .name = "VM-EXIT" ++}; ++ ++struct kvm_reg_events_ops kvm_reg_events_ops[] = { ++ { ++ .name = "vmexit", ++ .ops = &exit_events, ++ }, ++ { NULL, NULL }, ++}; ++ ++const 
char * const kvm_skip_events[] = { ++ NULL, ++}; ++ ++int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused) ++{ ++ kvm->exit_reasons_isa = "riscv64"; ++ return 0; ++} +diff --git a/tools/perf/arch/riscv/util/riscv_exception_types.h b/tools/perf/arch/riscv/util/riscv_exception_types.h +new file mode 100644 +index 000000000000..c49b8fa5e847 +--- /dev/null ++++ b/tools/perf/arch/riscv/util/riscv_exception_types.h +@@ -0,0 +1,35 @@ ++// SPDX-License-Identifier: GPL-2.0 ++#ifndef ARCH_PERF_RISCV_EXCEPTION_TYPES_H ++#define ARCH_PERF_RISCV_EXCEPTION_TYPES_H ++ ++#define EXC_INST_MISALIGNED 0 ++#define EXC_INST_ACCESS 1 ++#define EXC_INST_ILLEGAL 2 ++#define EXC_BREAKPOINT 3 ++#define EXC_LOAD_MISALIGNED 4 ++#define EXC_LOAD_ACCESS 5 ++#define EXC_STORE_MISALIGNED 6 ++#define EXC_STORE_ACCESS 7 ++#define EXC_SYSCALL 8 ++#define EXC_HYPERVISOR_SYSCALL 9 ++#define EXC_SUPERVISOR_SYSCALL 10 ++#define EXC_INST_PAGE_FAULT 12 ++#define EXC_LOAD_PAGE_FAULT 13 ++#define EXC_STORE_PAGE_FAULT 15 ++#define EXC_INST_GUEST_PAGE_FAULT 20 ++#define EXC_LOAD_GUEST_PAGE_FAULT 21 ++#define EXC_VIRTUAL_INST_FAULT 22 ++#define EXC_STORE_GUEST_PAGE_FAULT 23 ++ ++#define EXC(x) {EXC_##x, #x } ++ ++#define kvm_riscv_exception_class \ ++ EXC(INST_MISALIGNED), EXC(INST_ACCESS), EXC(INST_ILLEGAL), \ ++ EXC(BREAKPOINT), EXC(LOAD_MISALIGNED), EXC(LOAD_ACCESS), \ ++ EXC(STORE_MISALIGNED), EXC(STORE_ACCESS), EXC(SYSCALL), \ ++ EXC(HYPERVISOR_SYSCALL), EXC(SUPERVISOR_SYSCALL), \ ++ EXC(INST_PAGE_FAULT), EXC(LOAD_PAGE_FAULT), EXC(STORE_PAGE_FAULT), \ ++ EXC(INST_GUEST_PAGE_FAULT), EXC(LOAD_GUEST_PAGE_FAULT), \ ++ EXC(VIRTUAL_INST_FAULT), EXC(STORE_GUEST_PAGE_FAULT) ++ ++#endif /* ARCH_PERF_RISCV_EXCEPTION_TYPES_H */ +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/branch.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/branch.json +new file mode 100644 +index 000000000000..53ab39430e1e +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/branch.json 
+@@ -0,0 +1,77 @@ ++[ ++ { ++ "EventName": "BR_MIS_PRED", ++ "EventCode": "0x00000109", ++ "BriefDescription": "Mispredicted or not predicted branch speculatively executed" ++ }, ++ { ++ "EventName": "BR_PRED", ++ "EventCode": "0x00000209", ++ "BriefDescription": "Predictable branch speculatively executed" ++ }, ++ { ++ "EventName": "BR_RETIRED", ++ "EventCode": "0x00000409", ++ "BriefDescription": "Branch instruction architecturally executed" ++ }, ++ { ++ "EventName": "BR_MIS_PRED_RETIRED", ++ "EventCode": "0x00000809", ++ "BriefDescription": "Mispredicted branch instruction architecturally executed" ++ }, ++ { ++ "EventName": "BR_IMMED_SPEC", ++ "EventCode": "0x00001009", ++ "BriefDescription": "Branch immediate instructions speculatively executed" ++ }, ++ { ++ "EventName": "BR_RETURN_SPEC", ++ "EventCode": "0x00002009", ++ "BriefDescription": "Procedure return instruction speculatively executed" ++ }, ++ { ++ "EventName": "BR_INDIRECT_SPEC", ++ "EventCode": "0x00004009", ++ "BriefDescription": "Indirect branch instruction speculatively executed" ++ }, ++ { ++ "EventName": "BR_PRED_BTB_RGN_UPDATE", ++ "EventCode": "0x00008009", ++ "BriefDescription": "Branch prediction branch target buffer region update" ++ }, ++ { ++ "EventName": "SPEC_RET_STACK_FULL", ++ "EventCode": "0x00010009", ++ "BriefDescription": "Speculative return stack full" ++ }, ++ { ++ "EventName": "FETCH_FQ_EMPTY", ++ "EventCode": "0x00020009", ++ "BriefDescription": "Fetch queue empty" ++ }, ++ { ++ "EventName": "FETCH_MCACHE_INVALIDATE", ++ "EventCode": "0x00040009", ++ "BriefDescription": "Fetch mopcache invalidate" ++ }, ++ { ++ "EventName": "FETCH_MCACHE_ICACHE_SWITCH", ++ "EventCode": "0x00080009", ++ "BriefDescription": "Fetch mopcache instruction cache switch" ++ }, ++ { ++ "EventName": "FETCH_MCACHE_ICACHE_SWITCH_PENALTY", ++ "EventCode": "0x00100009", ++ "BriefDescription": "Fetch mopcache instruction cache switch penalty" ++ }, ++ { ++ "EventName": "BR_PRED_BTB_RGN_UPDATE_NREADY", ++ 
"EventCode": "0x00200009", ++ "BriefDescription": "Branch prediction branch target buffer region update not ready" ++ }, ++ { ++ "EventName": "BR_PRED_BTB_CTX_UPDATE", ++ "EventCode": "0x00400009", ++ "BriefDescription": "Branch prediction branch target buffer context update" ++ } ++] +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/exception.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/exception.json +new file mode 100644 +index 000000000000..26b10dbd2e4f +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/exception.json +@@ -0,0 +1,102 @@ ++[ ++ { ++ "EventName": "EXC_TAKEN", ++ "EventCode": "0x0000010a", ++ "BriefDescription": "Exception taken" ++ }, ++ { ++ "EventName": "EXC_RETURN", ++ "EventCode": "0x0000020a", ++ "BriefDescription": "Exception return" ++ }, ++ { ++ "EventName": "EXC_UNDEF", ++ "EventCode": "0x0000040a", ++ "BriefDescription": "Undefined exception taken locally" ++ }, ++ { ++ "EventName": "EXC_ECALL_U", ++ "EventCode": "0x0000080a", ++ "BriefDescription": "User Call exception taken locally" ++ }, ++ { ++ "EventName": "EXC_PABORT", ++ "EventCode": "0x0000100a", ++ "BriefDescription": "Instruction abort exception taken locally" ++ }, ++ { ++ "EventName": "EXC_DABORT", ++ "EventCode": "0x0000200a", ++ "BriefDescription": "Data abort or SError taken locally" ++ }, ++ { ++ "EventName": "EXC_IRQ", ++ "EventCode": "0x0000400a", ++ "BriefDescription": "IRQ exception taken locally" ++ }, ++ { ++ "EventName": "EXC_FIQ", ++ "EventCode": "0x0000800a", ++ "BriefDescription": "FIQ exception taken locally" ++ }, ++ { ++ "EventName": "EXC_ECALL_M", ++ "EventCode": "0x0001000a", ++ "BriefDescription": "M-mode Call exception" ++ }, ++ { ++ "EventName": "EXC_ECALL_S", ++ "EventCode": "0x0002000a", ++ "BriefDescription": "S-mode Call exception" ++ }, ++ { ++ "EventName": "EXC_TRAP_PABORT", ++ "EventCode": "0x0004000a", ++ "BriefDescription": "Instruction abort exception not taken locally" ++ }, ++ { ++ "EventName": 
"EXC_TRAP_DABORT", ++ "EventCode": "0x0008000a", ++ "BriefDescription": "Data abort or SError not taken locally" ++ }, ++ { ++ "EventName": "EXC_ECALL_VS", ++ "EventCode": "0x0010000a", ++ "BriefDescription": "VS-mode Call exception" ++ }, ++ { ++ "EventName": "EXC_TRAP_IRQ", ++ "EventCode": "0x0020000a", ++ "BriefDescription": "IRQ exception not taken locally" ++ }, ++ { ++ "EventName": "EXC_TRAP_FIQ", ++ "EventCode": "0x0040000a", ++ "BriefDescription": "FIQ exception not taken locally" ++ }, ++ { ++ "EventName": "FLUSH", ++ "EventCode": "0x0080000a", ++ "BriefDescription": "Flush" ++ }, ++ { ++ "EventName": "FLUSH_MEM", ++ "EventCode": "0x0100000a", ++ "BriefDescription": "Access memory flush" ++ }, ++ { ++ "EventName": "FLUSH_ISB", ++ "EventCode": "0x0400000a", ++ "BriefDescription": "Flush Instruction Synchronization Barrier" ++ }, ++ { ++ "EventName": "FLUSH_OTHER", ++ "EventCode": "0x0800000a", ++ "BriefDescription": "Flush Other" ++ }, ++ { ++ "EventName": "FLUSH_BAD_BRANCH", ++ "EventCode": "0x1000000a", ++ "BriefDescription": "Flush Bad Branch" ++ } ++] +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/firmware.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/firmware.json +new file mode 100644 +index 000000000000..7149caec4f80 +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/firmware.json +@@ -0,0 +1,68 @@ ++[ ++ { ++ "ArchStdEvent": "FW_MISALIGNED_LOAD" ++ }, ++ { ++ "ArchStdEvent": "FW_MISALIGNED_STORE" ++ }, ++ { ++ "ArchStdEvent": "FW_ACCESS_LOAD" ++ }, ++ { ++ "ArchStdEvent": "FW_ACCESS_STORE" ++ }, ++ { ++ "ArchStdEvent": "FW_ILLEGAL_INSN" ++ }, ++ { ++ "ArchStdEvent": "FW_SET_TIMER" ++ }, ++ { ++ "ArchStdEvent": "FW_IPI_SENT" ++ }, ++ { ++ "ArchStdEvent": "FW_IPI_RECEIVED" ++ }, ++ { ++ "ArchStdEvent": "FW_FENCE_I_SENT" ++ }, ++ { ++ "ArchStdEvent": "FW_FENCE_I_RECEIVED" ++ }, ++ { ++ "ArchStdEvent": "FW_SFENCE_VMA_SENT" ++ }, ++ { ++ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" ++ }, ++ { ++ "ArchStdEvent": 
"FW_SFENCE_VMA_ASID_SENT" ++ }, ++ { ++ "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED" ++ }, ++ { ++ "ArchStdEvent": "FW_HFENCE_GVMA_SENT" ++ }, ++ { ++ "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED" ++ }, ++ { ++ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT" ++ }, ++ { ++ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED" ++ }, ++ { ++ "ArchStdEvent": "FW_HFENCE_VVMA_SENT" ++ }, ++ { ++ "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED" ++ }, ++ { ++ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT" ++ }, ++ { ++ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED" ++ } ++] +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/general_cpu.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/general_cpu.json +new file mode 100644 +index 000000000000..51b01e841bd2 +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/general_cpu.json +@@ -0,0 +1,12 @@ ++[ ++ { ++ "EventName": "CID_WRITE_RETIRED", ++ "EventCode": "0x0000020b", ++ "BriefDescription": "CONTEXTIDR register write" ++ }, ++ { ++ "EventName": "CPU_CYCLES", ++ "EventCode": "0x0000040b", ++ "BriefDescription": "cycles" ++ } ++] +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/general_inst.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/general_inst.json +new file mode 100644 +index 000000000000..ad6106c43f74 +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/general_inst.json +@@ -0,0 +1,57 @@ ++[ ++ { ++ "EventName": "INST_RETIRED", ++ "EventCode": "0x00000108", ++ "BriefDescription": "Instruction architecturally executed" ++ }, ++ { ++ "EventName": "INST_SPEC", ++ "EventCode": "0x00000208", ++ "BriefDescription": "Instruction speculatively executed" ++ }, ++ { ++ "EventName": "DP_SPEC", ++ "EventCode": "0x00000408", ++ "BriefDescription": "Integer data-processing instruction speculatively executed" ++ }, ++ { ++ "EventName": "PC_WRITE_SPEC", ++ "EventCode": "0x00000808", ++ "BriefDescription": "PC write instruction speculatively executed" ++ }, ++ { ++ "EventName": "CRYPTO_SPEC", ++ 
"EventCode": "0x00001008", ++ "BriefDescription": "Crypto instruction speculatively executed" ++ }, ++ { ++ "EventName": "FENCE.i_SPEC", ++ "EventCode": "0x00002008", ++ "BriefDescription": "FENCE.i speculatively executed" ++ }, ++ { ++ "EventName": "OP_RETIRED", ++ "EventCode": "0x00004008", ++ "BriefDescription": "Micro-operation architecturally executed" ++ }, ++ { ++ "EventName": "OP_SPEC", ++ "EventCode": "0x00008008", ++ "BriefDescription": "Micro-operation speculatively executed" ++ }, ++ { ++ "EventName": "DECODE_MOP", ++ "EventCode": "0x00010008", ++ "BriefDescription": "decode mop" ++ }, ++ { ++ "EventName": "TPE_SPEC", ++ "EventCode": "0x00020008", ++ "BriefDescription": "TPE Instruction speculatively executed" ++ }, ++ { ++ "EventName": "FUSION_SPEC", ++ "EventCode": "0x00040008", ++ "BriefDescription": "Fusion Instruction speculatively executed" ++ } ++] +\ No newline at end of file +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l1dcache.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l1dcache.json +new file mode 100644 +index 000000000000..e66a559c69f6 +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l1dcache.json +@@ -0,0 +1,197 @@ ++[ ++ { ++ "EventName": "L1D_CACHE_REFILL", ++ "EventCode": "0x00000101", ++ "BriefDescription": "L1 data cache refill" ++ }, ++ { ++ "EventName": "L1D_CACHE", ++ "EventCode": "0x00000201", ++ "BriefDescription": "L1 data cache access" ++ }, ++ { ++ "EventName": "L1D_CACHE_WB", ++ "EventCode": "0x00000401", ++ "BriefDescription": "L1 data cache write-back" ++ }, ++ { ++ "EventName": "L1D_CACHE_LMISS_RD", ++ "EventCode": "0x00000801", ++ "BriefDescription": "Level 1 data cache long-latency miss" ++ }, ++ { ++ "EventName": "L1D_CACHE_RD", ++ "EventCode": "0x00001001", ++ "BriefDescription": "L1 data cache read" ++ }, ++ { ++ "EventName": "L1D_CACHE_WR", ++ "EventCode": "0x00002001", ++ "BriefDescription": "L1 data cache write" ++ }, ++ { ++ "EventName": "L1D_CACHE_REFILL_RD", ++ 
"EventCode": "0x00004001", ++ "BriefDescription": "L1 data cache refill read" ++ }, ++ { ++ "EventName": "L1D_CACHE_REFILL_WR", ++ "EventCode": "0x00008001", ++ "BriefDescription": "L1 data cache refill write" ++ }, ++ { ++ "EventName": "L1D_CACHE_REFILL_INNER", ++ "EventCode": "0x00010001", ++ "BriefDescription": "L1 data cache refill inner" ++ }, ++ { ++ "EventName": "L1D_CACHE_REFILL_OUTER", ++ "EventCode": "0x00020001", ++ "BriefDescription": "L1 data cache refill outer" ++ }, ++ { ++ "EventName": "L1D_CACHE_WB_VICTIM", ++ "EventCode": "0x00040001", ++ "BriefDescription": "L1 data cache write-back victim" ++ }, ++ { ++ "EventName": "L1D_CACHE_WB_CLEAN", ++ "EventCode": "0x00080001", ++ "BriefDescription": "L1 data cache write-back clean" ++ }, ++ { ++ "EventName": "L1D_CACHE_INVAL", ++ "EventCode": "0x00100001", ++ "BriefDescription": "L1 data cache invalidate" ++ }, ++ { ++ "EventName": "LDST_L1_PF_REFILL", ++ "EventCode": "0x00200001", ++ "BriefDescription": "L1 refill due to load store prefetch miss" ++ }, ++ { ++ "EventName": "LDST_L1_PF_LATE", ++ "EventCode": "0x00400001", ++ "BriefDescription": "Load store hit prefetch" ++ }, ++ { ++ "EventName": "L1D_CACHE_REFILL_INER_L2", ++ "EventCode": "0x00800001", ++ "BriefDescription": "L1 data cache refill iner L2" ++ }, ++ { ++ "EventName": "L1D_CACHE_REFILL_INER_L3", ++ "EventCode": "0x01000001", ++ "BriefDescription": "L1 data cache refill iner L3" ++ }, ++ { ++ "EventName": "L1D_TLB_REFILL_RD_PF", ++ "EventCode": "0x02000001", ++ "BriefDescription": "L1 data TLB refill with read prefetch" ++ }, ++ { ++ "EventName": "STORE_STREAM", ++ "EventCode": "0x04000001", ++ "BriefDescription": "store stream" ++ }, ++ { ++ "EventName": "LDST_NUKE_RAR", ++ "EventCode": "0x08000001", ++ "BriefDescription": "Load/Store nuke due to read after read" ++ }, ++ { ++ "EventName": "LDST_NUKE_RAW", ++ "EventCode": "0x10000001", ++ "BriefDescription": "Load/Store nuke due to read after write" ++ }, ++ { ++ "EventName": 
"LDST_L1_PF_GEN_PAGE", ++ "EventCode": "0x20000001", ++ "BriefDescription": "Load/Store L1 cache pager prefetch" ++ }, ++ { ++ "EventName": "LDST_L1_PF_GEN_STRIDE", ++ "EventCode": "0x40000001", ++ "BriefDescription": "Load/Store L1 cahce stride prefetch" ++ }, ++ { ++ "EventName": "LDST_L2_PF_GEN_LD", ++ "EventCode": "0x80000001", ++ "BriefDescription": "L2 cache prefetch load" ++ }, ++ { ++ "EventName": "LDST_L2_PF_GEN_ST", ++ "EventCode": "0x100000001", ++ "BriefDescription": "L2 cache prefetch store" ++ }, ++ { ++ "EventName": "LS_PF_TRAIN_TABLE_ALLOC", ++ "EventCode": "0x200000001", ++ "BriefDescription": "prefetch training table allocation" ++ }, ++ { ++ "EventName": "LS_PF_PHT_LOOKUP", ++ "EventCode": "0x400000001", ++ "BriefDescription": "Pattern History Table prefetch lookup" ++ }, ++ { ++ "EventName": "LS_PF_PHT_LOOKUP_HIT", ++ "EventCode": "0x800000001", ++ "BriefDescription": "Pattern History Table prefetch lookup hit" ++ }, ++ { ++ "EventName": "LS_PF_GEN_TABLE_ALLOC", ++ "EventCode": "0x1000000001", ++ "BriefDescription": "Load/Store Prefetch Generated Page Table Allocation" ++ }, ++ { ++ "EventName": "LS_PF_GEN_TABLE_ALLOC_PF_PEND", ++ "EventCode": "0x2000000001", ++ "BriefDescription": "Load/Store Prefetch Generated Page Table Allocation Pending Due to Full Queue" ++ }, ++ { ++ "EventName": "LS_PF_PRQ_ALLOC_PF_PEND", ++ "EventCode": "0x4000000001", ++ "BriefDescription": "Load/Store Prefetch Request Queue Allocation Pending Prefetch" ++ }, ++ { ++ "EventName": "LDST_L1_PF", ++ "EventCode": "0x8000000001", ++ "BriefDescription": "Load/Store L1 Software Prefetch Count" ++ }, ++ { ++ "EventName": "LDST_L1_PF_HIT", ++ "EventCode": "0x10000000001", ++ "BriefDescription": "Load/Store L1 Software Prefetch Hit Count" ++ }, ++ { ++ "EventName": "LDST_LRQ_FULL", ++ "EventCode": "0x20000000001", ++ "BriefDescription": "Load/Store Load Request Queue Full Count" ++ }, ++ { ++ "EventName": "MAX_POWER_THROTTLE_ACTIVE", ++ "EventCode": "0x80000000001", ++ 
"BriefDescription": "Maximum Power Throttle Active Event" ++ }, ++ { ++ "EventName": "DIDT_THROTTLE_ACTIVE", ++ "EventCode": "0x100000000001", ++ "BriefDescription": "Dynamic Instruction Deferral Throttle Active Event" ++ }, ++ { ++ "EventName": "NEAR_CAS", ++ "EventCode": "0x200000000001", ++ "BriefDescription": "Compare-And-Swap Near Address Event Count" ++ }, ++ { ++ "EventName": "NEAR_CAS_PASS", ++ "EventCode": "0x400000000001", ++ "BriefDescription": "Compare-And-Swap Near Address Passed Event Count" ++ }, ++ { ++ "EventName": "FAR_CAS", ++ "EventCode": "0x800000000001", ++ "BriefDescription": "Compare-And-Swap Far Address Event Count" ++ } ++] +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l1icache.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l1icache.json +new file mode 100644 +index 000000000000..02b7d54395c4 +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l1icache.json +@@ -0,0 +1,97 @@ ++[ ++ { ++ "EventName": "L1I_CACHE_REFILL", ++ "EventCode": "0x00000102", ++ "BriefDescription": "L1 instruction cache refill" ++ }, ++ { ++ "EventName": "L1I_CACHE", ++ "EventCode": "0x00000202", ++ "BriefDescription": "Level 1 instruction cache access" ++ }, ++ { ++ "EventName": "L1I_CACHE_LMISS", ++ "EventCode": "0x00000402", ++ "BriefDescription": "L1 instruction cache long latency miss" ++ }, ++ { ++ "EventName": "UNUSED_PF", ++ "EventCode": "0x00000802", ++ "BriefDescription": "Unused prefetch data" ++ }, ++ { ++ "EventName": "PFT_SENT", ++ "EventCode": "0x00001002", ++ "BriefDescription": "Number of the prefetch target request" ++ }, ++ { ++ "EventName": "PFT_USEFUL", ++ "EventCode": "0x00002002", ++ "BriefDescription": "Useful prefetch target" ++ }, ++ { ++ "EventName": "PFT_DROP", ++ "EventCode": "0x00004002", ++ "BriefDescription": "drop prefetch target" ++ }, ++ { ++ "EventName": "TQ_FULL", ++ "EventCode": "0x00008002", ++ "BriefDescription": "TQ entry is full" ++ }, ++ { ++ "EventName": "STASH_DROP", ++ "EventCode": 
"0x00010002", ++ "BriefDescription": "Drop stash request" ++ }, ++ { ++ "EventName": "L1I_CACH_INVAL_ALL", ++ "EventCode": "0x00020002", ++ "BriefDescription": "Level 1 Instruction Cache Invalidate All" ++ }, ++ { ++ "EventName": "TXDAT_LCRD_BLOCK", ++ "EventCode": "0x00040002", ++ "BriefDescription": "Transmit Data Link Credit Block" ++ }, ++ { ++ "EventName": "TXRSP_LCRD_BLOCK", ++ "EventCode": "0x00080002", ++ "BriefDescription": "Transmit Response Link Credit Block" ++ }, ++ { ++ "EventName": "TXREQ_LCRD_BLOCK", ++ "EventCode": "0x00100002", ++ "BriefDescription": "Transmit Request Link Credit Block" ++ }, ++ { ++ "EventName": "L1I_CACH_INVAL_ALL_LOCAL", ++ "EventCode": "0x00200002", ++ "BriefDescription": "Level 1 Instruction Cache Invalidate All (Local)" ++ }, ++ { ++ "EventName": "L1I_CACH_INVAL_ALL_REMOTE", ++ "EventCode": "0x00400002", ++ "BriefDescription": "Level 1 Instruction Cache Invalidate All (Remote)" ++ }, ++ { ++ "EventName": "L1I_CACH_INVAL_VA_LOCAL", ++ "EventCode": "0x00800002", ++ "BriefDescription": "Level 1 Instruction Cache Invalidate Virtual Address (Local)" ++ }, ++ { ++ "EventName": "L1I_CACH_INVAL_VA_REMOTE", ++ "EventCode": "0x01000002", ++ "BriefDescription": "Level 1 Instruction Cache Invalidate Virtual Address (Remote)" ++ }, ++ { ++ "EventName": "DEADBLOCK", ++ "EventCode": "0x04000002", ++ "BriefDescription": "Dead Block" ++ }, ++ { ++ "EventName": "L2_CACHE_HIT_LINE_PF", ++ "EventCode": "0x08000002", ++ "BriefDescription": "Level 2 Cache Hit Line Prefetch" ++ } ++] +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l2cache.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l2cache.json +new file mode 100644 +index 000000000000..0751e31b1610 +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l2cache.json +@@ -0,0 +1,112 @@ ++[ ++ { ++ "EventName": "L2_CACHE", ++ "EventCode": "0x00000103", ++ "BriefDescription": "L2 cache access" ++ }, ++ { ++ "EventName": "L2_CACHE_REFILL", ++ "EventCode": 
"0x00000203", ++ "BriefDescription": "L2cache refill" ++ }, ++ { ++ "EventName": "L2_CACHE_WB", ++ "EventCode": "0x00000403", ++ "BriefDescription": "L2 cache write-back" ++ }, ++ { ++ "EventName": "L2_CACHE_RD", ++ "EventCode": "0x00000803", ++ "BriefDescription": "L2 cache access, read" ++ }, ++ { ++ "EventName": "L2_CACHE_WR", ++ "EventCode": "0x00001003", ++ "BriefDescription": "L2 cache access, write" ++ }, ++ { ++ "EventName": "L2_CACHE_REFILL_RD", ++ "EventCode": "0x00002003", ++ "BriefDescription": "L2 cache refill, read" ++ }, ++ { ++ "EventName": "L2_CACHE_WB_VICTIM", ++ "EventCode": "0x00004003", ++ "BriefDescription": "L2 cache write-back, victim" ++ }, ++ { ++ "EventName": "L2_CACHE_WB_CLEAN", ++ "EventCode": "0x00008003", ++ "BriefDescription": "L2 cache write-back, cleaning and coherency" ++ }, ++ { ++ "EventName": "L2_CACHE_INVAL", ++ "EventCode": "0x00010003", ++ "BriefDescription": "L2 cache invalidate" ++ }, ++ { ++ "EventName": "L2_CACHE_LMISS_RD", ++ "EventCode": "0x00020003", ++ "BriefDescription": "L2 cache long latency miss" ++ }, ++ { ++ "EventName": "CACHE_IF_REFILL", ++ "EventCode": "0x00080003", ++ "BriefDescription": "L2 cache refill due to ifu read" ++ }, ++ { ++ "EventName": "CACHE_LS_REFILL", ++ "EventCode": "0x00100003", ++ "BriefDescription": "L2 cache refill due to ls read" ++ }, ++ { ++ "EventName": "CACHE_TBW_REFILL", ++ "EventCode": "0x00200003", ++ "BriefDescription": "L2 cache refill due to MMU read" ++ }, ++ { ++ "EventName": "CACHE_PF_REFILL", ++ "EventCode": "0x00400003", ++ "BriefDescription": "L2 cache refill due to prefetch" ++ }, ++ { ++ "EventName": "CACHE_PF_LATE_REFILL", ++ "EventCode": "0x00800003", ++ "BriefDescription": "L2 cache refill due to prefetch late" ++ }, ++ { ++ "EventName": "CACHE_PF", ++ "EventCode": "0x01000003", ++ "BriefDescription": "L2 cache prefetch" ++ }, ++ { ++ "EventName": "CACHE_PF_HIT", ++ "EventCode": "0x02000003", ++ "BriefDescription": "L2 cache prefetch hit" ++ }, ++ { ++ "EventName": 
"CACHE_INNER_REFILL", ++ "EventCode": "0x04000003", ++ "BriefDescription": "L2 cache refill due to inner read request" ++ }, ++ { ++ "EventName": "CACHE_OUTER_REFILL", ++ "EventCode": "0x08000003", ++ "BriefDescription": "L2 cache refill due to outer read request" ++ }, ++ { ++ "EventName": "L2_CACHE_ALLOCATE", ++ "EventCode": "0x10000003", ++ "BriefDescription": "L2 cache allocate" ++ }, ++ { ++ "EventName": "BUS_ACCESS_TRACE", ++ "EventCode": "0x20000003", ++ "BriefDescription": "Bus access trace request" ++ }, ++ { ++ "EventName": "BUS_REQUEST_RETRYNOCRD", ++ "EventCode": "0x40000003", ++ "BriefDescription": "Bus request retrynocrd" ++ } ++] +\ No newline at end of file +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l3cache.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l3cache.json +new file mode 100644 +index 000000000000..b8262dc4eb0a +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/l3cache.json +@@ -0,0 +1,32 @@ ++[ ++ { ++ "EventName": "L3_CACHE_ALLOCATE", ++ "EventCode": "0x00000104", ++ "BriefDescription": "Attributable L 3 data cache allocation without refill" ++ }, ++ { ++ "EventName": "L3_CACHE_REFILL", ++ "EventCode": "0x00000204", ++ "BriefDescription": "Attributable L3 unified cache refill" ++ }, ++ { ++ "EventName": "L3_CACHE", ++ "EventCode": "0x00000404", ++ "BriefDescription": "Attributable Level 3 unified cache access" ++ }, ++ { ++ "EventName": "LL_CACHE_RD", ++ "EventCode": "0x00000804", ++ "BriefDescription": "Last level cache access, read" ++ }, ++ { ++ "EventName": "LL_CACHE_MISS_RD", ++ "EventCode": "0x00001004", ++ "BriefDescription": "Last level cache miss, read" ++ }, ++ { ++ "EventName": "L3_CACHE_RD", ++ "EventCode": "0x00002004", ++ "BriefDescription": "L3 cache read" ++ } ++] +\ No newline at end of file +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/ldst.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/ldst.json +new file mode 100644 +index 000000000000..1ff880296cbf +--- 
/dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/ldst.json +@@ -0,0 +1,67 @@ ++[ ++ { ++ "EventName": "UNALIGNED_LD_SPEC", ++ "EventCode": "0x00000107", ++ "BriefDescription": "Unaligned load speculation" ++ }, ++ { ++ "EventName": "UNALIGNED_ST_SPEC", ++ "EventCode": "0x00000207", ++ "BriefDescription": "Unaligned store speculation" ++ }, ++ { ++ "EventName": "UNALIGNED_LDST_SPEC", ++ "EventCode": "0x00000407", ++ "BriefDescription": "Unaligned load/store speculation" ++ }, ++ { ++ "EventName": "LD_SPEC", ++ "EventCode": "0x00000807", ++ "BriefDescription": "Load speculation" ++ }, ++ { ++ "EventName": "ST_SPEC", ++ "EventCode": "0x00001007", ++ "BriefDescription": "Store speculation" ++ }, ++ { ++ "EventName": "SFENCE_SPEC", ++ "EventCode": "0x00002007", ++ "BriefDescription": "SFENCE speculatively executed" ++ }, ++ { ++ "EventName": "FENCE_SPEC", ++ "EventCode": "0x00004007", ++ "BriefDescription": "FENCE speculatively executed" ++ }, ++ { ++ "EventName": "RC_LD_SPEC", ++ "EventCode": "0x00008007", ++ "BriefDescription": "RC load speculation" ++ }, ++ { ++ "EventName": "RC_ST_SPEC", ++ "EventCode": "0x00010007", ++ "BriefDescription": "RC store speculation" ++ }, ++ { ++ "EventName": "LR_SPEC", ++ "EventCode": "0x00020007", ++ "BriefDescription": "Load-Reserved speculatively executed" ++ }, ++ { ++ "EventName": "SC_PASS_SPEC", ++ "EventCode": "0x00040007", ++ "BriefDescription": "Successful store-condiitional speculatively executed" ++ }, ++ { ++ "EventName": "SC_FAlL_SPEC", ++ "EventCode": "0x00080007", ++ "BriefDescription": "Failed store-condiitional speculatively executed" ++ }, ++ { ++ "EventName": "SC_SPEC", ++ "EventCode": "0x00100007", ++ "BriefDescription": "Store-condiitional speculatively executed" ++ } ++] +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/mem.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/mem.json +new file mode 100644 +index 000000000000..3f9371de2af7 +--- /dev/null ++++ 
b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/mem.json +@@ -0,0 +1,182 @@ ++[ ++ { ++ "EventName": "MEM_ACCESS", ++ "EventCode": "0x00000105", ++ "BriefDescription": "Memory access" ++ }, ++ { ++ "EventName": "BUS_ACCESS", ++ "EventCode": "0x00000205", ++ "BriefDescription": "Bus access" ++ }, ++ { ++ "EventName": "MEMORY_ERROR", ++ "EventCode": "0x00000405", ++ "BriefDescription": "Memory error" ++ }, ++ { ++ "EventName": "REMOTE_ACCESS", ++ "EventCode": "0x00000805", ++ "BriefDescription": "Remote access" ++ }, ++ { ++ "EventName": "BUS_ACCESS_RD", ++ "EventCode": "0x00001005", ++ "BriefDescription": "Bus access read" ++ }, ++ { ++ "EventName": "BUS_ACCESS_WR", ++ "EventCode": "0x00002005", ++ "BriefDescription": "Bus access write" ++ }, ++ { ++ "EventName": "MEM_ACCESS_RD", ++ "EventCode": "0x00004005", ++ "BriefDescription": "Memory access read" ++ }, ++ { ++ "EventName": "MEM_ACCESS_WR", ++ "EventCode": "0x00008005", ++ "BriefDescription": "Memory access write" ++ }, ++ { ++ "EventName": "LDST_ALIGN_LAT", ++ "EventCode": "0x00010005", ++ "BriefDescription": "Load/store align latency" ++ }, ++ { ++ "EventName": "LD_ALIGN_LAT", ++ "EventCode": "0x00020005", ++ "BriefDescription": "Load align latency" ++ }, ++ { ++ "EventName": "ST_ALIGN_LAT", ++ "EventCode": "0x00040005", ++ "BriefDescription": "Store align latency" ++ }, ++ { ++ "EventName": "BUS_REQUEST_REQ", ++ "EventCode": "0x000400005", ++ "BriefDescription": "CHI Bus Request Sent" ++ }, ++ { ++ "EventName": "BUS_REQUEST_RETRY", ++ "EventCode": "0x000800005", ++ "BriefDescription": "CHI Bus Request Retried" ++ }, ++ { ++ "EventName": "BUS_REQUEST_SN", ++ "EventCode": "0x001000005", ++ "BriefDescription": "CHI Snoop Request Sent to L2" ++ }, ++ { ++ "EventName": "L2TLB_PF_REFILL", ++ "EventCode": "0x002000005", ++ "BriefDescription": "L2 TLB Prefetch-Induced Leaf Page Refill" ++ }, ++ { ++ "EventName": "TBW", ++ "EventCode": "0x004000005", ++ "BriefDescription": "Page Table Walk" ++ }, ++ { ++ "EventName": 
"TBW_DESC", ++ "EventCode": "0x008000005", ++ "BriefDescription": "Page Table Walk Descriptor" ++ }, ++ { ++ "EventName": "S1L2_HIT", ++ "EventCode": "0x010000005", ++ "BriefDescription": "Stage1 level 2 page table hit" ++ }, ++ { ++ "EventName": "S1L1_HIT", ++ "EventCode": "0x020000005", ++ "BriefDescription": "Stage 1 level 1 page table hit" ++ }, ++ { ++ "EventName": "S1L0_HIT", ++ "EventCode": "0x040000005", ++ "BriefDescription": "Stage 1 level 0 page table hit" ++ }, ++ { ++ "EventName": "S2L2_HIT", ++ "EventCode": "0x080000005", ++ "BriefDescription": "Stage 2 level 2 page table hit" ++ }, ++ { ++ "EventName": "IPA_REQ", ++ "EventCode": "0x100000005", ++ "BriefDescription": "Intermediate physical address request" ++ }, ++ { ++ "EventName": "IPA_REFILL", ++ "EventCode": "0x200000005", ++ "BriefDescription": "Intermediate physical address refill" ++ }, ++ { ++ "EventName": "S1_FLT", ++ "EventCode": "0x400000005", ++ "BriefDescription": "Stage 1 fault" ++ }, ++ { ++ "EventName": "S2_FLT", ++ "EventCode": "0x800000005", ++ "BriefDescription": "Stage 2 fault" ++ }, ++ { ++ "EventName": "COLT_REFILL", ++ "EventCode": "0x1000000005", ++ "BriefDescription": "Coalescing translation refill" ++ }, ++ { ++ "EventName": "PMP_PLT", ++ "EventCode": "0x2000000005", ++ "BriefDescription": "Physical memory protection fault" ++ }, ++ { ++ "EventName": "MTT_REQ", ++ "EventCode": "0x4000000005", ++ "BriefDescription": "Memory tracking table request" ++ }, ++ { ++ "EventName": "MTT_L3_HIT", ++ "EventCode": "0x8000000005", ++ "BriefDescription": "Memory tracking table level 3 hit" ++ }, ++ { ++ "EventName": "MTT_L2_HIT", ++ "EventCode": "0x10000000005", ++ "BriefDescription": "Memory tracking table level 2 hit" ++ }, ++ { ++ "EventName": "MTT_L1_HIT", ++ "EventCode": "0x20000000005", ++ "BriefDescription": "Memory tracking table level 1 hit" ++ }, ++ { ++ "EventName": "MTT_LEAFPAGE_HIT", ++ "EventCode": "0x40000000005", ++ "BriefDescription": "Memory tracking table leaf page hit" 
++ }, ++ { ++ "EventName": "MTT_WLK", ++ "EventCode": "0x80000000005", ++ "BriefDescription": "Memory tracking table walk" ++ }, ++ { ++ "EventName": "MTTCACHE_REFILL", ++ "EventCode": "0x100000000005", ++ "BriefDescription": "Memory tracking table cache refill" ++ }, ++ { ++ "EventName": "MTT_FLT", ++ "EventCode": "0x200000000005", ++ "BriefDescription": "Memory tracking table fault" ++ }, ++ { ++ "EventName": "MTTSIZE_LESS_THAN_PTESIZE", ++ "EventCode": "0x400000000005", ++ "BriefDescription": "Memory tracking table size less than pte size" ++ } ++] +\ No newline at end of file +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/pipeline.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/pipeline.json +new file mode 100644 +index 000000000000..1773b59e4aca +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/pipeline.json +@@ -0,0 +1,132 @@ ++[ ++ { ++ "EventName": "STALL_FRONTEND", ++ "EventCode": "0x00000106", ++ "BriefDescription": "Frontend stall" ++ }, ++ { ++ "EventName": "STALL_BACKEND", ++ "EventCode": "0x00000206", ++ "BriefDescription": "Backend stall" ++ }, ++ { ++ "EventName": "STALL", ++ "EventCode": "0x00000406", ++ "BriefDescription": "Stall" ++ }, ++ { ++ "EventName": "STALL_SLOT_BACKEND", ++ "EventCode": "0x00000806", ++ "BriefDescription": "Backend stall slot" ++ }, ++ { ++ "EventName": "STALL_SLOT_FRONTEND", ++ "EventCode": "0x00001006", ++ "BriefDescription": "Frontend stall slot" ++ }, ++ { ++ "EventName": "STALL_SLOT", ++ "EventCode": "0x00002006", ++ "BriefDescription": "Stall slot" ++ }, ++ { ++ "EventName": "STALL_BACKEND_MEM", ++ "EventCode": "0x00004006", ++ "BriefDescription": "Backend stall due to memory" ++ }, ++ { ++ "EventName": "IDR_STALL_IXU_SCHED", ++ "EventCode": "0x00008006", ++ "BriefDescription": "IXU scheduler stall" ++ }, ++ { ++ "EventName": "VX_IQ_STALL", ++ "EventCode": "0x00010006", ++ "BriefDescription": "Dispatch stall due to VX scheduler entries" ++ }, ++ { ++ "EventName": "MCQ_FULL_STALL", 
++ "EventCode": "0x00020006", ++ "BriefDescription": "Dispatch stall due to MCQ entries" ++ }, ++ { ++ "EventName": "STALL_FRONTED_FLUSH", ++ "EventCode": "0x00040006", ++ "BriefDescription": "Dispatch stall due to fronted flush" ++ }, ++ { ++ "EventName": "STALL_BACKEND_CACHE", ++ "EventCode": "0x00080006", ++ "BriefDescription": "Dispatch stall due to L1 data cache miss" ++ }, ++ { ++ "EventName": "STALL_BACKEND_TLB", ++ "EventCode": "0x00100006", ++ "BriefDescription": "Dispatch stall due to L1 data TLB miss" ++ }, ++ { ++ "EventName": "STALL_BACKEND_RESOURCE", ++ "EventCode": "0x00200006", ++ "BriefDescription": "Dispatch stall due to lack of any core resource" ++ }, ++ { ++ "EventName": "FSU_ISSUED", ++ "EventCode": "0x00400006", ++ "BriefDescription": "Uops issued by the FSU scheduler" ++ }, ++ { ++ "EventName": "IXU_NUM_UOP_ISSUED", ++ "EventCode": "0x00800006", ++ "BriefDescription": "Instructions issued by the IXU scheduler" ++ }, ++ { ++ "EventName": "FLAG_DISP_STALL", ++ "EventCode": "0x01000006", ++ "BriefDescription": "Dispatch stall due to mop flag miss" ++ }, ++ { ++ "EventName": "GEN_DISP_STALL", ++ "EventCode": "0x02000006", ++ "BriefDescription": "Dispatch stall due to general register miss" ++ }, ++ { ++ "EventName": "VEC_DISP_STALL", ++ "EventCode": "0x04000006", ++ "BriefDescription": "Dispatch stall due to vector register miss" ++ }, ++ { ++ "EventName": "SX_IQ_STALL", ++ "EventCode": "0x08000006", ++ "BriefDescription": "Stall due to sx issue queue full" ++ }, ++ { ++ "EventName": "MX_IQ_STALL", ++ "EventCode": "0x10000006", ++ "BriefDescription": "Stall due to mx issue queue full" ++ }, ++ { ++ "EventName": "LS_IQ_STALL", ++ "EventCode": "0x20000006", ++ "BriefDescription": "Stall due to ls queue full" ++ }, ++ { ++ "EventName": "STALL_FRONTEND_TLB", ++ "EventCode": "0x80000006", ++ "BriefDescription": "No operation issued due to the frontend TLB" ++ }, ++ { ++ "EventName": "STALL_FRONTEND_CACHE", ++ "EventCode": "0x100000006", ++ 
"BriefDescription": "No operation issued due to the front cache" ++ }, ++ { ++ "EventName": "FPG", ++ "EventCode": "0x200000006", ++ "BriefDescription": "Front-End Pipeline Gating" ++ }, ++ { ++ "EventName": "VTYPE_FLUSH", ++ "EventCode": "0x400000006", ++ "BriefDescription": "Flush due to failed prediction of writing vtype csr" ++ } ++] +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/spe.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/spe.json +new file mode 100644 +index 000000000000..08e80365e044 +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/spe.json +@@ -0,0 +1,7 @@ ++[ ++ { ++ "EventName": "SAMPLE_POP", ++ "EventCode": "0x0000010d", ++ "BriefDescription": "Sample population" ++ } ++] +\ No newline at end of file +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/tlb.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/tlb.json +new file mode 100644 +index 000000000000..d1deb3496cb4 +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/tlb.json +@@ -0,0 +1,82 @@ ++[ ++ { ++ "EventName": "L1I_TLB_REFILL", ++ "EventCode": "0x00000100", ++ "BriefDescription": "L1 instruction TLB refill" ++ }, ++ { ++ "EventName": "L1D_TLB_REFILL", ++ "EventCode": "0x00000200", ++ "BriefDescription": "L1 data TLB refill" ++ }, ++ { ++ "EventName": "L1D_TLB", ++ "EventCode": "0x00000800", ++ "BriefDescription": "L1 data TLB access" ++ }, ++ { ++ "EventName": "L1I_TLB", ++ "EventCode": "0x00001000", ++ "BriefDescription": "L1 instruction TLB access" ++ }, ++ { ++ "EventName": "L2_TLB_REFILL", ++ "EventCode": "0x00002000", ++ "BriefDescription": "Attributable L2 unified TLB refill" ++ }, ++ { ++ "EventName": "L2_TLB", ++ "EventCode": "0x00004000", ++ "BriefDescription": "Attributable L2 unified TLB access" ++ }, ++ { ++ "EventName": "DTLB_WALK", ++ "EventCode": "0x00008000", ++ "BriefDescription": "Access to data TLB that caused a translation (or page) table walk" ++ }, ++ { ++ "EventName": "ITLB_WALK", ++ "EventCode":
"0x00010000", ++ "BriefDescription": "Access to instruction TLB that caused a page table walk" ++ }, ++ { ++ "EventName": "L1D_TLB_REFILL_RD", ++ "EventCode": "0x00020000", ++ "BriefDescription": "L1 data TLB refill read" ++ }, ++ { ++ "EventName": "L1D_TLB_REFILL_WR", ++ "EventCode": "0x00040000", ++ "BriefDescription": "L1 data TLB refill write" ++ }, ++ { ++ "EventName": "L1D_TLB_RD", ++ "EventCode": "0x00080000", ++ "BriefDescription": "L1 data TLB read" ++ }, ++ { ++ "EventName": "L1D_TLB_WR", ++ "EventCode": "0x00100000", ++ "BriefDescription": "L1 data TLB write" ++ }, ++ { ++ "EventName": "L2_TLB_REFILL_RD", ++ "EventCode": "0x00200000", ++ "BriefDescription": "L2 unified TLB refill, read" ++ }, ++ { ++ "EventName": "L2_TLB_REFILL_WR", ++ "EventCode": "0x00400000", ++ "BriefDescription": "L2 unified TLB refill, write" ++ }, ++ { ++ "EventName": "L2_TLB_RD", ++ "EventCode": "0x00800000", ++ "BriefDescription": "L2 unified TLB access, read" ++ }, ++ { ++ "EventName": "L2_TLB_WR", ++ "EventCode": "0x01000000", ++ "BriefDescription": "L2 unified TLB access, write" ++ } ++] +diff --git a/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/vec.json b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/vec.json +new file mode 100644 +index 000000000000..fe8c4c9fedc5 +--- /dev/null ++++ b/tools/perf/pmu-events/arch/riscv/lrw/lrw-core/vec.json +@@ -0,0 +1,82 @@ ++[ ++ { ++ "EventName": "VFP_SPEC", ++ "EventCode": "0x0000010c", ++ "BriefDescription": "Floating point operation speculatively executed" ++ }, ++ { ++ "EventName": "VEC_INST_SPEC", ++ "EventCode": "0x0000020c", ++ "BriefDescription": "Vector instrution speculatively executed" ++ }, ++ { ++ "EventName": "FP_HP_SPEC", ++ "EventCode": "0x0000040c", ++ "BriefDescription": "Half-precision floating-point operation speculatively executed" ++ }, ++ { ++ "EventName": "FP_SP_SPEC", ++ "EventCode": "0x0000080c", ++ "BriefDescription": "Single-precision floating-point operation speculatively executed" ++ }, ++ { ++ "EventName": 
"FP_DP_SPEC", ++ "EventCode": "0x0000100c", ++ "BriefDescription": "Double-precision floating-point operation speculatively executed" ++ }, ++ { ++ "EventName": "VX_MASKED_OP", ++ "EventCode": "0x0000200c", ++ "BriefDescription": "Vector masked operation" ++ }, ++ { ++ "EventName": "VX_EMPTY_MASKED_OP", ++ "EventCode": "0x0000400c", ++ "BriefDescription": "Vector mask empty operation" ++ }, ++ { ++ "EventName": "VX_FULL_MASKED_OP", ++ "EventCode": "0x0000800c", ++ "BriefDescription": "Vector mask full operation" ++ }, ++ { ++ "EventName": "VX_PARTIAL_MASKED_OP", ++ "EventCode": "0x0001000c", ++ "BriefDescription": "Vector mask partial operation" ++ }, ++ { ++ "EventName": "VX_NOT_FULL_MASKED_OP", ++ "EventCode": "0x0002000c", ++ "BriefDescription": "Vector mask not full operation" ++ }, ++ { ++ "EventName": "FP_VEC_SPEC", ++ "EventCode": "0x010000c", ++ "BriefDescription": "Vector floating-point element operations speculatively executed" ++ }, ++ { ++ "EventName": "FP_SCALE_SPEC", ++ "EventCode": "0x020000c", ++ "BriefDescription": "Scalable floating-point element operations speculatively executed" ++ }, ++ { ++ "EventName": "VEC_INT08_SPEC", ++ "EventCode": "0x040000c", ++ "BriefDescription": "Operation counted by VEC_INT_SPEC where the largest type is 8-bit integer" ++ }, ++ { ++ "EventName": "VEC_INT16_SPEC", ++ "EventCode": "0x080000c", ++ "BriefDescription": "Operation counted by ASE_SVE_INT_SPEC where the largest type is 16-bit integer" ++ }, ++ { ++ "EventName": "VEC_INT32_SPEC", ++ "EventCode": "0x100000c", ++ "BriefDescription": "Operation counted by ASE_SVE_INT_SPEC where the largest type is 32-bit integer" ++ }, ++ { ++ "EventName": "VEC_INT64_SPEC", ++ "EventCode": "0x200000c", ++ "BriefDescription": "Operation counted by ASE_SVE_INT_SPEC where the largest type is 64-bit integer" ++ } ++] diff --git a/tools/perf/pmu-events/arch/riscv/mapfile.csv b/tools/perf/pmu-events/arch/riscv/mapfile.csv -index c61b3d6ef616..d39d9692aba3 100644 +index 
c61b3d6ef616..325f5120d88b 100644 --- a/tools/perf/pmu-events/arch/riscv/mapfile.csv +++ b/tools/perf/pmu-events/arch/riscv/mapfile.csv -@@ -15,3 +15,5 @@ +@@ -15,3 +15,6 @@ # #MVENDORID-MARCHID-MIMPID,Version,Filename,EventType 0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core +0x5b7-0x0-0x0,v1,thead/c900-legacy,core +0x90c010d,v1,thead/th1520-ddr,uncore ++0x0-0x8000000000000920-0x3000020240831,v1,lrw/lrw-core,core diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json new file mode 100644 index 000000000000..2b142348d635 @@ -632116,6 +648455,19 @@ index 9f99ea42f45f..d8b7c9d78ad1 100644 }; /* +diff --git a/tools/testing/selftests/riscv/Makefile b/tools/testing/selftests/riscv/Makefile +index 4a9ff515a3a0..b9c3da3fd7aa 100644 +--- a/tools/testing/selftests/riscv/Makefile ++++ b/tools/testing/selftests/riscv/Makefile +@@ -5,7 +5,7 @@ + ARCH ?= $(shell uname -m 2>/dev/null || echo not) + + ifneq (,$(filter $(ARCH),riscv)) +-RISCV_SUBTARGETS ?= hwprobe vector mm ++RISCV_SUBTARGETS ?= hwprobe vector mm sse + else + RISCV_SUBTARGETS := + endif diff --git a/tools/testing/selftests/riscv/hwprobe/Makefile b/tools/testing/selftests/riscv/hwprobe/Makefile index ebdbb3c22e54..f224b84591fb 100644 --- a/tools/testing/selftests/riscv/hwprobe/Makefile @@ -632505,6 +648857,608 @@ index 000000000000..e3fccb390c4d + size_t cpusetsize, unsigned long *cpus, unsigned int flags); + +#endif +diff --git a/tools/testing/selftests/riscv/sse/Makefile b/tools/testing/selftests/riscv/sse/Makefile +new file mode 100644 +index 000000000000..67eaee06f213 +--- /dev/null ++++ b/tools/testing/selftests/riscv/sse/Makefile +@@ -0,0 +1,5 @@ ++TEST_GEN_MODS_DIR := module ++ ++TEST_FILES := run_sse_test.sh ++ ++include ../../lib.mk +diff --git a/tools/testing/selftests/riscv/sse/module/Makefile b/tools/testing/selftests/riscv/sse/module/Makefile +new file mode 100644 +index 000000000000..02018f083456 +--- 
/dev/null ++++ b/tools/testing/selftests/riscv/sse/module/Makefile +@@ -0,0 +1,16 @@ ++TESTMODS_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) ++KDIR ?= /lib/modules/$(shell uname -r)/build ++ ++obj-m += riscv_sse_test.o ++ ++# Ensure that KDIR exists, otherwise skip the compilation ++modules: ++ifneq ("$(wildcard $(KDIR))", "") ++ $(Q)$(MAKE) -C $(KDIR) modules KBUILD_EXTMOD=$(TESTMODS_DIR) ++endif ++ ++# Ensure that KDIR exists, otherwise skip the clean target ++clean: ++ifneq ("$(wildcard $(KDIR))", "") ++ $(Q)$(MAKE) -C $(KDIR) clean KBUILD_EXTMOD=$(TESTMODS_DIR) ++endif +diff --git a/tools/testing/selftests/riscv/sse/module/riscv_sse_test.c b/tools/testing/selftests/riscv/sse/module/riscv_sse_test.c +new file mode 100644 +index 000000000000..65df41a2d40a +--- /dev/null ++++ b/tools/testing/selftests/riscv/sse/module/riscv_sse_test.c +@@ -0,0 +1,513 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Copyright (C) 2024 Rivos Inc. ++ */ ++ ++#define pr_fmt(fmt) "riscv_sse_test: " fmt ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#define RUN_LOOP_COUNT 1000 ++#define SSE_FAILED_PREFIX "FAILED: " ++#define sse_err(...) 
pr_err(SSE_FAILED_PREFIX __VA_ARGS__) ++ ++struct sse_event_desc { ++ u32 evt_id; ++ const char *name; ++ bool can_inject; ++}; ++ ++static struct sse_event_desc sse_event_descs[] = { ++ { ++ .evt_id = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS, ++ .name = "local_high_prio_ras", ++ }, ++ { ++ .evt_id = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP, ++ .name = "local_double_trap", ++ }, ++ { ++ .evt_id = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS, ++ .name = "global_high_prio_ras", ++ }, ++ { ++ .evt_id = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, ++ .name = "local_pmu_overflow", ++ }, ++ { ++ .evt_id = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS, ++ .name = "local_low_prio_ras", ++ }, ++ { ++ .evt_id = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS, ++ .name = "global_low_prio_ras", ++ }, ++ { ++ .evt_id = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED, ++ .name = "local_software_injected", ++ }, ++ { ++ .evt_id = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED, ++ .name = "global_software_injected", ++ } ++}; ++ ++static struct sse_event_desc *sse_get_evt_desc(u32 evt) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(sse_event_descs); i++) { ++ if (sse_event_descs[i].evt_id == evt) ++ return &sse_event_descs[i]; ++ } ++ ++ return NULL; ++} ++ ++static const char *sse_evt_name(u32 evt) ++{ ++ struct sse_event_desc *desc = sse_get_evt_desc(evt); ++ ++ return desc != NULL ? desc->name : NULL; ++} ++ ++static bool sse_test_can_inject_event(u32 evt) ++{ ++ struct sse_event_desc *desc = sse_get_evt_desc(evt); ++ ++ return desc != NULL ? 
desc->can_inject : false; ++} ++ ++static struct sbiret sbi_sse_ecall(int fid, unsigned long arg0, unsigned long arg1) ++{ ++ return sbi_ecall(SBI_EXT_SSE, fid, arg0, arg1, 0, 0, 0, 0); ++} ++ ++static int sse_event_attr_get(u32 evt, unsigned long attr_id, ++ unsigned long *val) ++{ ++ struct sbiret sret; ++ unsigned long *attr_buf, phys; ++ ++ attr_buf = kmalloc(sizeof(unsigned long), GFP_KERNEL); ++ if (!attr_buf) ++ return -ENOMEM; ++ ++ phys = virt_to_phys(attr_buf); ++ ++ sret = sbi_ecall(SBI_EXT_SSE, SBI_SSE_EVENT_ATTR_READ, evt, attr_id, 1, ++ phys, 0, 0); ++ if (sret.error) ++ return sbi_err_map_linux_errno(sret.error); ++ ++ *val = *attr_buf; ++ ++ return 0; ++} ++ ++static int sse_test_signal(u32 evt, unsigned int cpu) ++{ ++ unsigned int hart_id = cpuid_to_hartid_map(cpu); ++ struct sbiret ret; ++ ++ ret = sbi_sse_ecall(SBI_SSE_EVENT_SIGNAL, evt, hart_id); ++ if (ret.error) { ++ sse_err("Failed to signal event %x, error %ld\n", evt, ret.error); ++ return sbi_err_map_linux_errno(ret.error); ++ } ++ ++ return 0; ++} ++ ++static int sse_test_inject_event(struct sse_event *event, u32 evt, unsigned int cpu) ++{ ++ int res; ++ unsigned long status; ++ ++ if (sse_event_is_global(evt)) { ++ /* ++ * Due to the fact the completion might happen faster than ++ * the call to SBI_SSE_COMPLETE in the handler, if the event was ++ * running on another CPU, we need to wait for the event status ++ * to be !RUNNING. 
++ */ ++ do { ++ res = sse_event_attr_get(evt, SBI_SSE_ATTR_STATUS, &status); ++ if (res) { ++ sse_err("Failed to get status for evt %x, error %d\n", evt, res); ++ return res; ++ } ++ status = status & SBI_SSE_ATTR_STATUS_STATE_MASK; ++ } while (status == SBI_SSE_STATE_RUNNING); ++ ++ res = sse_event_set_target_cpu(event, cpu); ++ if (res) { ++ sse_err("Failed to set cpu for evt %x, error %d\n", evt, res); ++ return res; ++ } ++ } ++ ++ return sse_test_signal(evt, cpu); ++} ++ ++struct fast_test_arg { ++ u32 evt; ++ int cpu; ++ bool completion; ++}; ++ ++static int sse_test_handler(u32 evt, void *arg, struct pt_regs *regs) ++{ ++ int ret = 0; ++ struct fast_test_arg *targ = arg; ++ u32 test_evt = READ_ONCE(targ->evt); ++ int cpu = READ_ONCE(targ->cpu); ++ ++ if (evt != test_evt) { ++ sse_err("Received SSE event id %x instead of %x\n", test_evt, evt); ++ ret = -EINVAL; ++ } ++ ++ if (cpu != smp_processor_id()) { ++ sse_err("Received SSE event %d on CPU %d instead of %d\n", evt, smp_processor_id(), ++ cpu); ++ ret = -EINVAL; ++ } ++ ++ WRITE_ONCE(targ->completion, true); ++ ++ return ret; ++} ++ ++static void sse_run_fast_test(struct fast_test_arg *test_arg, struct sse_event *event, u32 evt) ++{ ++ unsigned long timeout; ++ int ret, cpu; ++ ++ for_each_online_cpu(cpu) { ++ WRITE_ONCE(test_arg->completion, false); ++ WRITE_ONCE(test_arg->cpu, cpu); ++ /* Test arg is used on another CPU */ ++ smp_wmb(); ++ ++ ret = sse_test_inject_event(event, evt, cpu); ++ if (ret) { ++ sse_err("event %s injection failed, err %d\n", sse_evt_name(evt), ret); ++ return; ++ } ++ ++ timeout = jiffies + HZ / 100; ++ /* We can not use since they are not NMI safe */ ++ while (!READ_ONCE(test_arg->completion) && ++ time_before(jiffies, timeout)) { ++ cpu_relax(); ++ } ++ if (!time_before(jiffies, timeout)) { ++ sse_err("Failed to wait for event %s completion on CPU %d\n", ++ sse_evt_name(evt), cpu); ++ return; ++ } ++ } ++} ++ ++static void sse_test_injection_fast(void) ++{ ++ int i, ret = 0, 
j; ++ u32 evt; ++ struct fast_test_arg test_arg; ++ struct sse_event *event; ++ ++ pr_info("Starting SSE test (fast)\n"); ++ ++ for (i = 0; i < ARRAY_SIZE(sse_event_descs); i++) { ++ evt = sse_event_descs[i].evt_id; ++ WRITE_ONCE(test_arg.evt, evt); ++ ++ if (!sse_event_descs[i].can_inject) ++ continue; ++ ++ event = sse_event_register(evt, 0, sse_test_handler, ++ (void *)&test_arg); ++ if (IS_ERR(event)) { ++ sse_err("Failed to register event %s, err %ld\n", sse_evt_name(evt), ++ PTR_ERR(event)); ++ goto out; ++ } ++ ++ ret = sse_event_enable(event); ++ if (ret) { ++ sse_err("Failed to enable event %s, err %d\n", sse_evt_name(evt), ret); ++ goto err_unregister; ++ } ++ ++ pr_info("Starting testing event %s\n", sse_evt_name(evt)); ++ ++ for (j = 0; j < RUN_LOOP_COUNT; j++) ++ sse_run_fast_test(&test_arg, event, evt); ++ ++ pr_info("Finished testing event %s\n", sse_evt_name(evt)); ++ ++ sse_event_disable(event); ++err_unregister: ++ sse_event_unregister(event); ++ } ++out: ++ pr_info("Finished SSE test (fast)\n"); ++} ++ ++struct priority_test_arg { ++ unsigned long evt; ++ struct sse_event *event; ++ bool called; ++ u32 prio; ++ struct priority_test_arg *next_evt_arg; ++ void (*check_func)(struct priority_test_arg *arg); ++}; ++ ++static int sse_hi_priority_test_handler(u32 evt, void *arg, ++ struct pt_regs *regs) ++{ ++ struct priority_test_arg *targ = arg; ++ struct priority_test_arg *next = READ_ONCE(targ->next_evt_arg); ++ ++ WRITE_ONCE(targ->called, 1); ++ ++ if (next) { ++ sse_test_signal(next->evt, smp_processor_id()); ++ if (!READ_ONCE(next->called)) { ++ sse_err("Higher priority event %s was not handled %s\n", ++ sse_evt_name(next->evt), sse_evt_name(evt)); ++ } ++ } ++ ++ return 0; ++} ++ ++static int sse_low_priority_test_handler(u32 evt, void *arg, struct pt_regs *regs) ++{ ++ struct priority_test_arg *targ = arg; ++ struct priority_test_arg *next = READ_ONCE(targ->next_evt_arg); ++ ++ WRITE_ONCE(targ->called, 1); ++ ++ if (next) { ++ 
sse_test_signal(next->evt, smp_processor_id()); ++ if (READ_ONCE(next->called)) { ++ sse_err("Lower priority event %s was handle before %s\n", ++ sse_evt_name(next->evt), sse_evt_name(evt)); ++ } ++ } ++ ++ return 0; ++} ++ ++static void sse_test_injection_priority_arg(struct priority_test_arg *args, unsigned int args_size, ++ sse_event_handler handler, const char *test_name) ++{ ++ unsigned int i; ++ int ret; ++ struct sse_event *event; ++ struct priority_test_arg *arg, *first_arg = NULL, *prev_arg = NULL; ++ ++ pr_info("Starting SSE priority test (%s)\n", test_name); ++ for (i = 0; i < args_size; i++) { ++ arg = &args[i]; ++ ++ if (!sse_test_can_inject_event(arg->evt)) ++ continue; ++ ++ WRITE_ONCE(arg->called, false); ++ WRITE_ONCE(arg->next_evt_arg, NULL); ++ if (prev_arg) ++ WRITE_ONCE(prev_arg->next_evt_arg, arg); ++ ++ prev_arg = arg; ++ ++ if (!first_arg) ++ first_arg = arg; ++ ++ event = sse_event_register(arg->evt, arg->prio, handler, (void *)arg); ++ if (IS_ERR(event)) { ++ sse_err("Failed to register event %s, err %ld\n", sse_evt_name(arg->evt), ++ PTR_ERR(event)); ++ goto release_events; ++ } ++ arg->event = event; ++ ++ if (sse_event_is_global(arg->evt)) { ++ /* Target event at current CPU */ ++ ret = sse_event_set_target_cpu(event, smp_processor_id()); ++ if (ret) { ++ sse_err("Failed to set event %s target CPU, err %d\n", ++ sse_evt_name(arg->evt), ret); ++ goto release_events; ++ } ++ } ++ ++ ret = sse_event_enable(event); ++ if (ret) { ++ sse_err("Failed to enable event %s, err %d\n", sse_evt_name(arg->evt), ret); ++ goto release_events; ++ } ++ } ++ ++ if (!first_arg) { ++ sse_err("No injectable event available\n"); ++ return; ++ } ++ ++ /* Inject first event, handler should trigger the others in chain. 
*/ ++ ret = sse_test_inject_event(first_arg->event, first_arg->evt, smp_processor_id()); ++ if (ret) { ++ sse_err("SSE event %s injection failed\n", sse_evt_name(first_arg->evt)); ++ goto release_events; ++ } ++ ++ /* ++ * Events are injected directly on the current CPU after calling sse_test_inject_event() ++ * so that execution is preempted right away, no need to wait for timeout. ++ */ ++ arg = first_arg; ++ while (arg) { ++ if (!READ_ONCE(arg->called)) { ++ sse_err("Event %s handler was not called\n", ++ sse_evt_name(arg->evt)); ++ ret = -EINVAL; ++ } ++ ++ ++ event = arg->event; ++ arg = READ_ONCE(arg->next_evt_arg); ++ } ++ ++release_events: ++ ++ arg = first_arg; ++ while (arg) { ++ event = arg->event; ++ if (!event) ++ break; ++ ++ sse_event_disable(event); ++ sse_event_unregister(event); ++ arg = READ_ONCE(arg->next_evt_arg); ++ } ++ ++ pr_info("Finished SSE priority test (%s)\n", test_name); ++} ++ ++static void sse_test_injection_priority(void) ++{ ++ struct priority_test_arg default_hi_prio_args[] = { ++ { .evt = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED }, ++ { .evt = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED }, ++ { .evt = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS }, ++ { .evt = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS }, ++ { .evt = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW }, ++ { .evt = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS }, ++ { .evt = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP }, ++ { .evt = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS }, ++ }; ++ ++ struct priority_test_arg default_low_prio_args[] = { ++ { .evt = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS }, ++ { .evt = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP }, ++ { .evt = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS }, ++ { .evt = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW }, ++ { .evt = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS }, ++ { .evt = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS }, ++ { .evt = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED }, ++ { .evt = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED }, ++ ++ }; ++ struct priority_test_arg set_prio_args[] = { ++ { .evt = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED,
.prio = 5 }, ++ { .evt = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED, .prio = 10 }, ++ { .evt = SBI_SSE_EVENT_GLOBAL_LOW_PRIO_RAS, .prio = 15 }, ++ { .evt = SBI_SSE_EVENT_LOCAL_LOW_PRIO_RAS, .prio = 20 }, ++ { .evt = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, .prio = 25 }, ++ { .evt = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS, .prio = 30 }, ++ { .evt = SBI_SSE_EVENT_LOCAL_DOUBLE_TRAP, .prio = 35 }, ++ { .evt = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS, .prio = 40 }, ++ }; ++ ++ struct priority_test_arg same_prio_args[] = { ++ { .evt = SBI_SSE_EVENT_LOCAL_PMU_OVERFLOW, .prio = 0 }, ++ { .evt = SBI_SSE_EVENT_LOCAL_HIGH_PRIO_RAS, .prio = 10 }, ++ { .evt = SBI_SSE_EVENT_LOCAL_SOFTWARE_INJECTED, .prio = 10 }, ++ { .evt = SBI_SSE_EVENT_GLOBAL_SOFTWARE_INJECTED, .prio = 10 }, ++ { .evt = SBI_SSE_EVENT_GLOBAL_HIGH_PRIO_RAS, .prio = 20 }, ++ }; ++ ++ sse_test_injection_priority_arg(default_hi_prio_args, ARRAY_SIZE(default_hi_prio_args), ++ sse_hi_priority_test_handler, "high"); ++ ++ sse_test_injection_priority_arg(default_low_prio_args, ARRAY_SIZE(default_low_prio_args), ++ sse_low_priority_test_handler, "low"); ++ ++ sse_test_injection_priority_arg(set_prio_args, ARRAY_SIZE(set_prio_args), ++ sse_low_priority_test_handler, "set"); ++ ++ sse_test_injection_priority_arg(same_prio_args, ARRAY_SIZE(same_prio_args), ++ sse_low_priority_test_handler, "same_prio_args"); ++} ++ ++ ++static bool sse_get_inject_status(u32 evt) ++{ ++ int ret; ++ unsigned long val; ++ ++ /* Check if injection is supported */ ++ ret = sse_event_attr_get(evt, SBI_SSE_ATTR_STATUS, &val); ++ if (ret) ++ return false; ++ ++ return !!(val & BIT(SBI_SSE_ATTR_STATUS_INJECT_OFFSET)); ++} ++ ++static void sse_init_events(void) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(sse_event_descs); i++) { ++ struct sse_event_desc *desc = &sse_event_descs[i]; ++ ++ desc->can_inject = sse_get_inject_status(desc->evt_id); ++ if (!desc->can_inject) ++ pr_info("Can not inject event %s, tests using this event will be skipped\n", ++ desc->name); ++ } 
++} ++ ++static int __init sse_test_init(void) ++{ ++ sse_init_events(); ++ ++ sse_test_injection_fast(); ++ sse_test_injection_priority(); ++ ++ return 0; ++} ++ ++static void __exit sse_test_exit(void) ++{ ++} ++ ++module_init(sse_test_init); ++module_exit(sse_test_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Clément Léger "); ++MODULE_DESCRIPTION("Test module for SSE"); +diff --git a/tools/testing/selftests/riscv/sse/run_sse_test.sh b/tools/testing/selftests/riscv/sse/run_sse_test.sh +new file mode 100644 +index 000000000000..888bc4a99cb3 +--- /dev/null ++++ b/tools/testing/selftests/riscv/sse/run_sse_test.sh +@@ -0,0 +1,44 @@ ++#!/bin/bash ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Copyright (C) 2025 Rivos Inc. ++ ++MODULE_NAME=riscv_sse_test ++DRIVER="./module/${MODULE_NAME}.ko" ++ ++check_test_failed_prefix() { ++ if dmesg | grep -q "${MODULE_NAME}: FAILED:";then ++ echo "${MODULE_NAME} failed, please check dmesg" ++ exit 1 ++ fi ++} ++ ++# Kselftest framework requirement - SKIP code is 4. ++ksft_skip=4 ++ ++check_test_requirements() ++{ ++ uid=$(id -u) ++ if [ $uid -ne 0 ]; then ++ echo "$0: Must be run as root" ++ exit $ksft_skip ++ fi ++ ++ if ! which insmod > /dev/null 2>&1; then ++ echo "$0: You need insmod installed" ++ exit $ksft_skip ++ fi ++ ++ if [ ! 
-f $DRIVER ]; then ++ echo "$0: You need to compile ${MODULE_NAME} module" ++ exit $ksft_skip ++ fi ++} ++ ++check_test_requirements ++ ++insmod $DRIVER > /dev/null 2>&1 ++rmmod $MODULE_NAME ++check_test_failed_prefix ++ ++exit 0 diff --git a/tools/testing/selftests/riscv/vector/vstate_prctl.c b/tools/testing/selftests/riscv/vector/vstate_prctl.c index 8ad94e08ff4d..27668fb3b6d0 100644 --- a/tools/testing/selftests/riscv/vector/vstate_prctl.c @@ -632531,6 +649485,37 @@ index 8ad94e08ff4d..27668fb3b6d0 100644 #define NEXT_PROGRAM "./vstate_exec_nolibc" static int launch_test(int test_inherit) { +diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c +index 6b9c240e22de..8a4ad17d5bd3 100644 +--- a/virt/kvm/eventfd.c ++++ b/virt/kvm/eventfd.c +@@ -294,11 +294,10 @@ void __attribute__((weak)) kvm_arch_irq_bypass_start( + { + } + +-int __attribute__((weak)) kvm_arch_update_irqfd_routing( +- struct kvm *kvm, unsigned int host_irq, +- uint32_t guest_irq, bool set) ++void __attribute__((weak)) kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd, ++ struct kvm_kernel_irq_routing_entry *old, ++ struct kvm_kernel_irq_routing_entry *new) + { +- return 0; + } + + bool __attribute__((weak)) kvm_arch_irqfd_route_changed( +@@ -651,10 +650,7 @@ void kvm_irq_routing_update(struct kvm *kvm) + #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS + if (irqfd->producer && + kvm_arch_irqfd_route_changed(&old, &irqfd->irq_entry)) { +- int ret = kvm_arch_update_irqfd_routing( +- irqfd->kvm, irqfd->producer->irq, +- irqfd->gsi, 1); +- WARN_ON(ret); ++ kvm_arch_update_irqfd_routing(irqfd, &old, &irqfd->irq_entry); + } + #endif + } -- 2.34.1 diff --git a/kernel.spec b/kernel.spec index 85bf81e2..cf875e2e 100644 --- a/kernel.spec +++ b/kernel.spec @@ -42,7 +42,7 @@ rm -f test_openEuler_sign.ko test_openEuler_sign.ko.sig %global upstream_sublevel 0 %global devel_release 126 %global maintenance_release .0.0 -%global pkg_release .102 +%global pkg_release .103 %global openeuler_lts 1 %global openeuler_major 
2403 @@ -1138,6 +1138,12 @@ fi %endif %changelog +* Thu Dec 04 2025 Mingzheng Xing - 6.6.0-126.0.0.103 +- RISC-V kernel upgrade to 6.6.0-126.0.0 +- Add support for LRW, DP1000 +- Add IOMMU related support +- Backport mainline RISC-V features. + * Thu Dec 04 2025 Li Nan - 6.6.0-126.0.0.102 - !19468 urma: Bugfix for ubcore_get_route_list implement - urma: Bugfix for ubcore_get_route_list implement -- Gitee